ReactOS 0.4.15-dev-7788-g1ad9096
linux.h
1#pragma once
2
3#include <ntddk.h>
4
5#ifndef NULL
6#define NULL (void*)0
7#endif
8
9typedef struct page {
10 int x;
11} mem_map_t;
12
13
14
15
16
17
18
19
20
21
22
23
24
25/* i386 */
26
27typedef unsigned short umode_t;
28
29/*
30 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
31 * header files exported to user space
32 */
33
34typedef __signed__ char __s8;
35typedef unsigned char __u8;
36
37typedef __signed__ short __s16;
38typedef unsigned short __u16;
39
40typedef __signed__ int __s32;
41typedef unsigned int __u32;
42
43#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
44typedef __signed__ long long __s64;
45typedef unsigned long long __u64;
46#endif
47
48/*
49 * These aren't exported outside the kernel to avoid name space clashes
50 */
51typedef signed char s8;
52typedef unsigned char u8;
53
54typedef signed short s16;
55typedef unsigned short u16;
56
57typedef signed int s32;
58typedef unsigned int u32;
59
60typedef signed long long s64;
61typedef unsigned long long u64;
62
63#define BITS_PER_LONG 32
64
65/* DMA addresses come in generic and 64-bit flavours. */
66
67#ifdef CONFIG_HIGHMEM64G
68typedef u64 dma_addr_t;
69#else
70typedef u32 dma_addr_t;
71#endif
72typedef u64 dma64_addr_t;
73
74
75
76/*
77 * This allows for 1024 file descriptors: if NR_OPEN is ever grown
78 * beyond that you'll have to change this too. But 1024 fd's seem to be
79 * enough even for such "real" unices like OSF/1, so hopefully this is
80 * one limit that doesn't have to be changed [again].
81 *
82 * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
83 * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
84 * place for them. Solved by having dummy defines in <sys/time.h>.
85 */
86
87/*
88 * Those macros may have been defined in <gnu/types.h>. But we always
89 * use the ones here.
90 */
91#undef __NFDBITS
92#define __NFDBITS (8 * sizeof(unsigned long))
93
94#undef __FD_SETSIZE
95#define __FD_SETSIZE 1024
96
97#undef __FDSET_LONGS
98#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
99
100#undef __FDELT
101#define __FDELT(d) ((d) / __NFDBITS)
102
103#undef __FDMASK
104#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
105
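/*
 * Illustrative sketch (not part of the original header): how a descriptor
 * number maps onto the fds_bits array declared just below. The helper name
 * set_fd_bit is made up for this example; only __FDELT and __FDMASK come
 * from this file.
 */
#if 0
static inline void set_fd_bit(unsigned long *bits, int fd)
{
    /* fd 35 with 32-bit longs: word 35/32 = 1, mask 1UL << (35%32) = 0x8 */
    bits[__FDELT(fd)] |= __FDMASK(fd);
}
#endif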
106typedef struct {
107 unsigned long fds_bits [__FDSET_LONGS];
108} __kernel_fd_set;
109
110/* Type of a signal handler. */
111typedef void (*__kernel_sighandler_t)(int);
112
113/* Type of a SYSV IPC key. */
114typedef int __kernel_key_t;
115
116
117/*
118 * This file is generally used by user-level software, so you need to
119 * be a little careful about namespace pollution etc. Also, we cannot
120 * assume GCC is being used.
121 */
122
123typedef unsigned short __kernel_dev_t;
124typedef unsigned long __kernel_ino_t;
125typedef unsigned short __kernel_mode_t;
126typedef unsigned short __kernel_nlink_t;
127typedef long __kernel_off_t;
128typedef int __kernel_pid_t;
129typedef unsigned short __kernel_ipc_pid_t;
130typedef unsigned short __kernel_uid_t;
131typedef unsigned short __kernel_gid_t;
132typedef unsigned int __kernel_size_t;
133typedef int __kernel_ssize_t;
134typedef int __kernel_ptrdiff_t;
135typedef long __kernel_time_t;
136typedef long __kernel_suseconds_t;
137typedef long __kernel_clock_t;
138typedef int __kernel_daddr_t;
139typedef char * __kernel_caddr_t;
140typedef unsigned short __kernel_uid16_t;
141typedef unsigned short __kernel_gid16_t;
142typedef unsigned int __kernel_uid32_t;
143typedef unsigned int __kernel_gid32_t;
144
145typedef unsigned short __kernel_old_uid_t;
146typedef unsigned short __kernel_old_gid_t;
147
148#ifdef __GNUC__
149typedef long long __kernel_loff_t;
150#endif
151
152typedef struct {
153#if defined(__KERNEL__) || defined(__USE_ALL)
154 int val[2];
155#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
156 int __val[2];
157#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
158} __kernel_fsid_t;
159
160#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
161
162#undef __FD_SET
163#define __FD_SET(fd,fdsetp) \
164 __asm__ __volatile__("btsl %1,%0": \
165 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
166
167#undef __FD_CLR
168#define __FD_CLR(fd,fdsetp) \
169 __asm__ __volatile__("btrl %1,%0": \
170 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
171
172#undef __FD_ISSET
173#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
174 unsigned char __result; \
175 __asm__ __volatile__("btl %1,%2 ; setb %0" \
176 :"=q" (__result) :"r" ((int) (fd)), \
177 "m" (*(__kernel_fd_set *) (fdsetp))); \
178 __result; }))
179
180#undef __FD_ZERO
181#define __FD_ZERO(fdsetp) \
182do { \
183 int __d0, __d1; \
184 __asm__ __volatile__("cld ; rep ; stosl" \
185 :"=m" (*(__kernel_fd_set *) (fdsetp)), \
186 "=&c" (__d0), "=&D" (__d1) \
187 :"a" (0), "1" (__FDSET_LONGS), \
188 "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
189} while (0)
190
191#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
192
193
194#ifndef __KERNEL_STRICT_NAMES
195
196typedef __kernel_fd_set fd_set;
197typedef __kernel_dev_t dev_t;
198typedef __kernel_ino_t ino_t;
199typedef __kernel_mode_t mode_t;
200typedef __kernel_nlink_t nlink_t;
201typedef __kernel_off_t off_t;
202typedef __kernel_pid_t pid_t;
203typedef __kernel_daddr_t daddr_t;
204typedef __kernel_key_t key_t;
205typedef __kernel_suseconds_t suseconds_t;
206
207#ifdef __KERNEL__
208typedef __kernel_uid32_t uid_t;
209typedef __kernel_gid32_t gid_t;
210typedef __kernel_uid16_t uid16_t;
211typedef __kernel_gid16_t gid16_t;
212
213#ifdef CONFIG_UID16
214/* This is defined by include/asm-{arch}/posix_types.h */
215typedef __kernel_old_uid_t old_uid_t;
216typedef __kernel_old_gid_t old_gid_t;
217#endif /* CONFIG_UID16 */
218
219/* libc5 includes this file to define uid_t, thus uid_t can never change
220 * when it is included by non-kernel code
221 */
222#else
223typedef __kernel_uid_t uid_t;
224typedef __kernel_gid_t gid_t;
225#endif /* __KERNEL__ */
226
227#if defined(__GNUC__)
228typedef __kernel_loff_t loff_t;
229#endif
230
231/*
232 * The following typedefs are also protected by individual ifdefs for
233 * historical reasons:
234 */
235#ifndef _SIZE_T
236#define _SIZE_T
237typedef __kernel_size_t size_t;
238#endif
239
240#ifndef _SSIZE_T
241#define _SSIZE_T
242typedef __kernel_ssize_t ssize_t;
243#endif
244
245#ifndef _PTRDIFF_T
246#define _PTRDIFF_T
247typedef __kernel_ptrdiff_t ptrdiff_t;
248#endif
249
250#ifndef _TIME_T
251#define _TIME_T
252typedef __kernel_time_t time_t;
253#endif
254
255#ifndef _CLOCK_T
256#define _CLOCK_T
257typedef __kernel_clock_t clock_t;
258#endif
259
260#ifndef _CADDR_T
261#define _CADDR_T
262typedef __kernel_caddr_t caddr_t;
263#endif
264
265/* bsd */
266typedef unsigned char u_char;
267typedef unsigned short u_short;
268typedef unsigned int u_int;
269typedef unsigned long u_long;
270
271/* sysv */
272typedef unsigned char unchar;
273typedef unsigned short ushort;
274typedef unsigned int uint;
275typedef unsigned long ulong;
276
277#ifndef __BIT_TYPES_DEFINED__
278#define __BIT_TYPES_DEFINED__
279
280typedef __u8 u_int8_t;
281typedef __s8 int8_t;
282typedef __u16 u_int16_t;
283typedef __s16 int16_t;
284typedef __u32 u_int32_t;
285typedef __s32 int32_t;
286
287#endif /* !(__BIT_TYPES_DEFINED__) */
288
289typedef __u8 uint8_t;
290typedef __u16 uint16_t;
291typedef __u32 uint32_t;
292
293#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
294typedef __u64 uint64_t;
295typedef __u64 u_int64_t;
296typedef __s64 int64_t;
297#endif
298
299#endif /* __KERNEL_STRICT_NAMES */
300
301/*
302 * Below are truly Linux-specific types that should never collide with
303 * any application/library that wants linux/types.h.
304 */
305
306struct ustat {
307 __kernel_daddr_t f_tfree;
308 __kernel_ino_t f_tinode;
309 char f_fname[6];
310 char f_fpack[6];
311};
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326#ifndef __LITTLE_ENDIAN
327#define __LITTLE_ENDIAN 1234
328#endif
329#ifndef __LITTLE_ENDIAN_BITFIELD
330#define __LITTLE_ENDIAN_BITFIELD
331#endif
332
333#if 1 /* swab */
334
335/*
336 * linux/byteorder/swab.h
337 * Byte-swapping, independently from CPU endianness
338 * swabXX[ps]?(foo)
339 *
340 * Francois-Rene Rideau <fare@tunes.org> 19971205
341 * separated swab functions from cpu_to_XX,
342 * to clean up support for bizarre-endian architectures.
343 *
344 * See asm-i386/byteorder.h and such for examples of how to provide
345 * architecture-dependent optimized versions
346 *
347 */
348
349/* casts are necessary for constants, because we never know for sure
350 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
351 */
352#define ___swab16(x) \
353({ \
354 __u16 __x = (x); \
355 ((__u16)( \
356 (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
357 (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
358})
359
360#define ___swab24(x) \
361({ \
362 __u32 __x = (x); \
363 ((__u32)( \
364 ((__x & (__u32)0x000000ffUL) << 16) | \
365 (__x & (__u32)0x0000ff00UL) | \
366 ((__x & (__u32)0x00ff0000UL) >> 16) )); \
367})
368
369#define ___swab32(x) \
370({ \
371 __u32 __x = (x); \
372 ((__u32)( \
373 (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
374 (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
375 (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
376 (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
377})
378
379#define ___swab64(x) \
380({ \
381 __u64 __x = (x); \
382 ((__u64)( \
383 (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
384 (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
385 (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
386 (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
387 (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
388 (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
389 (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
390 (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
391})
392
393#define ___constant_swab16(x) \
394 ((__u16)( \
395 (((__u16)(x) & (__u16)0x00ffU) << 8) | \
396 (((__u16)(x) & (__u16)0xff00U) >> 8) ))
397#define ___constant_swab24(x) \
398 ((__u32)( \
399 (((__u32)(x) & (__u32)0x000000ffU) << 16) | \
 400  ((__u32)(x) & (__u32)0x0000ff00U) | \
401 (((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
402#define ___constant_swab32(x) \
403 ((__u32)( \
404 (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
405 (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
406 (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
407 (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
408#define ___constant_swab64(x) \
409 ((__u64)( \
410 (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
411 (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
412 (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
413 (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
414 (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
415 (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
416 (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
417 (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
418
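/*
 * Illustrative sketch (not part of the original header): what the swab
 * helpers above compute, with the results spelled out in the comments.
 */
#if 0
static inline void swab_example(void)
{
    __u16 a = ___constant_swab16(0x1234);     /* 0x3412 */
    __u32 b = ___constant_swab32(0x12345678); /* 0x78563412 */
    __u32 c = ___constant_swab24(0x123456);   /* 0x563412, low 24 bits only */
    (void)a; (void)b; (void)c;
}
#endif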
419/*
420 * provide defaults when no architecture-specific optimization is detected
421 */
422#ifndef __arch__swab16
423# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
424#endif
425#ifndef __arch__swab24
426# define __arch__swab24(x) ({ __u32 __tmp = (x) ; ___swab24(__tmp); })
427#endif
428#ifndef __arch__swab32
429# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
430#endif
431#ifndef __arch__swab64
432# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
433#endif
434
435#ifndef __arch__swab16p
436# define __arch__swab16p(x) __arch__swab16(*(x))
437#endif
438#ifndef __arch__swab24p
439# define __arch__swab24p(x) __arch__swab24(*(x))
440#endif
441#ifndef __arch__swab32p
442# define __arch__swab32p(x) __arch__swab32(*(x))
443#endif
444#ifndef __arch__swab64p
445# define __arch__swab64p(x) __arch__swab64(*(x))
446#endif
447
448#ifndef __arch__swab16s
449# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
450#endif
451#ifndef __arch__swab24s
452# define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
453#endif
454#ifndef __arch__swab32s
455# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
456#endif
457#ifndef __arch__swab64s
458# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
459#endif
460
461
462/*
463 * Allow constant folding
464 */
465#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
466# define __swab16(x) \
467(__builtin_constant_p((__u16)(x)) ? \
468 ___swab16((x)) : \
469 __fswab16((x)))
470# define __swab24(x) \
471(__builtin_constant_p((__u32)(x)) ? \
472 ___swab24((x)) : \
473 __fswab24((x)))
474# define __swab32(x) \
475(__builtin_constant_p((__u32)(x)) ? \
476 ___swab32((x)) : \
477 __fswab32((x)))
478# define __swab64(x) \
479(__builtin_constant_p((__u64)(x)) ? \
480 ___swab64((x)) : \
481 __fswab64((x)))
482#else
483# define __swab16(x) __fswab16(x)
484# define __swab24(x) __fswab24(x)
485# define __swab32(x) __fswab32(x)
486# define __swab64(x) __fswab64(x)
487#endif /* OPTIMIZE */
488
489
490static __inline__ __const__ __u16 __fswab16(__u16 x)
491{
492 return __arch__swab16(x);
493}
494static __inline__ __u16 __swab16p(__u16 *x)
495{
496 return __arch__swab16p(x);
497}
498static __inline__ void __swab16s(__u16 *addr)
499{
500 __arch__swab16s(addr);
501}
502
503static __inline__ __const__ __u32 __fswab24(__u32 x)
504{
505 return __arch__swab24(x);
506}
507static __inline__ __u32 __swab24p(__u32 *x)
508{
509 return __arch__swab24p(x);
510}
511static __inline__ void __swab24s(__u32 *addr)
512{
513 __arch__swab24s(addr);
514}
515
516static __inline__ __const__ __u32 __fswab32(__u32 x)
517{
518 return __arch__swab32(x);
519}
520static __inline__ __u32 __swab32p(__u32 *x)
521{
522 return __arch__swab32p(x);
523}
524static __inline__ void __swab32s(__u32 *addr)
525{
526 __arch__swab32s(addr);
527}
528
529#ifdef __BYTEORDER_HAS_U64__
530static __inline__ __const__ __u64 __fswab64(__u64 x)
531{
532# ifdef __SWAB_64_THRU_32__
533 __u32 h = x >> 32;
534 __u32 l = x & ((1ULL<<32)-1);
535 return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
536# else
537 return __arch__swab64(x);
538# endif
539}
540static __inline__ __u64 __swab64p(__u64 *x)
541{
542 return __arch__swab64p(x);
543}
544static __inline__ void __swab64s(__u64 *addr)
545{
546 __arch__swab64s(addr);
547}
548#endif /* __BYTEORDER_HAS_U64__ */
549
550#if defined(__KERNEL__)
551#define swab16 __swab16
552#define swab24 __swab24
553#define swab32 __swab32
554#define swab64 __swab64
555#define swab16p __swab16p
556#define swab24p __swab24p
557#define swab32p __swab32p
558#define swab64p __swab64p
559#define swab16s __swab16s
560#define swab24s __swab24s
561#define swab32s __swab32s
562#define swab64s __swab64s
563#endif
564
565#endif /* swab */
566
567
568
569#if 1 /* generic */
570
571/*
572 * linux/byteorder_generic.h
573 * Generic Byte-reordering support
574 *
575 * Francois-Rene Rideau <fare@tunes.org> 19970707
576 * gathered all the good ideas from all asm-foo/byteorder.h into one file,
577 * cleaned them up.
578 * I hope it is compliant with non-GCC compilers.
579 * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
580 * because I wasn't sure it would be ok to put it in types.h
581 * Upgraded it to 2.1.43
582 * Francois-Rene Rideau <fare@tunes.org> 19971012
583 * Upgraded it to 2.1.57
584 * to please Linus T., replaced huge #ifdef's between little/big endian
585 * by nestedly #include'd files.
586 * Francois-Rene Rideau <fare@tunes.org> 19971205
587 * Made it to 2.1.71; now a facelift:
588 * Put files under include/linux/byteorder/
589 * Split swab from generic support.
590 *
591 * TODO:
592 * = Regular kernel maintainers could also replace all these manual
593 * byteswap macros that remain, disseminated among drivers,
594 * after some grep or the sources...
595 * = Linus might want to rename all these macros and files to fit his taste,
596 * to fit his personal naming scheme.
597 * = it seems that a few drivers would also appreciate
598 * nybble swapping support...
599 * = every architecture could add their byteswap macro in asm/byteorder.h
600 * see how some architectures already do (i386, alpha, ppc, etc)
601 * = cpu_to_beXX and beXX_to_cpu might some day need to be well
602 * distinguished throughout the kernel. This is not the case currently,
603 * since little endian, big endian, and pdp endian machines needn't it.
604 * But this might be the case for, say, a port of Linux to 20/21 bit
605 * architectures (and F21 Linux addict around?).
606 */
607
608/*
609 * The following macros are to be defined by <asm/byteorder.h>:
610 *
611 * Conversion of long and short int between network and host format
612 * ntohl(__u32 x)
613 * ntohs(__u16 x)
614 * htonl(__u32 x)
615 * htons(__u16 x)
616 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
617 * might like the above to be functions, not macros (why?).
618 * if that's true, then detect them, and take measures.
619 * Anyway, the measure is: define only ___ntohl as a macro instead,
620 * and in a separate file, have
621 * unsigned long inline ntohl(x){return ___ntohl(x);}
622 *
623 * The same for constant arguments
624 * __constant_ntohl(__u32 x)
625 * __constant_ntohs(__u16 x)
626 * __constant_htonl(__u32 x)
627 * __constant_htons(__u16 x)
628 *
629 * Conversion of XX-bit integers (16- 32- or 64-)
630 * between native CPU format and little/big endian format
631 * 64-bit stuff only defined for proper architectures
632 * cpu_to_[bl]eXX(__uXX x)
633 * [bl]eXX_to_cpu(__uXX x)
634 *
635 * The same, but takes a pointer to the value to convert
636 * cpu_to_[bl]eXXp(__uXX x)
637 * [bl]eXX_to_cpup(__uXX x)
638 *
639 * The same, but change in situ
640 * cpu_to_[bl]eXXs(__uXX x)
641 * [bl]eXX_to_cpus(__uXX x)
642 *
643 * See asm-foo/byteorder.h for examples of how to provide
644 * architecture-optimized versions
645 *
646 */
647
648
649#if defined(__KERNEL__)
650/*
651 * inside the kernel, we can use nicknames;
652 * outside of it, we must avoid POSIX namespace pollution...
653 */
654#define cpu_to_le64 __cpu_to_le64
655#define le64_to_cpu __le64_to_cpu
656#define cpu_to_le32 __cpu_to_le32
657#define le32_to_cpu __le32_to_cpu
658#define cpu_to_le16 __cpu_to_le16
659#define le16_to_cpu __le16_to_cpu
660#define cpu_to_be64 __cpu_to_be64
661#define be64_to_cpu __be64_to_cpu
662#define cpu_to_be32 __cpu_to_be32
663#define be32_to_cpu __be32_to_cpu
664#define cpu_to_be16 __cpu_to_be16
665#define be16_to_cpu __be16_to_cpu
666#define cpu_to_le64p __cpu_to_le64p
667#define le64_to_cpup __le64_to_cpup
668#define cpu_to_le32p __cpu_to_le32p
669#define le32_to_cpup __le32_to_cpup
670#define cpu_to_le16p __cpu_to_le16p
671#define le16_to_cpup __le16_to_cpup
672#define cpu_to_be64p __cpu_to_be64p
673#define be64_to_cpup __be64_to_cpup
674#define cpu_to_be32p __cpu_to_be32p
675#define be32_to_cpup __be32_to_cpup
676#define cpu_to_be16p __cpu_to_be16p
677#define be16_to_cpup __be16_to_cpup
678#define cpu_to_le64s __cpu_to_le64s
679#define le64_to_cpus __le64_to_cpus
680#define cpu_to_le32s __cpu_to_le32s
681#define le32_to_cpus __le32_to_cpus
682#define cpu_to_le16s __cpu_to_le16s
683#define le16_to_cpus __le16_to_cpus
684#define cpu_to_be64s __cpu_to_be64s
685#define be64_to_cpus __be64_to_cpus
686#define cpu_to_be32s __cpu_to_be32s
687#define be32_to_cpus __be32_to_cpus
688#define cpu_to_be16s __cpu_to_be16s
689#define be16_to_cpus __be16_to_cpus
690#endif
691
692
693/*
694 * Handle ntohl and suches. These have various compatibility
695 * issues - like we want to give the prototype even though we
696 * also have a macro for them in case some strange program
697 * wants to take the address of the thing or something..
698 *
699 * Note that these used to return a "long" in libc5, even though
700 * long is often 64-bit these days.. Thus the casts.
701 *
702 * They have to be macros in order to do the constant folding
 703 * correctly - if the argument is passed into an inline function
704 * it is no longer constant according to gcc..
705 */
706
707#undef ntohl
708#undef ntohs
709#undef htonl
710#undef htons
711
712/*
713 * Do the prototypes. Somebody might want to take the
714 * address or some such sick thing..
715 */
716#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
717extern __u32 ntohl(__u32);
718extern __u32 htonl(__u32);
719#else
720extern unsigned long int ntohl(unsigned long int);
721extern unsigned long int htonl(unsigned long int);
722#endif
723extern unsigned short int ntohs(unsigned short int);
724extern unsigned short int htons(unsigned short int);
725
726
727#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) && !defined(__STRICT_ANSI__)
728
729#define ___htonl(x) __cpu_to_be32(x)
730#define ___htons(x) __cpu_to_be16(x)
731#define ___ntohl(x) __be32_to_cpu(x)
732#define ___ntohs(x) __be16_to_cpu(x)
733
734#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
735#define htonl(x) ___htonl(x)
736#define ntohl(x) ___ntohl(x)
737#else
738#define htonl(x) ((unsigned long)___htonl(x))
739#define ntohl(x) ((unsigned long)___ntohl(x))
740#endif
741#define htons(x) ___htons(x)
742#define ntohs(x) ___ntohs(x)
743
744#endif /* OPTIMIZE */
745
746#endif /* generic */
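/*
 * Illustrative sketch (not part of the original header): a network byte
 * order round trip using the ntohs/htons names declared above. On the
 * little-endian i386 target of this header both directions byte-swap.
 */
#if 0
static inline int port_matches(__u16 wire_port)
{
    __u16 host_port = ntohs(wire_port);   /* e.g. wire 0x5000 -> host 0x0050 (80) */
    return host_port == 80;
}
#endif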
747
748
749#define __constant_htonl(x) ___constant_swab32((x))
750#define __constant_ntohl(x) ___constant_swab32((x))
751#define __constant_htons(x) ___constant_swab16((x))
752#define __constant_ntohs(x) ___constant_swab16((x))
753#define __constant_cpu_to_le64(x) ((__u64)(x))
754#define __constant_le64_to_cpu(x) ((__u64)(x))
755#define __constant_cpu_to_le32(x) ((__u32)(x))
756#define __constant_le32_to_cpu(x) ((__u32)(x))
757#define __constant_cpu_to_le24(x) ((__u32)(x))
758#define __constant_le24_to_cpu(x) ((__u32)(x))
759#define __constant_cpu_to_le16(x) ((__u16)(x))
760#define __constant_le16_to_cpu(x) ((__u16)(x))
761#define __constant_cpu_to_be64(x) ___constant_swab64((x))
762#define __constant_be64_to_cpu(x) ___constant_swab64((x))
763#define __constant_cpu_to_be32(x) ___constant_swab32((x))
764#define __constant_be32_to_cpu(x) ___constant_swab32((x))
765#define __constant_cpu_to_be24(x) ___constant_swab24((x))
766#define __constant_be24_to_cpu(x) ___constant_swab24((x))
767#define __constant_cpu_to_be16(x) ___constant_swab16((x))
768#define __constant_be16_to_cpu(x) ___constant_swab16((x))
769#define __cpu_to_le64(x) ((__u64)(x))
770#define __le64_to_cpu(x) ((__u64)(x))
771#define __cpu_to_le32(x) ((__u32)(x))
772#define __le32_to_cpu(x) ((__u32)(x))
773#define __cpu_to_le24(x) ((__u32)(x))
774#define __le24_to_cpu(x) ((__u32)(x))
775#define __cpu_to_le16(x) ((__u16)(x))
776#define __le16_to_cpu(x) ((__u16)(x))
777#define __cpu_to_be64(x) __swab64((x))
778#define __be64_to_cpu(x) __swab64((x))
779#define __cpu_to_be32(x) __swab32((x))
780#define __be32_to_cpu(x) __swab32((x))
781#define __cpu_to_be24(x) __swab24((x))
782#define __be24_to_cpu(x) __swab24((x))
783#define __cpu_to_be16(x) __swab16((x))
784#define __be16_to_cpu(x) __swab16((x))
785#define __cpu_to_le64p(x) (*(__u64*)(x))
786#define __le64_to_cpup(x) (*(__u64*)(x))
787#define __cpu_to_le32p(x) (*(__u32*)(x))
788#define __le32_to_cpup(x) (*(__u32*)(x))
789#define __cpu_to_le24p(x) (*(__u32*)(x))
790#define __le24_to_cpup(x) (*(__u32*)(x))
791#define __cpu_to_le16p(x) (*(__u16*)(x))
792#define __le16_to_cpup(x) (*(__u16*)(x))
793#define __cpu_to_be64p(x) __swab64p((x))
794#define __be64_to_cpup(x) __swab64p((x))
795#define __cpu_to_be32p(x) __swab32p((x))
796#define __be32_to_cpup(x) __swab32p((x))
797#define __cpu_to_be24p(x) __swab24p((x))
798#define __be24_to_cpup(x) __swab24p((x))
799#define __cpu_to_be16p(x) __swab16p((x))
800#define __be16_to_cpup(x) __swab16p((x))
801#define __cpu_to_le64s(x) do {} while (0)
802#define __le64_to_cpus(x) do {} while (0)
803#define __cpu_to_le32s(x) do {} while (0)
804#define __le32_to_cpus(x) do {} while (0)
805#define __cpu_to_le24s(x) do {} while (0)
806#define __le24_to_cpus(x) do {} while (0)
807#define __cpu_to_le16s(x) do {} while (0)
808#define __le16_to_cpus(x) do {} while (0)
809#define __cpu_to_be64s(x) __swab64s((x))
810#define __be64_to_cpus(x) __swab64s((x))
811#define __cpu_to_be32s(x) __swab32s((x))
812#define __be32_to_cpus(x) __swab32s((x))
813#define __cpu_to_be24s(x) __swab24s((x))
814#define __be24_to_cpus(x) __swab24s((x))
815#define __cpu_to_be16s(x) __swab16s((x))
816#define __be16_to_cpus(x) __swab16s((x))
817
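/*
 * Illustrative sketch (not part of the original header): the effect of the
 * definitions above on this little-endian configuration. The le conversions
 * are no-ops; the be conversions byte-swap.
 */
#if 0
static inline void endian_example(void)
{
    __u32 v  = 0x12345678;
    __u32 le = __cpu_to_le32(v);  /* unchanged: 0x12345678 */
    __u32 be = __cpu_to_be32(v);  /* swapped:   0x78563412 */
    (void)le; (void)be;
}
#endif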
818
819
820
821
822
823
824
825#if 1
826
827/* Dummy types */
828
829#define ____cacheline_aligned
830
831typedef struct
832{
833 volatile unsigned int lock;
834} rwlock_t;
835
836typedef struct {
837 volatile unsigned int lock;
838} spinlock_t;
839
840struct task_struct;
841
842
843
844
845
846#if 1 /* atomic */
847
848/*
849 * Atomic operations that C can't guarantee us. Useful for
850 * resource counting etc..
851 */
852
853#ifdef CONFIG_SMP
854#define LOCK "lock ; "
855#else
856#define LOCK ""
857#endif
858
859/*
860 * Make sure gcc doesn't try to be clever and move things around
861 * on us. We need to use _exactly_ the address the user gave us,
862 * not some alias that contains the same information.
863 */
864typedef struct { volatile int counter; } atomic_t;
865
866#define ATOMIC_INIT(i) { (i) }
867
875#define atomic_read(v) ((v)->counter)
876
885#define atomic_set(v,i) (((v)->counter) = (i))
886
895static __inline__ void atomic_add(int i, atomic_t *v)
896{
897#if 0
898 __asm__ __volatile__(
899 LOCK "addl %1,%0"
900 :"=m" (v->counter)
901 :"ir" (i), "m" (v->counter));
902#endif
903}
904
913static __inline__ void atomic_sub(int i, atomic_t *v)
914{
915#if 0
916 __asm__ __volatile__(
917 LOCK "subl %1,%0"
918 :"=m" (v->counter)
919 :"ir" (i), "m" (v->counter));
920#endif
921}
922
933static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
934{
935#if 0
936 unsigned char c;
937
938 __asm__ __volatile__(
939 LOCK "subl %2,%0; sete %1"
940 :"=m" (v->counter), "=qm" (c)
941 :"ir" (i), "m" (v->counter) : "memory");
942 return c;
943#endif
944}
945
953static __inline__ void atomic_inc(atomic_t *v)
954{
955#if 0
956 __asm__ __volatile__(
957 LOCK "incl %0"
958 :"=m" (v->counter)
959 :"m" (v->counter));
960#endif
961}
962
970static __inline__ void atomic_dec(atomic_t *v)
971{
972#if 0
973 __asm__ __volatile__(
974 LOCK "decl %0"
975 :"=m" (v->counter)
976 :"m" (v->counter));
977#endif
978}
979
989static __inline__ int atomic_dec_and_test(atomic_t *v)
990{
991#if 0
992 unsigned char c;
993
994 __asm__ __volatile__(
995 LOCK "decl %0; sete %1"
996 :"=m" (v->counter), "=qm" (c)
997 :"m" (v->counter) : "memory");
998 return c != 0;
999#else
1000 return 1;
1001#endif
1002}
1003
1013static __inline__ int atomic_inc_and_test(atomic_t *v)
1014{
1015#if 0
1016 unsigned char c;
1017
1018 __asm__ __volatile__(
1019 LOCK "incl %0; sete %1"
1020 :"=m" (v->counter), "=qm" (c)
1021 :"m" (v->counter) : "memory");
1022 return c != 0;
1023#else
1024 return 1;
1025#endif
1026}
1027
1038static __inline__ int atomic_add_negative(int i, atomic_t *v)
1039{
1040#if 0
1041 unsigned char c;
1042
1043 __asm__ __volatile__(
1044 LOCK "addl %2,%0; sets %1"
1045 :"=m" (v->counter), "=qm" (c)
1046 :"ir" (i), "m" (v->counter) : "memory");
1047 return c;
1048#else
1049 return 0;
1050#endif
1051}
1052
1053/* These are x86-specific, used by some header files */
1054#define atomic_clear_mask(mask, addr)
1055#if 0
1056__asm__ __volatile__(LOCK "andl %0,%1" \
1057: : "r" (~(mask)),"m" (*addr) : "memory")
1058#endif
1059
1060#define atomic_set_mask(mask, addr)
1061#if 0
1062__asm__ __volatile__(LOCK "orl %0,%1" \
1063: : "r" (mask),"m" (*addr) : "memory")
1064#endif
1065
1066/* Atomic operations are already serializing on x86 */
1067#define smp_mb__before_atomic_dec()
1068#define smp_mb__after_atomic_dec()
1069#define smp_mb__before_atomic_inc()
1070#define smp_mb__after_atomic_inc()
1071
1072
1073
1074#endif /* atomic */
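/*
 * Illustrative sketch (not part of the original header): typical reference
 * counting on the atomic_t API declared above. Note that the operation
 * bodies in this port are stubbed out (#if 0), so this only shows intent.
 */
#if 0
static atomic_t refcount = ATOMIC_INIT(1);

static inline void example_get(void)
{
    atomic_inc(&refcount);
}

static inline void example_put(void)
{
    if (atomic_dec_and_test(&refcount)) {
        /* last reference dropped: release the object here */
    }
}
#endif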
1075
1076
1077
1078
1079
1080#if 1 /* list */
1081
1082struct list_head {
1083 struct list_head *next, *prev;
1084};
1085
1086#define LIST_HEAD_INIT(name) { &(name), &(name) }
1087
1088#define LIST_HEAD(name) \
1089 struct list_head name = LIST_HEAD_INIT(name)
1090
1091#define INIT_LIST_HEAD(ptr) do { \
1092 (ptr)->next = (ptr); (ptr)->prev = (ptr); \
1093} while (0)
1094
1095/*
1096 * Insert a new entry between two known consecutive entries.
1097 *
1098 * This is only for internal list manipulation where we know
1099 * the prev/next entries already!
1100 */
1101static inline void __list_add(struct list_head *new,
1102 struct list_head *prev,
1103 struct list_head *next)
1104{
1105#if 0
1106 next->prev = new;
1107 new->next = next;
1108 new->prev = prev;
1109 prev->next = new;
1110#endif
1111}
1112
1121static inline void list_add(struct list_head *new, struct list_head *head)
1122{
1123#if 0
1124 __list_add(new, head, head->next);
1125#endif
1126}
1127
1136static inline void list_add_tail(struct list_head *new, struct list_head *head)
1137{
1138#if 0
1139 __list_add(new, head->prev, head);
1140#endif
1141}
1142
1143/*
1144 * Delete a list entry by making the prev/next entries
1145 * point to each other.
1146 *
1147 * This is only for internal list manipulation where we know
1148 * the prev/next entries already!
1149 */
1150static inline void __list_del(struct list_head *prev, struct list_head *next)
1151{
1152 next->prev = prev;
1153 prev->next = next;
1154}
1155
1161static inline void list_del(struct list_head *entry)
1162{
1163#if 0
1164 __list_del(entry->prev, entry->next);
1165 entry->next = (void *) 0;
1166 entry->prev = (void *) 0;
1167#endif
1168}
1169
1174static inline void list_del_init(struct list_head *entry)
1175{
1176#if 0
1177 __list_del(entry->prev, entry->next);
1178 INIT_LIST_HEAD(entry);
1179#endif
1180}
1181
1187static inline void list_move(struct list_head *list, struct list_head *head)
1188{
1189#if 0
1190 __list_del(list->prev, list->next);
1191 list_add(list, head);
1192#endif
1193}
1194
1200static inline void list_move_tail(struct list_head *list,
1201 struct list_head *head)
1202{
1203#if 0
1204 __list_del(list->prev, list->next);
1205 list_add_tail(list, head);
1206#endif
1207}
1208
1213static inline int list_empty(struct list_head *head)
1214{
1215 return head->next == head;
1216}
1217
1218static inline void __list_splice(struct list_head *list,
1219 struct list_head *head)
1220{
1221#if 0
1222 struct list_head *first = list->next;
1223 struct list_head *last = list->prev;
1224 struct list_head *at = head->next;
1225
1226 first->prev = head;
1227 head->next = first;
1228
1229 last->next = at;
1230 at->prev = last;
1231#endif
1232}
1233
1239static inline void list_splice(struct list_head *list, struct list_head *head)
1240{
1241#if 0
1242 if (!list_empty(list))
1243 __list_splice(list, head);
1244#endif
1245}
1246
1254static inline void list_splice_init(struct list_head *list,
1255 struct list_head *head)
1256{
1257#if 0
1258 if (!list_empty(list)) {
1259 __list_splice(list, head);
1260 INIT_LIST_HEAD(list);
1261 }
1262#endif
1263}
1264
1271#define list_entry(ptr, type, member)
1272#if 0
1273 ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
1274#endif
1275
1281#define list_for_each(pos, head)
1282#if 0
1283 for (pos = (head)->next, prefetch(pos->next); pos != (head); \
1284 pos = pos->next, prefetch(pos->next))
1285#endif
1286
1292#define list_for_each_prev(pos, head)
1293#if 0
1294 for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
1295 pos = pos->prev, prefetch(pos->prev))
1296#endif
1297
1304#define list_for_each_safe(pos, n, head)
1305#if 0
1306 for (pos = (head)->next, n = pos->next; pos != (head); \
1307 pos = n, n = pos->next)
1308#endif
1309
1316#define list_for_each_entry(pos, head, member)
1317#if 0
1318 for (pos = list_entry((head)->next, typeof(*pos), member), \
1319 prefetch(pos->member.next); \
1320 &pos->member != (head); \
1321 pos = list_entry(pos->member.next, typeof(*pos), member), \
1322 prefetch(pos->member.next))
1323#endif
1324
1325#endif /* list */
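/*
 * Illustrative sketch (not part of the original header): embedding a
 * struct list_head in a private structure and walking the list with
 * list_entry(). The struct name my_item is made up for this example, and
 * several of the list operations above are stubs in this port.
 */
#if 0
struct my_item {
    int value;
    struct list_head link;
};

static LIST_HEAD(my_items);

static inline void my_item_push(struct my_item *item)
{
    list_add_tail(&item->link, &my_items);
}

static inline struct my_item *my_item_first(void)
{
    if (list_empty(&my_items))
        return NULL;
    return list_entry(my_items.next, struct my_item, link);
}
#endif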
1326
1327
1328
1329
1330
1331#if 1 /* wait */
1332
1333#define WNOHANG 0x00000001
1334#define WUNTRACED 0x00000002
1335
1336#define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
1337#define __WALL 0x40000000 /* Wait on all children, regardless of type */
1338#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
1339
1340#if 0
1341#include <linux/kernel.h>
1342#include <linux/list.h>
1343#include <linux/stddef.h>
1344#include <linux/spinlock.h>
1345#include <linux/config.h>
1346
1347#include <asm/page.h>
1348#include <asm/processor.h>
1349#endif
1350
1351/*
1352 * Debug control. Slow but useful.
1353 */
1354#if defined(CONFIG_DEBUG_WAITQ)
1355#define WAITQUEUE_DEBUG 1
1356#else
1357#define WAITQUEUE_DEBUG 0
1358#endif
1359
1360struct __wait_queue {
1361 unsigned int flags;
1362#define WQ_FLAG_EXCLUSIVE 0x01
1363 struct task_struct * task;
1364 struct list_head task_list;
1365#if WAITQUEUE_DEBUG
1366 long __magic;
1367 long __waker;
1368#endif
1369};
1370typedef struct __wait_queue wait_queue_t;
1371
1372/*
1373 * 'dual' spinlock architecture. Can be switched between spinlock_t and
1374 * rwlock_t locks via changing this define. Since waitqueues are quite
1375 * decoupled in the new architecture, lightweight 'simple' spinlocks give
1376 * us slightly better latencies and smaller waitqueue structure size.
1377 */
1378#define USE_RW_WAIT_QUEUE_SPINLOCK 0
1379
1380#if USE_RW_WAIT_QUEUE_SPINLOCK
1381# define wq_lock_t rwlock_t
1382# define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
1383
1384# define wq_read_lock read_lock
1385# define wq_read_lock_irqsave read_lock_irqsave
1386# define wq_read_unlock_irqrestore read_unlock_irqrestore
1387# define wq_read_unlock read_unlock
1388# define wq_write_lock_irq write_lock_irq
1389# define wq_write_lock_irqsave write_lock_irqsave
1390# define wq_write_unlock_irqrestore write_unlock_irqrestore
1391# define wq_write_unlock write_unlock
1392#else
1393# define wq_lock_t spinlock_t
1394# define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
1395
1396# define wq_read_lock spin_lock
1397# define wq_read_lock_irqsave spin_lock_irqsave
1398# define wq_read_unlock spin_unlock
1399# define wq_read_unlock_irqrestore spin_unlock_irqrestore
1400# define wq_write_lock_irq spin_lock_irq
1401# define wq_write_lock_irqsave spin_lock_irqsave
1402# define wq_write_unlock_irqrestore spin_unlock_irqrestore
1403# define wq_write_unlock spin_unlock
1404#endif
1405
1406struct __wait_queue_head {
1407 wq_lock_t lock;
1408 struct list_head task_list;
1409#if WAITQUEUE_DEBUG
1410 long __magic;
1411 long __creator;
1412#endif
1413};
1414typedef struct __wait_queue_head wait_queue_head_t;
1415
1416
1417/*
1418 * Debugging macros. We eschew `do { } while (0)' because gcc can generate
1419 * spurious .aligns.
1420 */
1421#if WAITQUEUE_DEBUG
1422#define WQ_BUG() BUG()
1423#define CHECK_MAGIC(x)
1424#if 0
1425 do { \
1426 if ((x) != (long)&(x)) { \
1427 printk("bad magic %lx (should be %lx), ", \
1428 (long)x, (long)&(x)); \
1429 WQ_BUG(); \
1430 } \
1431 } while (0)
1432#endif
1433
1434#define CHECK_MAGIC_WQHEAD(x)
1435#if 0
1436 do { \
1437 if ((x)->__magic != (long)&((x)->__magic)) { \
1438 printk("bad magic %lx (should be %lx, creator %lx), ", \
1439 (x)->__magic, (long)&((x)->__magic), (x)->__creator); \
1440 WQ_BUG(); \
1441 } \
1442 } while (0)
1443#endif
1444
1445#define WQ_CHECK_LIST_HEAD(list)
1446#if 0
1447 do { \
1448 if (!(list)->next || !(list)->prev) \
1449 WQ_BUG(); \
1450 } while(0)
1451#endif
1452
1453#define WQ_NOTE_WAKER(tsk)
1454#if 0
1455 do { \
1456 (tsk)->__waker = (long)__builtin_return_address(0); \
1457 } while (0)
1458#endif
1459#else
1460#define WQ_BUG()
1461#define CHECK_MAGIC(x)
1462#define CHECK_MAGIC_WQHEAD(x)
1463#define WQ_CHECK_LIST_HEAD(list)
1464#define WQ_NOTE_WAKER(tsk)
1465#endif
1466
1467/*
1468 * Macros for declaration and initialisation of the datatypes
1469 */
1470
1471#if WAITQUEUE_DEBUG
1472# define __WAITQUEUE_DEBUG_INIT(name) //(long)&(name).__magic, 0
1473# define __WAITQUEUE_HEAD_DEBUG_INIT(name) //(long)&(name).__magic, (long)&(name).__magic
1474#else
1475# define __WAITQUEUE_DEBUG_INIT(name)
1476# define __WAITQUEUE_HEAD_DEBUG_INIT(name)
1477#endif
1478
1479#define __WAITQUEUE_INITIALIZER(name, tsk)
1480#if 0
1481{
1482 task: tsk, \
1483 task_list: { NULL, NULL }, \
1484 __WAITQUEUE_DEBUG_INIT(name)}
1485#endif
1486
1487#define DECLARE_WAITQUEUE(name, tsk)
1488#if 0
1490#endif
1491
1492#define __WAIT_QUEUE_HEAD_INITIALIZER(name)
1493#if 0
1494{
1496 task_list: { &(name).task_list, &(name).task_list }, \
1497 __WAITQUEUE_HEAD_DEBUG_INIT(name)}
1498#endif
1499
1500#define DECLARE_WAIT_QUEUE_HEAD(name)
1501#if 0
1503#endif
1504
1505static inline void init_waitqueue_head(wait_queue_head_t *q)
1506{
1507#if 0
1508#if WAITQUEUE_DEBUG
1509 if (!q)
1510 WQ_BUG();
1511#endif
1513 INIT_LIST_HEAD(&q->task_list);
1514#if WAITQUEUE_DEBUG
1515 q->__magic = (long)&q->__magic;
1516 q->__creator = (long)current_text_addr();
1517#endif
1518#endif
1519}
1520
1521static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
1522{
1523#if 0
1524#if WAITQUEUE_DEBUG
1525 if (!q || !p)
1526 WQ_BUG();
1527#endif
1528 q->flags = 0;
1529 q->task = p;
1530#if WAITQUEUE_DEBUG
1531 q->__magic = (long)&q->__magic;
1532#endif
1533#endif
1534}
1535
1536static inline int waitqueue_active(wait_queue_head_t *q)
1537{
1538#if 0
1539#if WAITQUEUE_DEBUG
1540 if (!q)
1541 WQ_BUG();
1543#endif
1544
1545 return !list_empty(&q->task_list);
1546#endif
1547}
1548
1549static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
1550{
1551#if 0
1552#if WAITQUEUE_DEBUG
1553 if (!head || !new)
1554 WQ_BUG();
1556 CHECK_MAGIC(new->__magic);
1557 if (!head->task_list.next || !head->task_list.prev)
1558 WQ_BUG();
1559#endif
1560 list_add(&new->task_list, &head->task_list);
1561#endif
1562}
1563
1564/*
1565 * Used for wake-one threads:
1566 */
1567static inline void __add_wait_queue_tail(wait_queue_head_t *head,
1568 wait_queue_t *new)
1569{
1570#if 0
1571#if WAITQUEUE_DEBUG
1572 if (!head || !new)
1573 WQ_BUG();
1575 CHECK_MAGIC(new->__magic);
1576 if (!head->task_list.next || !head->task_list.prev)
1577 WQ_BUG();
1578#endif
1579 list_add_tail(&new->task_list, &head->task_list);
1580#endif
1581}
1582
1583static inline void __remove_wait_queue(wait_queue_head_t *head,
1584 wait_queue_t *old)
1585{
1586#if 0
1587#if WAITQUEUE_DEBUG
1588 if (!old)
1589 WQ_BUG();
1590 CHECK_MAGIC(old->__magic);
1591#endif
1592 list_del(&old->task_list);
1593#endif
1594}
1595
1596
1597
1598
1599#endif /* wait */
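/*
 * Illustrative sketch (not part of the original header): the minimal
 * wait-queue bookkeeping this section provides. The full Linux sleep/wake
 * pattern (schedule(), wake_up(), ...) is not declared in this header,
 * and the queue name example_wq is made up for this example.
 */
#if 0
static wait_queue_head_t example_wq;

static inline void example_wq_setup(struct task_struct *tsk, wait_queue_t *wait)
{
    init_waitqueue_head(&example_wq);
    init_waitqueue_entry(wait, tsk);
    __add_wait_queue(&example_wq, wait);
    /* waitqueue_active(&example_wq) would now report a waiter */
}
#endif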
1600
1601
1602#endif
1603
1604
1605
1606
1607#if 1 /* slab */
1608
1609typedef struct
1610{
1611 int x;
1612} kmem_cache_s;
1613
1614typedef kmem_cache_s kmem_cache_t;
1615
1616#if 0
1617#include <linux/mm.h>
1618#include <linux/cache.h>
1619#endif
1620
1621/* flags for kmem_cache_alloc() */
1622#define SLAB_NOFS GFP_NOFS
1623#define SLAB_NOIO GFP_NOIO
1624#define SLAB_NOHIGHIO GFP_NOHIGHIO
1625#define SLAB_ATOMIC GFP_ATOMIC
1626#define SLAB_USER GFP_USER
1627#define SLAB_KERNEL GFP_KERNEL
1628#define SLAB_NFS GFP_NFS
1629#define SLAB_DMA GFP_DMA
1630
1631#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
1632#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
1633
1634/* flags to pass to kmem_cache_create().
1635 * The first 3 are only valid when the allocator has been built with
1636 * SLAB_DEBUG_SUPPORT.
1637 */
1638#define SLAB_DEBUG_FREE 0x00000100UL /* Perform (expensive) checks on free */
1639#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
1640#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
1641#define SLAB_POISON 0x00000800UL /* Poison objects */
1642#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
1643#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
1644#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
1645#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
1646
1647/* flags passed to a constructor func */
1648#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */
1649#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */
1650#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */
1651
1652/* prototypes */
1653extern void kmem_cache_init(void);
1654extern void kmem_cache_sizes_init(void);
1655
1656extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
1657extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
1658 void (*)(void *, kmem_cache_t *, unsigned long),
1659 void (*)(void *, kmem_cache_t *, unsigned long));
1660extern int kmem_cache_destroy(kmem_cache_t *);
1661extern int kmem_cache_shrink(kmem_cache_t *);
1662extern void *kmem_cache_alloc(kmem_cache_t *, int);
1663extern void kmem_cache_free(kmem_cache_t *, void *);
1664extern unsigned int kmem_cache_size(kmem_cache_t *);
1665
1666extern void *kmalloc(size_t, int);
1667extern void kfree(const void *);
1668
1669//extern int FASTCALL(kmem_cache_reap(int));
1670
1671/* System wide caches */
1672extern kmem_cache_t *vm_area_cachep;
1673extern kmem_cache_t *mm_cachep;
1674extern kmem_cache_t *names_cachep;
1675extern kmem_cache_t *files_cachep;
1676extern kmem_cache_t *filp_cachep;
1677extern kmem_cache_t *dquot_cachep;
1678extern kmem_cache_t *bh_cachep;
1679extern kmem_cache_t *fs_cachep;
1680extern kmem_cache_t *sigact_cachep;
1681
1682#endif /* slab */
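/*
 * Illustrative sketch (not part of the original header): the usual
 * kmem_cache lifecycle using the prototypes above. The cache name, the
 * object type my_obj and the chosen flags are made up for this example.
 */
#if 0
struct my_obj { int a, b; };

static kmem_cache_t *my_obj_cachep;

static inline int my_obj_cache_setup(void)
{
    my_obj_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    return my_obj_cachep ? 0 : -1;
}

static inline struct my_obj *my_obj_alloc(void)
{
    return (struct my_obj *)kmem_cache_alloc(my_obj_cachep, SLAB_KERNEL);
}
#endif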
1683
1684
1685
1686/*
1687 * Berkeley style UIO structures - Alan Cox 1994.
1688 *
1689 * This program is free software; you can redistribute it and/or
1690 * modify it under the terms of the GNU General Public License
1691 * as published by the Free Software Foundation; either version
1692 * 2 of the License, or (at your option) any later version.
1693 */
1694
1695
1696/* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
1697 library one from sys/uio.h if you have a very old library set */
1698
1699struct iovec
1700{
1701 void *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
1702 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */
1703};
1704
1705/*
1706 * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
1707 */
1708
1709#define UIO_FASTIOV 8
1710#define UIO_MAXIOV 1024
1711#if 0
1712#define UIO_MAXIOV 16 /* Maximum iovec's in one operation
1713 16 matches BSD */
1714 /* Beg pardon: BSD has 1024 --ANK */
1715#endif
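/*
 * Illustrative sketch (not part of the original header): describing a
 * two-segment scatter/gather buffer with struct iovec. The helper name
 * and buffer parameters are made up for this example.
 */
#if 0
static inline void fill_iov(struct iovec *iov, void *hdr, __kernel_size_t hdr_len,
                            void *body, __kernel_size_t body_len)
{
    iov[0].iov_base = hdr;  iov[0].iov_len = hdr_len;
    iov[1].iov_base = body; iov[1].iov_len = body_len;
}
#endif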
1716
1717
1718
1719/*
1720 * In Linux 2.4, static timers have been removed from the kernel.
1721 * Timers may be dynamically created and destroyed, and should be initialized
1722 * by a call to init_timer() upon creation.
1723 *
1724 * The "data" field enables use of a common timeout function for several
1725 * timeouts. You can use this field to distinguish between the different
1726 * invocations.
1727 */
1728struct timer_list {
1729 struct list_head list;
1730 unsigned long expires;
1731 unsigned long data;
1732 void (*function)(unsigned long);
1733};
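/*
 * Illustrative sketch (not part of the original header): setting up a
 * timer_list as described in the comment above. init_timer(), add_timer(),
 * jiffies and HZ are not declared in this header, and my_timeout is a
 * made-up callback for this example.
 */
#if 0
static void my_timeout(unsigned long data)
{
    /* data distinguishes which object this timeout belongs to */
}

static inline void start_timeout(struct timer_list *t, unsigned long cookie)
{
    init_timer(t);
    t->expires  = jiffies + HZ;   /* roughly one second from now */
    t->data     = cookie;
    t->function = my_timeout;
    add_timer(t);
}
#endif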
1734
1735
1736
1737struct timeval {
1738 unsigned long tv_sec;
1739 unsigned long tv_usec;
1740// time_t tv_sec; /* seconds */
1741// suseconds_t tv_usec; /* microseconds */
1742};
1743
1744
1745
1746
1747
1748
1749
1750#if 1 /* poll */
1751
1752struct file;
1753
1754struct poll_table_page;
1755
1756typedef struct poll_table_struct {
1757 int error;
1758 struct poll_table_page * table;
1759} poll_table;
1760
1761extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
1762
1763static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
1764{
1765 if (p && wait_address)
1766 __pollwait(filp, wait_address, p);
1767}
1768
1769static inline void poll_initwait(poll_table* pt)
1770{
1771 pt->error = 0;
1772 pt->table = NULL;
1773}
1774extern void poll_freewait(poll_table* pt);
1775
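/*
 * Illustrative sketch (not part of the original header): the usual shape of
 * a driver poll method built on poll_wait() above. The wait queue name and
 * the readiness predicate are made up; POLLIN/POLLRDNORM are defined
 * further below in this header.
 */
#if 0
static wait_queue_head_t my_read_wq;      /* made-up wait queue for the example */
static int my_data_ready(void);           /* made-up readiness predicate */

static unsigned int my_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;

    poll_wait(filp, &my_read_wq, wait);   /* registers interest; never blocks */
    if (my_data_ready())
        mask |= POLLIN | POLLRDNORM;
    return mask;
}
#endif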
1776
1777/*
1778 * Scalable version of the fd_set.
1779 */
1780
1781typedef struct {
1782 unsigned long *in, *out, *ex;
1783 unsigned long *res_in, *res_out, *res_ex;
1784} fd_set_bits;
1785
1786/*
1787 * How many longwords for "nr" bits?
1788 */
1789#define FDS_BITPERLONG (8*sizeof(long))
1790#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
1791#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
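/*
 * Illustrative sketch (not part of the original header): the arithmetic
 * performed by the FDS_* macros above for a 32-bit long.
 */
#if 0
static inline unsigned long fd_set_bytes_for(unsigned long nr_fds)
{
    /* nr_fds = 100: FDS_LONGS = (100+31)/32 = 4, FDS_BYTES = 4*4 = 16 */
    return FDS_BYTES(nr_fds);
}
#endif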
1792
1793/*
1794 * We do a VERIFY_WRITE here even though we are only reading this time:
1795 * we'll write to it eventually..
1796 *
1797 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
1798 */
1799static inline
1800int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1801{
1802#if 0
1803 nr = FDS_BYTES(nr);
1804 if (ufdset) {
1805 int error;
1806 error = verify_area(VERIFY_WRITE, ufdset, nr);
1807 if (!error && __copy_from_user(fdset, ufdset, nr))
1808 error = -EFAULT;
1809 return error;
1810 }
1811 memset(fdset, 0, nr);
1812 return 0;
1813#else
1814 return 0;
1815#endif
1816}
1817
1818static inline
1819void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1820{
1821#if 0
1822 if (ufdset)
1823 __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
1824#endif
1825}
1826
1827static inline
1828void zero_fd_set(unsigned long nr, unsigned long *fdset)
1829{
1830#if 0
1831 memset(fdset, 0, FDS_BYTES(nr));
1832#endif
1833}
1834
1835extern int do_select(int n, fd_set_bits *fds, long *timeout);
1836
1837#endif /* poll */
1838
1839
1840
1841typedef struct
1842{
1843 int x;
1845
1846
1847
1848
1849
1850#if 1 /* poll */
1851
1852/* These are specified by iBCS2 */
1853#define POLLIN 0x0001
1854#define POLLPRI 0x0002
1855#define POLLOUT 0x0004
1856#define POLLERR 0x0008
1857#define POLLHUP 0x0010
1858#define POLLNVAL 0x0020
1859
1860/* The rest seem to be more-or-less nonstandard. Check them! */
1861#define POLLRDNORM 0x0040
1862#define POLLRDBAND 0x0080
1863#define POLLWRNORM 0x0100
1864#define POLLWRBAND 0x0200
1865#define POLLMSG 0x0400
1866
1867struct pollfd {
1868 int fd;
1869 short events;
1870 short revents;
1871};
1872
1873#endif /* poll */