typedef __signed__ char __s8;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
#define BITS_PER_LONG 32
#ifdef CONFIG_HIGHMEM64G
#define __NFDBITS (8 * sizeof(unsigned long))
#define __FD_SETSIZE 1024
#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
#define __FDELT(d) ((d) / __NFDBITS)
#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
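/*
 * Worked example (added for illustration, not part of the original
 * header): with 32-bit longs, __NFDBITS is 32, so an fd_set spans
 * __FDSET_LONGS == 32 unsigned longs.  For fd = 35:
 *   __FDELT(35)  == 1          (second long in the array)
 *   __FDMASK(35) == 1UL << 3   (bit 3 within that long)
 */
static inline void example_mark_fd(unsigned long *bits, int fd)
{
	/* hypothetical helper showing the intended indexing */
	bits[__FDELT(fd)] |= __FDMASK(fd);
}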
typedef long long __kernel_loff_t;
#if defined(__KERNEL__) || defined(__USE_ALL)
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#define __FD_SET(fd,fdsetp) \
		__asm__ __volatile__("btsl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

#define __FD_CLR(fd,fdsetp) \
		__asm__ __volatile__("btrl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
		unsigned char __result; \
		__asm__ __volatile__("btl %1,%2 ; setb %0" \
			:"=q" (__result) :"r" ((int) (fd)), \
			"m" (*(__kernel_fd_set *) (fdsetp))); \
		__result; }))

#define __FD_ZERO(fdsetp) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__("cld ; rep ; stosl" \
		:"=m" (*(__kernel_fd_set *) (fdsetp)), \
		  "=&c" (__d0), "=&D" (__d1) \
		:"a" (0), "1" (__FDSET_LONGS), \
		  "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
} while (0)
#ifndef __KERNEL_STRICT_NAMES
typedef __kernel_loff_t loff_t;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab24(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		((__x & (__u32)0x000000ffUL) << 16) | \
		(__x & (__u32)0x0000ff00UL) | \
		((__x & (__u32)0x00ff0000UL) >> 16) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab24(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffU) << 16) | \
		((__u32)(x) & (__u32)0x0000ff00U) | \
		(((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
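/*
 * Worked example (added for illustration): the ___constant_* forms are
 * pure integer expressions, so the compiler can fold them at build time:
 *   ___constant_swab16(0x1234)     == 0x3412
 *   ___constant_swab32(0x12345678) == 0x78563412
 */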
#ifndef __arch__swab16
# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab24
# define __arch__swab24(x) ({ __u32 __tmp = (x) ; ___swab24(__tmp); })
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

#ifndef __arch__swab16p
# define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab24p
# define __arch__swab24p(x) __arch__swab24(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab24s
# define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif
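/*
 * Sketch (added for illustration): an architecture header can predefine
 * __arch__swab32 before this point to use a native instruction instead
 * of the generic shift-and-mask fallback, in the spirit of what
 * asm-i386/byteorder.h does with bswap on 486+ CPUs:
 */
static __inline__ __const__ __u32 ___example_arch_swab32(__u32 x)
{
	__asm__("bswap %0" : "=r" (x) : "0" (x));	/* byte-reverse in one insn */
	return x;
}
/* #define __arch__swab32(x) ___example_arch_swab32(x) */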
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
# define __swab24(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab24((x)) : \
 __fswab24((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
# define __swab24(x) __fswab24(x)
# define __swab32(x) __fswab32(x)
# define __swab64(x) __fswab64(x)
#endif
#ifdef __BYTEORDER_HAS_U64__
static __inline__ __const__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL<<32)-1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return __arch__swab64(x);
# endif
}

static __inline__ void __swab64s(__u64 *addr)
{
	__arch__swab64s(addr);
}
#if defined(__KERNEL__)
#define swab16 __swab16
#define swab24 __swab24
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab24p __swab24p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab24s __swab24s
#define swab32s __swab32s
#define swab64s __swab64s
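/*
 * Usage sketch (added for illustration): swab32 swaps a value, swab32p
 * swaps the value a pointer refers to, and swab32s swaps in place.
 */
static __inline__ void example_swab_usage(void)
{
	__u32 v = 0x12345678;
	__u32 w = swab32(v);	/* w == 0x78563412, v unchanged */
	swab32s(&v);		/* v == 0x78563412 now */
	(void) w;
}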
#if defined(__KERNEL__)
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
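/*
 * Usage sketch (added for illustration): a hypothetical on-disk record
 * stored little-endian.  Filling it with cpu_to_le32()/cpu_to_le16()
 * and reading it back with le32_to_cpu()/le16_to_cpu() keeps the code
 * correct on either byte order; on this little-endian configuration the
 * le conversions compile away.
 */
struct example_disk_record {		/* hypothetical layout */
	__u32 magic;			/* little-endian on disk */
	__u16 version;			/* little-endian on disk */
};

static inline void example_fill_record(struct example_disk_record *r)
{
	r->magic = cpu_to_le32(0x4c4e5558);
	r->version = cpu_to_le16(1);
}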
#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
extern __u32 ntohl(__u32);
extern __u32 htonl(__u32);
#else
extern unsigned long int ntohl(unsigned long int);
extern unsigned long int htonl(unsigned long int);
#endif
extern unsigned short int ntohs(unsigned short int);
extern unsigned short int htons(unsigned short int);
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) && !defined(__STRICT_ANSI__)

#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)

#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#else
#define htonl(x) ((unsigned long)___htonl(x))
#define ntohl(x) ((unsigned long)___ntohl(x))
#endif
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
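/*
 * Worked example (added for illustration): on a little-endian CPU,
 * htons(80) yields 0x5000, i.e. the bytes 0x00 0x50 in memory, which is
 * the network (big-endian) encoding of port 80.  On a big-endian CPU
 * these macros reduce to the identity.
 */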
#define __constant_htonl(x) ___constant_swab32((x))
#define __constant_ntohl(x) ___constant_swab32((x))
#define __constant_htons(x) ___constant_swab16((x))
#define __constant_ntohs(x) ___constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le24(x) ((__u32)(x))
#define __constant_le24_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) ___constant_swab64((x))
#define __constant_be64_to_cpu(x) ___constant_swab64((x))
#define __constant_cpu_to_be32(x) ___constant_swab32((x))
#define __constant_be32_to_cpu(x) ___constant_swab32((x))
#define __constant_cpu_to_be24(x) ___constant_swab24((x))
#define __constant_be24_to_cpu(x) ___constant_swab24((x))
#define __constant_cpu_to_be16(x) ___constant_swab16((x))
#define __constant_be16_to_cpu(x) ___constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le24(x) ((__u32)(x))
#define __le24_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be24(x) __swab24((x))
#define __be24_to_cpu(x) __swab24((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le24p(x) (*(__u32*)(x))
#define __le24_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be24p(x) __swab24p((x))
#define __be24_to_cpup(x) __swab24p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) do {} while (0)
#define __le64_to_cpus(x) do {} while (0)
#define __cpu_to_le32s(x) do {} while (0)
#define __le32_to_cpus(x) do {} while (0)
#define __cpu_to_le24s(x) do {} while (0)
#define __le24_to_cpus(x) do {} while (0)
#define __cpu_to_le16s(x) do {} while (0)
#define __le16_to_cpus(x) do {} while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be24s(x) __swab24s((x))
#define __be24_to_cpus(x) __swab24s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
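/*
 * Usage sketch (added for illustration): the *s forms convert in place.
 * On this little-endian configuration the le variants are no-ops and
 * only the be variants actually swap.
 */
static inline void example_inplace(__u32 *wire_be)
{
	be32_to_cpus(wire_be);	/* big-endian wire value -> CPU order */
}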
#define ____cacheline_aligned

#define LOCK "lock ; "

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)

#define atomic_set(v,i) (((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK "andl %0,%1" \
: : "r" (~(mask)), "m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" (mask), "m" (*addr) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
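/*
 * Usage sketch (added for illustration): the classic reference-count
 * pattern built from these primitives; all names are hypothetical.
 */
static atomic_t example_refcount = ATOMIC_INIT(1);

static inline void example_get(void)
{
	atomic_inc(&example_refcount);
}

static inline void example_put(void (*release)(void))
{
	if (atomic_dec_and_test(&example_refcount))
		release();	/* last reference dropped */
}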
#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
static __inline__ void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = (void *) 0;
	entry->prev = (void *) 0;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))

#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		     prefetch(pos->member.next); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member), \
		     prefetch(pos->member.next))
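/*
 * Usage sketch (added for illustration): embed a list_head in your own
 * structure, recover the container with list_entry(), and walk the list
 * with list_for_each_entry(); the struct and names are hypothetical.
 */
struct example_item {
	int value;
	struct list_head node;
};

static LIST_HEAD(example_list);

static inline int example_sum(void)
{
	struct example_item *it;
	int sum = 0;

	list_for_each_entry(it, &example_list, node)
		sum += it->value;
	return sum;
}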
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/config.h>

#include <asm/page.h>
#include <asm/processor.h>
#if defined(CONFIG_DEBUG_WAITQ)
#define WAITQUEUE_DEBUG 1
#else
#define WAITQUEUE_DEBUG 0
#endif

#define WQ_FLAG_EXCLUSIVE 0x01

#define USE_RW_WAIT_QUEUE_SPINLOCK 0
#if USE_RW_WAIT_QUEUE_SPINLOCK
# define wq_lock_t rwlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
# define wq_read_lock read_lock
# define wq_read_lock_irqsave read_lock_irqsave
# define wq_read_unlock_irqrestore read_unlock_irqrestore
# define wq_read_unlock read_unlock
# define wq_write_lock_irq write_lock_irq
# define wq_write_lock_irqsave write_lock_irqsave
# define wq_write_unlock_irqrestore write_unlock_irqrestore
# define wq_write_unlock write_unlock
#else
# define wq_lock_t spinlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
# define wq_read_lock spin_lock
# define wq_read_lock_irqsave spin_lock_irqsave
# define wq_read_unlock spin_unlock
# define wq_read_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_lock_irq spin_lock_irq
# define wq_write_lock_irqsave spin_lock_irqsave
# define wq_write_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_unlock spin_unlock
#endif
#if WAITQUEUE_DEBUG
#define WQ_BUG() BUG()
#define CHECK_MAGIC(x) \
	do { \
		if ((x) != (long)&(x)) { \
			printk("bad magic %lx (should be %lx), ", \
				(long)x, (long)&(x)); \
			WQ_BUG(); \
		} \
	} while (0)
#define CHECK_MAGIC_WQHEAD(x) \
	do { \
		if ((x)->__magic != (long)&((x)->__magic)) { \
			printk("bad magic %lx (should be %lx, creator %lx), ", \
				(x)->__magic, (long)&((x)->__magic), (x)->__creator); \
			WQ_BUG(); \
		} \
	} while (0)
#define WQ_CHECK_LIST_HEAD(list) \
	do { \
		if (!(list)->next || !(list)->prev) \
			WQ_BUG(); \
	} while (0)
#define WQ_NOTE_WAKER(tsk) \
	do { \
		(tsk)->__waker = (long)__builtin_return_address(0); \
	} while (0)
#else
#define WQ_BUG()
#define CHECK_MAGIC(x)
#define CHECK_MAGIC_WQHEAD(x)
#define WQ_CHECK_LIST_HEAD(list)
#define WQ_NOTE_WAKER(tsk)
#endif
#if WAITQUEUE_DEBUG
# define __WAITQUEUE_DEBUG_INIT(name) (long)&(name).__magic, 0
# define __WAITQUEUE_HEAD_DEBUG_INIT(name) (long)&(name).__magic, (long)&(name).__magic
#else
# define __WAITQUEUE_DEBUG_INIT(name)
# define __WAITQUEUE_HEAD_DEBUG_INIT(name)
#endif

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	task:		tsk, \
	task_list:	{ NULL, NULL }, \
			__WAITQUEUE_DEBUG_INIT(name)}

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	lock:		WAITQUEUE_RW_LOCK_UNLOCKED, \
	task_list:	{ &(name).task_list, &(name).task_list }, \
			__WAITQUEUE_HEAD_DEBUG_INIT(name)}

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
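/*
 * Usage sketch (added for illustration): a static wait queue head with
 * the usual sleep/wake pairing.  example_wq and example_condition are
 * hypothetical; wait_event_interruptible() and wake_up() are defined
 * elsewhere in the kernel headers.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_condition;

/* sleeper: wait_event_interruptible(example_wq, example_condition); */
/* waker:   example_condition = 1; wake_up(&example_wq);             */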
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	q->lock = WAITQUEUE_RW_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&q->task_list);
#if WAITQUEUE_DEBUG
	q->__magic = (long)&q->__magic;
	q->__creator = (long)current_text_addr();
#endif
}

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->task = p;
#if WAITQUEUE_DEBUG
	q->__magic = (long)&q->__magic;
#endif
}

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
#if WAITQUEUE_DEBUG
	if (!head->task_list.next || !head->task_list.prev)
		WQ_BUG();
#endif
	list_add(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head, wait_queue_t *new)
{
#if WAITQUEUE_DEBUG
	if (!head->task_list.next || !head->task_list.prev)
		WQ_BUG();
#endif
	list_add_tail(&new->task_list, &head->task_list);
}
#include <linux/cache.h>

#define SLAB_NOFS GFP_NOFS
#define SLAB_NOIO GFP_NOIO
#define SLAB_NOHIGHIO GFP_NOHIGHIO
#define SLAB_ATOMIC GFP_ATOMIC
#define SLAB_USER GFP_USER
#define SLAB_KERNEL GFP_KERNEL
#define SLAB_NFS GFP_NFS
#define SLAB_DMA GFP_DMA

#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
#define SLAB_NO_GROW 0x00001000UL

#define SLAB_DEBUG_FREE 0x00000100UL
#define SLAB_DEBUG_INITIAL 0x00000200UL
#define SLAB_RED_ZONE 0x00000400UL
#define SLAB_POISON 0x00000800UL
#define SLAB_NO_REAP 0x00001000UL
#define SLAB_HWCACHE_ALIGN 0x00002000UL
#define SLAB_CACHE_DMA 0x00004000UL
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL

#define SLAB_CTOR_CONSTRUCTOR 0x001UL
#define SLAB_CTOR_ATOMIC 0x002UL
#define SLAB_CTOR_VERIFY 0x004UL
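/*
 * Usage sketch (added for illustration): creating a slab cache for the
 * hypothetical example_item structure from the list sketch above and
 * allocating from it with the flags defined here.
 */
static kmem_cache_t *example_cachep;

static inline int example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
			sizeof(struct example_item), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

/* allocate: kmem_cache_alloc(example_cachep, SLAB_KERNEL);
 * release:  kmem_cache_free(example_cachep, obj);          */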
#define UIO_FASTIOV 8
#define UIO_MAXIOV 1024
#define UIO_MAXIOV 16
struct poll_table_page;

static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address)
		__pollwait(filp, wait_address, p);
}

#define FDS_BITPERLONG (8*sizeof(long))
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))

static inline int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset) {
		int error;
		error = verify_area(VERIFY_WRITE, ufdset, nr);
		if (!error && __copy_from_user(fdset, ufdset, nr))
			error = -EFAULT;
		return error;
	}
	memset(fdset, 0, nr);
	return 0;
}
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020

#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#define POLLWRNORM 0x0100
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
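/*
 * Usage sketch (added for illustration): the canonical poll method of a
 * character driver.  It registers the caller on a wait queue through
 * poll_wait() and reports readiness with the POLL* bits above, reusing
 * the hypothetical example_wq/example_condition from the wait queue
 * sketch.
 */
static unsigned int example_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(filp, &example_wq, wait);
	if (example_condition)
		mask |= POLLIN | POLLRDNORM;	/* data available to read */
	return mask;
}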
__asm__(".p2align 4, 0x90\n" ".seh_proc __seh2_global_filter_func\n" "__seh2_global_filter_func:\n" "\tsub %rbp, %rax\n" "\tpush %rbp\n" "\t.seh_pushreg %rbp\n" "\tsub $32, %rsp\n" "\t.seh_stackalloc 32\n" "\t.seh_endprologue\n" "\tsub %rax, %rdx\n" "\tmov %rdx, %rbp\n" "\tjmp *%r8\n" "__seh2_global_filter_func_exit:\n" "\t.p2align 4\n" "\tadd $32, %rsp\n" "\tpop %rbp\n" "\tret\n" "\t.seh_endproc")
static unsigned __int64 next