|
#define _WIN2K_TARGET_ 1
#define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
#define container_of(ptr, type, member) ((type *)((char *)ptr - (char *)offsetof(type, member)))
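As a usage sketch (EXT2_CCB and CcbFromLink are hypothetical names introduced only for illustration), container_of() recovers the enclosing structure from a pointer to one of its members, much like the Windows CONTAINING_RECORD macro:

```c
typedef struct _EXT2_CCB {
    ULONG       Magic;
    LIST_ENTRY  Link;       /* embedded member */
} EXT2_CCB;

/* Given a pointer to the embedded Link, recover the enclosing EXT2_CCB. */
static EXT2_CCB *CcbFromLink(LIST_ENTRY *Entry)
{
    return container_of(Entry, EXT2_CCB, Link);
}
```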
|
#define __swab16(x) RtlUshortByteSwap(x)
#define __swab32(x) RtlUlongByteSwap(x)
#define __swab64(x) RtlUlonglongByteSwap(x)
#define __constant_swab32 __swab32
#define __constant_swab64 __swab64
#define __constant_htonl(x) __constant_swab32((x))
#define __constant_ntohl(x) __constant_swab32((x))
#define __constant_htons(x) __constant_swab16((x))
#define __constant_ntohs(x) __constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) __constant_swab64((x))
#define __constant_be64_to_cpu(x) __constant_swab64((x))
#define __constant_cpu_to_be32(x) __constant_swab32((x))
#define __constant_be32_to_cpu(x) __constant_swab32((x))
#define __constant_cpu_to_be16(x) __constant_swab16((x))
#define __constant_be16_to_cpu(x) __constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) ((__s64)(x))
#define __le64_to_cpus(x) ((__s64)(x))
#define __cpu_to_le32s(x) ((__s32)(x))
#define __le32_to_cpus(x) ((__s32)(x))
#define __cpu_to_le16s(x) ((__s16)(x))
#define __le16_to_cpus(x) ((__s16)(x))
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
#define ntohl(x)
#define ntohs(x)
#define htonl(x) ntohl(x)
#define htons(x) ntohs(x)
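On this little-endian x86 target the le conversions are identity casts and the be conversions byte-swap through the Rtl*ByteSwap routines. A minimal usage sketch (sample_super, read_blocks_count and to_network_order are hypothetical names):

```c
/* Hypothetical on-disk field, stored little-endian as ext2 stores its metadata. */
struct sample_super {
    __le32 s_blocks_count;
};

static __u32 read_blocks_count(struct sample_super *s)
{
    /* Identity cast here, but keeps the endianness intent explicit. */
    return le32_to_cpu(s->s_blocks_count);
}

static __u32 to_network_order(__u32 host)
{
    /* Expands to __swab32(), i.e. RtlUlongByteSwap(). */
    return cpu_to_be32(host);
}
```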
|
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
#define KERN_ERR "<3>" /* error conditions */
#define KERN_WARNING "<4>" /* warning conditions */
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
#define printk DbgPrint
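printk() maps straight onto DbgPrint(), so the KERN_* level is just a string literal concatenated with the format. A small sketch (report_mount_failure is a hypothetical helper):

```c
static void report_mount_failure(NTSTATUS Status)
{
    /* "<3>" from KERN_ERR is prepended to the format and passed to DbgPrint. */
    printk(KERN_ERR "ext2: mount failed, status = 0x%08x\n", Status);
}
```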
|
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
#define BUG_ON(c) assert(!(c))
#define WARN_ON(c) BUG_ON(c)
#define likely
#define unlikely
#define __init
#define __exit
#define THIS_MODULE NULL
#define MODULE_LICENSE(x)
#define MODULE_ALIAS_NLS(x)
#define EXPORT_SYMBOL(x)
#define try_module_get(x) (TRUE)
#define module_put(x)
#define module_init(X) int __init module_##X() {return X();}
#define module_exit(X) void __exit module_##X() {X();}
#define DECLARE_INIT(X) int __init module_##X(void)
#define DECLARE_EXIT(X) void __exit module_##X(void)
#define LOAD_MODULE(X)
#define UNLOAD_MODULE(X)
#define LOAD_NLS LOAD_MODULE
#define UNLOAD_NLS UNLOAD_MODULE
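module_init()/module_exit() wrap an init/exit pair into functions named module_<X>, which the driver can then call at load and unload time. A sketch with a hypothetical pair (journal_init/journal_exit are illustration only):

```c
static int  journal_init(void) { return 0; }
static void journal_exit(void) { }

/* Expands to: int module_journal_init() { return journal_init(); } */
module_init(journal_init)

/* Expands to: void module_journal_exit() { journal_exit(); } */
module_exit(journal_exit)
```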
|
#define spin_lock_init(sl) KeInitializeSpinLock(&((sl)->lock))
#define spin_lock(sl) KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
#define spin_unlock(sl) KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
#define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags=(sl)->irql;} while(0)
#define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags)==(sl)->irql); spin_unlock(sl);} while(0)
#define assert_spin_locked(x) do {} while(0)
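The macros imply a spinlock_t wrapper that holds a KSPIN_LOCK plus the KIRQL saved when the lock was acquired. A hedged usage sketch (the statistics counter and helpers are hypothetical):

```c
static spinlock_t g_stats_lock;
static unsigned   g_reads;

static void stats_init(void)
{
    spin_lock_init(&g_stats_lock);              /* KeInitializeSpinLock */
}

static void stats_note_read(void)
{
    unsigned long flags;

    spin_lock_irqsave(&g_stats_lock, flags);    /* raises to DISPATCH_LEVEL */
    g_reads++;
    spin_unlock_irqrestore(&g_stats_lock, flags);
}
```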
|
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define yield() do {} while(0)
#define might_sleep() do {} while(0)
#define mutex_init(x) ExInitializeFastMutex(&((x)->lock))
#define mutex_lock(x) ExAcquireFastMutex(&((x)->lock))
#define mutex_unlock(x) ExReleaseFastMutex(&((x)->lock))
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_AUTO_REMOVAL 0x02
#define DEFINE_WAIT(name) wait_queue_t name = (PVOID)wait_queue_create();
#define is_sync_wait(wait) (TRUE)
#define set_current_state(state) do {} while(0)
#define __set_current_state(state) do {} while(0)
|
#define page_address(_page) ((char*)_page + sizeof(struct page))
#define get_page(p) atomic_inc(&(p)->count)
#define PG_locked 0 /* Page is locked. Don't touch. */
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3
#define PG_dirty 4
#define PG_unused 5
#define PG_lru 6
#define PG_active 7
#define PG_slab 8
#define PG_skip 10
#define PG_highmem 11
#define PG_checked 12 /* kill me in 2.5.<early>. */
#define PG_arch_1 13
#define PG_reserved 14
#define PG_launder 15 /* written out by VM pressure.. */
#define PG_fs_1 16 /* Filesystem specific */
#define arch_set_page_uptodate(page)
#define UnlockPage(page) unlock_page(page)
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
#define LockPage(page) set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
#define PageError(page) test_bit(PG_error, &(page)->flags)
#define SetPageError(page) set_bit(PG_error, &(page)->flags)
#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
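Most of the page-state macros reduce to the bit helpers declared further down, operating on page->flags. A minimal sketch of a read-completion path (read_completed is a hypothetical helper; note that SetPageUptodate() is listed without a body here, so the bit is set directly):

```c
static void read_completed(struct page *page, int error)
{
    if (!error) {
        ClearPageError(page);                   /* clear_bit(PG_error, &page->flags)    */
        set_bit(PG_uptodate, &page->flags);     /* Page_Uptodate(page) now returns true */
    } else {
        SetPageError(page);                     /* set_bit(PG_error, &page->flags)      */
    }
}
```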
|
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
#define __GFP_HIGHMEM 0x02
#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
#define __GFP_HIGH 0x20 /* Should access emergency pools? */
#define __GFP_IO 0x40 /* Can start low memory physical IO? */
#define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
#define __GFP_FS 0x100 /* Can call down to low-level FS? */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NOFS 0
#define __GFP_NOFAIL 0
#define KM_USER0 0
#define PAGE_CACHE_SIZE (PAGE_SIZE)
#define PAGE_CACHE_SHIFT (12)
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
#define BUFFER_FNS(bit, name)
#define TAS_BUFFER_FNS(bit, name)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
#define page_buffers(page)
#define page_has_buffers(page) PagePrivate(page)
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
#define HZ (100)
#define jiffies JIFFIES()
#define ExFreePoolWithTag(_P, _T) ExFreePool(_P)
|
#define kmalloc(size, gfp) Ext2AllocatePool(NonPagedPool, size, 'JBDM')
#define kfree(p) Ext2FreePool(p, 'JBDM')
#define SLAB_HWCACHE_ALIGN 0x00002000U /* align objs on a h/w cache lines */
#define SLAB_KERNEL 0x00000001U
#define SLAB_TEMPORARY 0x00000002U
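kmalloc() ignores its gfp argument and always draws from non-paged pool with the 'JBDM' tag, so kfree() must be used for the matching release. A usage sketch (alloc_bh_array/free_bh_array are hypothetical helpers):

```c
static struct buffer_head **alloc_bh_array(unsigned count)
{
    struct buffer_head **array;

    array = kmalloc(count * sizeof(*array), GFP_NOFS);   /* gfp value is ignored */
    if (!array)
        return NULL;

    memset(array, 0, count * sizeof(*array));            /* kzalloc() below does this in one call */
    return array;
}

static void free_bh_array(struct buffer_head **array)
{
    if (array)
        kfree(array);   /* Ext2FreePool(array, 'JBDM') */
}
```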
|
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
#define WRITE 1
#define READA 2 /* read-ahead - don't block if no resources */
#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
#define READ_SYNC (READ | (1 << BIO_RW_SYNC))
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
#define typecheck(x, y) (TRUE)
#define time_after(a, b)
#define time_before(a, b) time_after(b,a)
#define time_after_eq(a, b)
#define time_before_eq(a, b) time_after_eq(b,a)
#define time_in_range(a, b, c)
#define smp_rmb() do {}while(0)
#define do_div(n, b) do_div64(&(n), (__u64)b)
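do_div() forwards to do_div64(); assuming it follows the Linux do_div() convention (divide n in place, return the 32-bit remainder), a typical split of a byte offset looks like this (split_offset is a hypothetical helper):

```c
static void split_offset(__u64 offset, unsigned blocksize,
                         __u64 *block, __u32 *in_block)
{
    __u64 n = offset;

    *in_block = do_div(n, blocksize);   /* n becomes offset / blocksize (assumed semantics) */
    *block    = n;
}
```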
|
|
USHORT FASTCALL RtlUshortByteSwap (IN USHORT Source)
ULONG FASTCALL RtlUlongByteSwap (IN ULONG Source)
ULONGLONG FASTCALL RtlUlonglongByteSwap (IN ULONGLONG Source)
static void le16_add_cpu (__le16 *var, u16 val)
static void le32_add_cpu (__le32 *var, u32 val)
static void le64_add_cpu (__le64 *var, u64 val)
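The le*_add_cpu() helpers follow the Linux convention of adding a CPU-order value to a little-endian on-disk field in place; on this target the conversions are identity casts. A minimal sketch of the 32-bit variant under that assumption (not necessarily the file's exact body):

```c
static void le32_add_cpu_sketch(__le32 *var, u32 val)
{
    *var = cpu_to_le32(le32_to_cpu(*var) + val);
}
```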
|
static void * ERR_PTR (long error)
static long PTR_ERR (const void *ptr)
static long IS_ERR (const void *ptr)
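ERR_PTR()/PTR_ERR()/IS_ERR() follow the Linux convention of encoding a negative errno in the top MAX_ERRNO values of the pointer range, consistent with IS_ERR_VALUE() above. A sketch (lookup_block/read_one are hypothetical, and the usual errno constants are assumed to be available):

```c
static struct buffer_head *lookup_block(struct super_block *sb, sector_t block)
{
    struct buffer_head *bh = sb_getblk(sb, block);

    if (!bh)
        return ERR_PTR(-ENOMEM);   /* encode the error in the pointer */
    return bh;
}

static int read_one(struct super_block *sb, sector_t block)
{
    struct buffer_head *bh = lookup_block(sb, block);

    if (IS_ERR(bh))
        return (int)PTR_ERR(bh);   /* recovers -ENOMEM */
    brelse(bh);
    return 0;
}
```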
|
static int spin_needbreak (spinlock_t *lock)
static int set_bit (int nr, volatile unsigned long *addr)
static int clear_bit (int nr, volatile unsigned long *addr)
static int test_and_clear_bit (int nr, volatile unsigned long *addr)
static int test_bit (int nr, volatile const unsigned long *addr)
static int test_and_set_bit (int nr, volatile unsigned long *addr)
static cond_resched ()
static need_resched ()
|
void init_waitqueue_head (wait_queue_head_t *q)
int wake_up (wait_queue_head_t *queue)
struct __wait_queue * wait_queue_create ()
void wait_queue_destroy (struct __wait_queue *)
void prepare_to_wait (wait_queue_head_t *q, wait_queue_t *wait, int state)
void prepare_to_wait_exclusive (wait_queue_head_t *q, wait_queue_t *wait, int state)
void finish_wait (wait_queue_head_t *q, wait_queue_t *wait)
int autoremove_wake_function (wait_queue_t *wait, unsigned mode, int sync, void *key)
int wake_bit_function (wait_queue_t *wait, unsigned mode, int sync, void *key)
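These follow the Linux prepare_to_wait()/finish_wait() protocol, with the wait entry allocated through wait_queue_create(). A heavily hedged sketch of the waiter/waker pairing (unload_wq, unload_done and the helpers are hypothetical, and the actual blocking step is left to whatever the surrounding code normally uses):

```c
static wait_queue_head_t unload_wq;     /* init_waitqueue_head(&unload_wq) at startup */
static volatile int      unload_done;

static void wait_for_unload(void)
{
    DEFINE_WAIT(wait);                  /* allocated via wait_queue_create() */

    prepare_to_wait(&unload_wq, &wait, TASK_UNINTERRUPTIBLE);
    while (!unload_done) {
        /* ... block here until signal_unload() runs ... */
    }
    finish_wait(&unload_wq, &wait);     /* presumably also releases the wait entry */
}

static void signal_unload(void)
{
    unload_done = 1;
    wake_up(&unload_wq);
}
```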
|
unsigned long __get_free_pages (unsigned int gfp_mask, unsigned int order)
void __free_pages (struct page *page, unsigned int order)
void free_pages (unsigned long addr, unsigned int order)
void truncate_inode_pages (struct address_space *, loff_t)
void mark_buffer_dirty (struct buffer_head *bh)
void init_buffer (struct buffer_head *, bh_end_io_t *, void *)
void set_bh_page (struct buffer_head *bh, struct page *page, unsigned long offset)
int try_to_free_buffers (struct page *)
struct buffer_head * alloc_page_buffers (struct page *page, unsigned long size, int retry)
void create_empty_buffers (struct page *, unsigned long, unsigned long b_state)
void mark_buffer_dirty_inode (struct buffer_head *bh, struct inode *inode)
int inode_has_buffers (struct inode *)
void invalidate_inode_buffers (struct inode *)
int remove_inode_buffers (struct inode *inode)
int sync_mapping_buffers (struct address_space *mapping)
void unmap_underlying_metadata (struct block_device *bdev, sector_t block)
void mark_buffer_async_write (struct buffer_head *bh)
void invalidate_bdev (struct block_device *)
int sync_blockdev (struct block_device *bdev)
void __wait_on_buffer (struct buffer_head *)
wait_queue_head_t * bh_waitq_head (struct buffer_head *bh)
int fsync_bdev (struct block_device *)
struct super_block * freeze_bdev (struct block_device *)
void thaw_bdev (struct block_device *, struct super_block *)
int fsync_super (struct super_block *)
int fsync_no_super (struct block_device *)
|
struct buffer_head * __find_get_block (struct block_device *bdev, sector_t block, unsigned long size)
struct buffer_head * get_block_bh (struct block_device *bdev, sector_t block, unsigned long size, int zero)
struct buffer_head * __getblk (struct block_device *bdev, sector_t block, unsigned long size)
void __brelse (struct buffer_head *)
void __bforget (struct buffer_head *)
void __breadahead (struct block_device *, sector_t block, unsigned int size)
struct buffer_head * __bread (struct block_device *, sector_t block, unsigned size)
void invalidate_bh_lrus (void)
struct buffer_head * alloc_buffer_head (gfp_t gfp_flags)
void free_buffer_head (struct buffer_head *bh)
void unlock_buffer (struct buffer_head *bh)
void __lock_buffer (struct buffer_head *bh)
void ll_rw_block (int, int, struct buffer_head *bh[])
int sync_dirty_buffer (struct buffer_head *bh)
int submit_bh (int, struct buffer_head *)
void write_boundary_block (struct block_device *bdev, sector_t bblock, unsigned blocksize)
int bh_uptodate_or_lock (struct buffer_head *bh)
int bh_submit_read (struct buffer_head *bh)
struct buffer_head * extents_bread (struct super_block *sb, sector_t block)
struct buffer_head * extents_bwrite (struct super_block *sb, sector_t block)
void extents_mark_buffer_dirty (struct buffer_head *bh)
void extents_brelse (struct buffer_head *bh)
void extents_bforget (struct buffer_head *bh)
void buffer_head_remove (struct block_device *bdev, struct buffer_head *bh)
void block_invalidatepage (struct page *page, unsigned long offset)
void page_zero_new_buffers (struct page *page, unsigned from, unsigned to)
int block_commit_write (struct page *page, unsigned from, unsigned to)
void block_sync_page (struct page *)
void buffer_init (void)
|
static void get_bh (struct buffer_head *bh)
static void put_bh (struct buffer_head *bh)
static void brelse (struct buffer_head *bh)
static void fini_bh (struct buffer_head **bh)
static void bforget (struct buffer_head *bh)
static struct buffer_head * sb_getblk (struct super_block *sb, sector_t block)
static struct buffer_head * sb_getblk_zero (struct super_block *sb, sector_t block)
static struct buffer_head * sb_bread (struct super_block *sb, sector_t block)
static struct buffer_head * sb_find_get_block (struct super_block *sb, sector_t block)
static void map_bh (struct buffer_head *bh, struct super_block *sb, sector_t block)
static void wait_on_buffer (struct buffer_head *bh)
static void lock_buffer (struct buffer_head *bh)
int __set_page_dirty_buffers (struct page *page)
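sb_bread()/brelse() give the usual read-and-release pattern over the buffer cache. A sketch (read_group_desc is a hypothetical caller; the errno constant is assumed to be available):

```c
static int read_group_desc(struct super_block *sb, sector_t block)
{
    struct buffer_head *bh;

    bh = sb_bread(sb, block);       /* presumably __bread() on sb's block device */
    if (!bh)
        return -EIO;

    /* ... consume bh->b_data here ... */

    brelse(bh);                     /* drop the reference taken by sb_bread() */
    return 0;
}
```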
|
int register_nls (struct nls_table *)
int unregister_nls (struct nls_table *)
struct nls_table * load_nls (char *)
void unload_nls (struct nls_table *)
struct nls_table * load_nls_default (void)
int utf8_mbtowc (wchar_t *, const __u8 *, int)
int utf8_mbstowcs (wchar_t *, const __u8 *, int)
int utf8_wctomb (__u8 *, wchar_t, int)
int utf8_wcstombs (__u8 *, const wchar_t *, int)
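A sketch of codepage selection with these helpers (pick_codepage is hypothetical, and "cp936" would only be an example name whose availability depends on which NLS tables are built in):

```c
static struct nls_table *pick_codepage(char *name)
{
    struct nls_table *tbl = name ? load_nls(name) : NULL;

    if (!tbl)
        tbl = load_nls_default();   /* fall back to the built-in default table */
    return tbl;                     /* release later with unload_nls(tbl) */
}
```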
|
static __u32 JIFFIES ()
NTKERNELAPI NTSTATUS ExUuidCreate (OUT UUID *Uuid)
NTKERNELAPI PVOID NTAPI ExAllocatePoolWithTag (IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
PVOID Ext2AllocatePool (IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
VOID Ext2FreePool (IN PVOID P, IN ULONG Tag)
void * kzalloc (int size, int flags)
kmem_cache_t * kmem_cache_create (const char *name, size_t size, size_t offset, unsigned long flags, kmem_cache_cb_t ctor)
void * kmem_cache_alloc (kmem_cache_t *kc, int flags)
void kmem_cache_free (kmem_cache_t *kc, void *p)
int kmem_cache_destroy (kmem_cache_t *kc)
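A sketch of the kmem_cache lifecycle (the cache name and helpers are hypothetical; the constructor is passed as NULL because its callback signature is not shown here, and a nonzero return from kmem_cache_destroy() is assumed to mean the cache could not be torn down):

```c
static kmem_cache_t *icb_cachep;

static int icb_cache_init(void)
{
    icb_cachep = kmem_cache_create("ext2_icb", sizeof(struct inode),
                                   0, SLAB_TEMPORARY, NULL);
    return icb_cachep ? 0 : -ENOMEM;   /* errno constant assumed available */
}

static struct inode *icb_alloc(void)
{
    return (struct inode *)kmem_cache_alloc(icb_cachep, GFP_NOFS);
}

static void icb_free(struct inode *i)
{
    kmem_cache_free(icb_cachep, i);
}

static void icb_cache_exit(void)
{
    if (icb_cachep && kmem_cache_destroy(icb_cachep))
        printk(KERN_WARNING "ext2: cache still busy at unload\n");
}
```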
|
static __u32 do_div64 (__u64 *n, __u64 b)
|