#ifndef _EXT2_MODULE_HEADER_
#define _EXT2_MODULE_HEADER_

#include <linux/types.h>

#if _WIN32_WINNT <= 0x500
#define _WIN2K_TARGET_  1
#endif

#ifndef offsetof
# define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
#endif

#define container_of(ptr, type, member) \
        ((type *)((char *)ptr - (char *)offsetof(type, member)))

/* Byte-order helpers: the Linux swab/endian macros are mapped onto the NT
   RtlXxxByteSwap primitives (compiler intrinsics where available). */

#if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
    ((defined(_M_AMD64) || defined(_M_IA64)) && \
     (_MSC_FULL_VER > 13009175))

#pragma intrinsic(_byteswap_ushort)
#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)

#define RtlUshortByteSwap(_x)    _byteswap_ushort((USHORT)(_x))
#define RtlUlongByteSwap(_x)     _byteswap_ulong((_x))
#define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))

#elif !defined(__REACTOS__)
/* non-intrinsic builds use the ntoskrnl RtlXxxByteSwap exports declared below */
#endif

#define __swab16(x) RtlUshortByteSwap(x)
#define __swab32(x) RtlUlongByteSwap(x)
#define __swab64(x) RtlUlonglongByteSwap(x)

#define __constant_swab16  __swab16
#define __constant_swab32  __swab32
#define __constant_swab64  __swab64

#define __constant_htonl(x) __constant_swab32((x))
#define __constant_ntohl(x) __constant_swab32((x))
#define __constant_htons(x) __constant_swab16((x))
#define __constant_ntohs(x) __constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) __constant_swab64((x))
#define __constant_be64_to_cpu(x) __constant_swab64((x))
#define __constant_cpu_to_be32(x) __constant_swab32((x))
#define __constant_be32_to_cpu(x) __constant_swab32((x))
#define __constant_cpu_to_be16(x) __constant_swab16((x))
#define __constant_be16_to_cpu(x) __constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) ((__s64)(x))
#define __le64_to_cpus(x) ((__s64)(x))
#define __cpu_to_le32s(x) ((__s32)(x))
#define __le32_to_cpus(x) ((__s32)(x))
#define __cpu_to_le16s(x) ((__s16)(x))
#define __le16_to_cpus(x) ((__s16)(x))
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu

#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus

#define ntohl(x) ( ( ( ( x ) & 0x000000ff ) << 24 ) | \
                   ( ( ( x ) & 0x0000ff00 ) << 8  ) | \
                   ( ( ( x ) & 0x00ff0000 ) >> 8  ) | \
                   ( ( ( x ) & 0xff000000 ) >> 24 ) )

#define ntohs(x) ( ( ( ( x ) & 0xff00 ) >> 8 ) | \
                   ( ( ( x ) & 0x00ff ) << 8 ) )

#define htonl(x) ntohl(x)
#define htons(x) ntohs(x)

/* printk() log levels, routed to DbgPrint(). */
#define KERN_EMERG   "<0>"
#define KERN_ALERT   "<1>"
#define KERN_CRIT    "<2>"
#define KERN_ERR     "<3>"
#define KERN_WARNING "<4>"
#define KERN_NOTICE  "<5>"
#define KERN_INFO    "<6>"
#define KERN_DEBUG   "<7>"

#define printk  DbgPrint

#define MAX_ERRNO        4095
#define IS_ERR_VALUE(x)  ((x) >= (unsigned long)-MAX_ERRNO)

#define BUG_ON(c)   assert(!(c))
#define WARN_ON(c)  BUG_ON(c)

#define THIS_MODULE          NULL
#define MODULE_LICENSE(x)
#define MODULE_ALIAS_NLS(x)
#define EXPORT_SYMBOL(x)

#define try_module_get(x)    (TRUE)
#define module_put(x)

#define module_init(X) int  __init module_##X() {return X();}
#define module_exit(X) void __exit module_##X() {X();}

#define DECLARE_INIT(X) int  __init module_##X(void)
#define DECLARE_EXIT(X) void __exit module_##X(void)

#define LOAD_MODULE(X)   do {                       \
                             rc = module_##X();     \
                         } while (0)

#define UNLOAD_MODULE(X) do {                       \
                             module_##X();          \
                         } while (0)

#define LOAD_NLS    LOAD_MODULE
#define UNLOAD_NLS  UNLOAD_MODULE

/* Spinlocks: emulated with a KSPIN_LOCK plus the saved owner IRQL. */
#define spin_lock_init(sl)    KeInitializeSpinLock(&((sl)->lock))
#define spin_lock(sl)         KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
#define spin_unlock(sl)       KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
#define spin_lock_irqsave(sl, flags)       do {spin_lock(sl); flags = (sl)->irql;} while(0)
#define spin_unlock_irqrestore(sl, flags)  do {ASSERT((KIRQL)(flags) == (sl)->irql); spin_unlock(sl);} while(0)

#define assert_spin_locked(x)  do {} while(0)
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
    return spin_is_contended(lock);
#else
    return 0;
#endif
}
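/*
 * Usage sketch (illustrative, not part of the original header): the wrappers
 * above assume a spinlock_t that carries a KSPIN_LOCK in ->lock and the
 * owner's KIRQL in ->irql, so a typical caller looks like:
 *
 *     spinlock_t    lk;
 *     unsigned long flags;
 *
 *     spin_lock_init(&lk);
 *     spin_lock_irqsave(&lk, flags);      // raises IRQL, saves it in flags
 *     // ... touch the protected state ...
 *     spin_unlock_irqrestore(&lk, flags); // restores the saved IRQL
 */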
#define TASK_INTERRUPTIBLE    1
#define TASK_UNINTERRUPTIBLE  2

#define yield()        do {} while(0)
#define might_sleep()  do {} while(0)

#define mutex_init(x)    ExInitializeFastMutex(&((x)->lock))
#define mutex_lock(x)    ExAcquireFastMutex(&((x)->lock))
#define mutex_unlock(x)  ExReleaseFastMutex(&((x)->lock))

#define WQ_FLAG_EXCLUSIVE     0x01
#define WQ_FLAG_AUTO_REMOVAL  0x02

#define DEFINE_WAIT(name) \
        wait_queue_t name = (PVOID)wait_queue_create();

#define is_sync_wait(wait)          (TRUE)
#define set_current_state(state)    do {} while(0)
#define __set_current_state(state)  do {} while(0)

/* Page emulation: the page data follows the struct page header itself. */
#define page_address(_page) ((char*)_page + sizeof(struct page))

#define get_page(p) atomic_inc(&(p)->count)

#define PG_referenced  2
#define PG_uptodate    3
#define PG_highmem     11
#define PG_checked     12
#define PG_reserved    14
#define PG_launder     15

#ifndef arch_set_page_uptodate
#define arch_set_page_uptodate(page)
#endif

#define UnlockPage(page)         unlock_page(page)
#define Page_Uptodate(page)      test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)                           \
    do {                                                \
        arch_set_page_uptodate(page);                   \
        set_bit(PG_uptodate, &(page)->flags);           \
    } while (0)
#define ClearPageUptodate(page)  clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)          test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)       set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)     clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)         test_bit(PG_locked, &(page)->flags)
#define LockPage(page)           set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)        test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page)        test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)     set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page)   clear_bit(PG_checked, &(page)->flags)
#define PageLaunder(page)        test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page)     set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page)   clear_bit(PG_launder, &(page)->flags)
#define ClearPageArch1(page)     clear_bit(PG_arch_1, &(page)->flags)

#define PageError(page)          test_bit(PG_error, &(page)->flags)
#define SetPageError(page)       set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)     clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)      test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)   set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)

#define PageActive(page)         test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)      set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)    clear_bit(PG_active, &(page)->flags)

#define __get_free_page(gfp_mask) \
        __get_free_pages((gfp_mask), 0)

#define __free_page(page)  __free_pages((page), 0)
#define free_page(addr)    free_pages((addr), 0)

#define __GFP_HIGHMEM  0x02
#define __GFP_WAIT     0x10
#define __GFP_HIGH     0x20
#define __GFP_IO       0x40
#define __GFP_HIGHIO   0x80
#define __GFP_FS       0x100

#define GFP_ATOMIC    (__GFP_HIGH)
#define GFP_USER      (__GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER  (__GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL    (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

#define __GFP_NOFAIL  0
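/*
 * Minimal usage sketch (illustrative, not from the original source):
 *
 *     unsigned long addr = __get_free_page(GFP_KERNEL);
 *     if (addr) {
 *         // ... use one page of memory at addr ...
 *         free_page(addr);
 *     }
 */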
#define PAGE_CACHE_SIZE   (PAGE_SIZE)
#define PAGE_CACHE_SHIFT  (12)
#define MAX_BUF_PER_PAGE  (PAGE_CACHE_SIZE / 512)

#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)           \
{                                                                       \
    set_bit(BH_##bit, &(bh)->b_state);                                  \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)         \
{                                                                       \
    clear_bit(BH_##bit, &(bh)->b_state);                                \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)          \
{                                                                       \
    return test_bit(BH_##bit, &(bh)->b_state);                          \
}

#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)       \
{                                                                       \
    return test_and_set_bit(BH_##bit, &(bh)->b_state);                  \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)     \
{                                                                       \
    return test_and_clear_bit(BH_##bit, &(bh)->b_state);                \
}

#define bh_offset(bh)     ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)  mark_page_accessed(bh->b_page)

#define page_buffers(page)                              \
    (                                                   \
        BUG_ON(!PagePrivate(page)),                     \
        ((struct buffer_head *)page_private(page))      \
    )
#define page_has_buffers(page) PagePrivate(page)

void write_boundary_block(struct block_device *bdev,
                          sector_t bblock, unsigned blocksize);
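/*
 * BUFFER_FNS()/TAS_BUFFER_FNS() generate the usual Linux buffer-state
 * helpers.  For example, an instantiation such as
 *
 *     BUFFER_FNS(Uptodate, uptodate)
 *
 * (assuming a BH_Uptodate state bit, as in Linux) expands to
 * set_buffer_uptodate(), clear_buffer_uptodate() and buffer_uptodate(),
 * all operating on bh->b_state.
 */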
int block_write_full_page(struct page *page, get_block_t *get_block,
                          struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_write_begin(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page **, void **, get_block_t *);
int block_write_end(struct file *, struct address_space *,
                    loff_t, unsigned, unsigned,
                    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page *, void *);

int block_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                     unsigned, unsigned, struct page **, void **,
                     get_block_t *, loff_t *);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
                       get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_write_begin(struct file *, struct address_space *,
                     loff_t, unsigned, unsigned,
                     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
                   loff_t, unsigned, unsigned,
                   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                   struct writeback_control *wbc);
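/*
 * All of the helpers above are driven by a filesystem-supplied get_block_t
 * callback in the usual Linux form (the callback below is purely
 * illustrative; the names are not from this header):
 *
 *     static int demo_get_block(struct inode *inode, sector_t iblock,
 *                               struct buffer_head *bh_result, int create)
 *     {
 *         sector_t phys = 0;
 *         // ... look up (or, if "create", allocate) the on-disk block ...
 *         map_bh(bh_result, inode->i_sb, phys);
 *         return 0;
 *     }
 *
 * which would then be passed to block_read_full_page() or
 * block_write_full_page().
 */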
static inline void attach_page_buffers(struct page *page,
                                       struct buffer_head *head)
{
    page_cache_get(page);
    SetPagePrivate(page);
    set_page_private(page, (unsigned long)head);
}
static inline void map_bh(struct buffer_head *bh, struct super_block *sb,
                          sector_t block)
{
    set_buffer_mapped(bh);
    bh->b_bdev = sb->s_bdev;
    bh->b_blocknr = block;
    bh->b_size = sb->s_blocksize;
}

static inline void lock_buffer(struct buffer_head *bh)
{
    if (test_set_buffer_locked(bh))
        __lock_buffer(bh);
}
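/*
 * Typical read-side sketch built on the emulation above (illustrative only;
 * sb_bread() and brelse() are declared later in this header):
 *
 *     struct buffer_head *bh = sb_bread(sb, block);   // read one fs block
 *     if (bh) {
 *         // ... consume bh->b_data ...
 *         brelse(bh);                                 // drop the reference
 *     }
 */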
#define NLS_MAX_CHARSET_SIZE  6

#define jiffies  JIFFIES()

#ifdef _WIN2K_TARGET_
#define ExFreePoolWithTag(_P, _T) ExFreePool(_P)
#endif

#define kmalloc(size, gfp)  Ext2AllocatePool(NonPagedPool, size, 'JBDM')
#define kfree(p)            Ext2FreePool(p, 'JBDM')

#define SLAB_HWCACHE_ALIGN  0x00002000U
#define SLAB_KERNEL         0x00000001U
#define SLAB_TEMPORARY      0x00000002U

kmem_cache_t *kmem_cache_create(const char *name, size_t size, size_t offset,
                                unsigned long flags, kmem_cache_cb_t ctor);
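/*
 * Slab-cache emulation sketch (illustrative; "struct foo" is a placeholder):
 *
 *     kmem_cache_t *cachep = kmem_cache_create("foo", sizeof(struct foo),
 *                                              0, SLAB_TEMPORARY, NULL);
 *     struct foo *p = kmem_cache_alloc(cachep, GFP_KERNEL);
 *     // ... use p ...
 *     kmem_cache_free(cachep, p);
 *     kmem_cache_destroy(cachep);
 */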
#define BDEVNAME_SIZE  32

#define READ_SYNC      (READ | (1 << BIO_RW_SYNC))
#define READ_META      (READ | (1 << BIO_RW_META))
#define WRITE_SYNC     (WRITE | (1 << BIO_RW_SYNC))
#define WRITE_BARRIER  ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))

#define typecheck(x, y) (TRUE)

#define time_after(a,b)             \
    (typecheck(unsigned long, a) && \
     typecheck(unsigned long, b) && \
     ((long)(b) - (long)(a) < 0))
#define time_before(a,b) time_after(b,a)

#define time_after_eq(a,b)          \
    (typecheck(unsigned long, a) && \
     typecheck(unsigned long, b) && \
     ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)

#define time_in_range(a,b,c)    \
    (time_after_eq(a,b) &&      \
     time_before_eq(a,c))

#define smp_rmb() do {} while(0)

#define do_div(n, b) do_div64(&(n), (__u64)b)

#endif // _EXT2_MODULE_HEADER_
NTKERNELAPI NTSTATUS ExUuidCreate(OUT UUID *Uuid);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int kmem_cache_destroy(kmem_cache_t *kc);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
static __u32 do_div64(__u64 *n, __u64 b);
static void wait_on_buffer(struct buffer_head *bh);
void block_sync_page(struct page *);
void extents_mark_buffer_dirty(struct buffer_head *bh);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh);
static int need_resched();
static int spin_needbreak(spinlock_t *lock);
typedef unsigned __int64 sector_t;
void thaw_bdev(struct block_device *, struct super_block *);
int fsync_no_super(struct block_device *);
int unregister_nls(struct nls_table *);
void free_buffer_head(struct buffer_head *bh);
int sync_blockdev(struct block_device *bdev);
int utf8_mbtowc(wchar_t *, const __u8 *, int);
void *kmem_cache_alloc(kmem_cache_t *kc, int flags);
void ll_rw_block(int, int, struct buffer_head *bh[]);
static void put_bh(struct buffer_head *bh);
ULONG NTAPI KeQueryTimeIncrement(VOID);
static struct buffer_head *sb_find_get_block(struct super_block *sb, sector_t block);
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
int register_nls(struct nls_table *);
void invalidate_bdev(struct block_device *);
static int test_bit(int nr, volatile const unsigned long *addr);
kmem_cache_t *kmem_cache_create(const char *name, size_t size, size_t offset,
                                unsigned long flags, kmem_cache_cb_t ctor);
void free_pages(unsigned long addr, unsigned int order);
int sync_mapping_buffers(struct address_space *mapping);
struct rb_root bd_bh_root;
static void le64_add_cpu(__le64 *var, u64 val);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
void wait_queue_destroy(struct __wait_queue *);
unsigned char *charset2lower;
struct list_head task_list;
_Check_return_ unsigned short __cdecl _byteswap_ushort(_In_ unsigned short);
struct buffer_head *extents_bwrite(struct super_block *sb, sector_t block);
void unlock_buffer(struct buffer_head *bh);
void init_waitqueue_head(wait_queue_head_t *q);
int inode_has_buffers(struct inode *);
kmem_cache_cb_t constructor;
void extents_brelse(struct buffer_head *bh);
static void bforget(struct buffer_head *bh);
typedef unsigned __int64 blkcnt_t;
void unload_nls(struct nls_table *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void block_invalidatepage(struct page *page, unsigned long offset);
static void fini_bh(struct buffer_head **bh);
void extents_bforget(struct buffer_head *bh);
void mark_buffer_dirty(struct buffer_head *bh);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, int retry);
static struct buffer_head *sb_bread(struct super_block *sb, sector_t block);
struct block_device *b_bdev;
USHORT FASTCALL RtlUshortByteSwap(IN USHORT Source);
static void get_bh(struct buffer_head *bh);
static void *ERR_PTR(long error);
_Check_return_ unsigned long __cdecl _byteswap_ulong(_In_ unsigned long);
void __free_pages(struct page *page, unsigned int order);
VOID NTAPI KeQueryTickCount(IN PLARGE_INTEGER TickCount);
int bh_uptodate_or_lock(struct buffer_head *bh);
static struct buffer_head *sb_getblk(struct super_block *sb, sector_t block);
ULONGLONG FASTCALL RtlUlonglongByteSwap(IN ULONGLONG Source);
kmem_cache_t *bd_bh_cache;
static void atomic_inc(atomic_t volatile *v);
static int set_bit(int nr, volatile unsigned long *addr);
static long PTR_ERR(const void *ptr);
static void le16_add_cpu(__le16 *var, u16 val);
typedef struct _spinlock_t spinlock_t;
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned long size);
int (*char2uni)(const unsigned char *rawstring, int boundlen, wchar_t *uni);
int utf8_wctomb(__u8 *, wchar_t, int);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
typedef void (*kmem_cache_cb_t)(void *, kmem_cache_t *, unsigned long);
struct nls_table *load_nls(char *);
static void lock_buffer(struct buffer_head *bh);
void __brelse(struct buffer_head *);
static int test_and_clear_bit(int nr, volatile unsigned long *addr);
struct task_struct *current;
int bh_submit_read(struct buffer_head *bh);
static int clear_bit(int nr, volatile unsigned long *addr);
int fsync_super(struct super_block *);
static int test_and_set_bit(int nr, volatile unsigned long *addr);
void __lock_buffer(struct buffer_head *bh);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block, unsigned long size);
static void le32_add_cpu(__le32 *var, u32 val);
void __wait_on_buffer(struct buffer_head *);
int wake_up(wait_queue_head_t *queue);
int sync_dirty_buffer(struct buffer_head *bh);
struct buffer_head *get_block_bh(struct block_device *bdev, sector_t block, unsigned long size, int zero);
static int cond_resched();
static void brelse(struct buffer_head *bh);
struct super_block *freeze_bdev(struct block_device *);
void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize);
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
int utf8_mbstowcs(wchar_t *, const __u8 *, int);
VOID Ext2FreePool(IN PVOID P, IN ULONG Tag);
_Check_return_ unsigned __int64 __cdecl _byteswap_uint64(_In_ unsigned __int64);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct nls_table *load_nls_default(void);
void kmem_cache_free(kmem_cache_t *kc, void *p);
PVOID Ext2AllocatePool(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag);
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
void mark_buffer_async_write(struct buffer_head *bh);
static void map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block);
struct buffer_head *extents_bread(struct super_block *sb, sector_t block);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void *kzalloc(int size, int flags);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void truncate_inode_pages(struct address_space *, loff_t);
void create_empty_buffers(struct page *, unsigned long, unsigned long b_state);
struct __wait_queue *wait_queue_create();
NTKERNELAPI PVOID NTAPI ExAllocatePoolWithTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag);
int remove_inode_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *);
void invalidate_bh_lrus(void);
PARTITION_INFORMATION bd_part;
int __set_page_dirty_buffers(struct page *page);
unsigned char *charset2upper;
unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset);
int buffer_heads_over_limit;
int (*uni2char)(wchar_t uni, unsigned char *out, int boundlen);
int utf8_wcstombs(__u8 *, const wchar_t *, int);
static struct buffer_head *sb_getblk_zero(struct super_block *sb, sector_t block);
static long IS_ERR(const void *ptr);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int fsync_bdev(struct block_device *);
int submit_bh(int, struct buffer_head *);
void __bforget(struct buffer_head *);
int try_to_free_buffers(struct page *);
ULONG FASTCALL RtlUlongByteSwap(IN ULONG Source);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
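/*
 * NLS usage sketch (illustrative; the codepage name is an example): a table
 * is looked up by name and used to convert on-disk bytes to Unicode.
 *
 *     struct nls_table *nls = load_nls((char *)"utf8"); // or load_nls_default()
 *     if (nls) {
 *         wchar_t uni;
 *         if (nls->char2uni((const unsigned char *)"A", 1, &uni) > 0) {
 *             // ... use the converted character ...
 *         }
 *         unload_nls(nls);
 *     }
 */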