ReactOS 0.4.15-dev-5666-gc548b97
linux.c
1/*
2 * COPYRIGHT: See COPYRIGHT.TXT
3 * PROJECT: Ext2 File System Driver for WinNT/2K/XP
4 * FILE: linux.c
5 * PROGRAMMER: Matt Wu <mattwu@163.com>
6 * HOMEPAGE: http://www.ext2fsd.com
7 * UPDATE HISTORY:
8 */
9
10/* INCLUDES *****************************************************************/
11
12#include <ext2fs.h>
13#include <linux/jbd.h>
14#include <linux/errno.h>
15
16/* GLOBALS ***************************************************************/
17
18extern PEXT2_GLOBAL Ext2Global;
19
20/* DEFINITIONS *************************************************************/
21
22#ifdef ALLOC_PRAGMA
23#pragma alloc_text(PAGE, kzalloc)
24#endif
25
26struct task_struct current_task = {
27    /* pid  */ 0,
28 /* tid */ 1,
29 /* comm */ "current\0",
30 /* journal_info */ NULL
31};
32struct task_struct *current = &current_task;
33
34void *kzalloc(int size, int flags)
35{
36 void *buffer = kmalloc(size, flags);
37 if (buffer) {
38 memset(buffer, 0, size);
39 }
40 return buffer;
41}
42
43//
44// slab routines
45//
46
47kmem_cache_t *
48kmem_cache_create(
49    const char *    name,
50 size_t size,
51 size_t offset,
52 unsigned long flags,
53    kmem_cache_cb_t ctor
54)
55{
56 kmem_cache_t *kc = NULL;
57
58 kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
59 if (kc == NULL) {
60 goto errorout;
61 }
62
63 memset(kc, 0, sizeof(kmem_cache_t));
64    ExInitializeNPagedLookasideList(
65        &kc->la,
66 NULL,
67 NULL,
68 0,
69 size,
70 'JBKC',
71 0);
72
73 kc->size = size;
74 strncpy(kc->name, name, 31);
75 kc->constructor = ctor;
76
77errorout:
78
79 return kc;
80}
81
82int kmem_cache_destroy(kmem_cache_t * kc)
83{
84 ASSERT(kc != NULL);
85
86    ExDeleteNPagedLookasideList(&(kc->la));
87    kfree(kc);
88
89 return 0;
90}
91
92void *kmem_cache_alloc(kmem_cache_t *kc, int flags)
93{
94 PVOID ptr = NULL;
95 ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
96 if (ptr) {
97 atomic_inc(&kc->count);
98 atomic_inc(&kc->acount);
99 }
100 return ptr;
101}
102
103void kmem_cache_free(kmem_cache_t *kc, void *p)
104{
105 if (p) {
106 atomic_dec(&kc->count);
107 ExFreeToNPagedLookasideList(&(kc->la), p);
108 }
109}
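The four wrappers above emulate the Linux slab interface on top of an NPAGED_LOOKASIDE_LIST, so objects always come from nonpaged pool. A minimal usage sketch; the cache name and object size below are hypothetical, not taken from the driver:

kmem_cache_t *cache;
void *obj;

cache = kmem_cache_create("demo_cache", 64, 0, SLAB_TEMPORARY, NULL);  /* name/size are examples */
if (cache) {
    obj = kmem_cache_alloc(cache, GFP_KERNEL);   /* not zeroed, unlike kzalloc() */
    if (obj) {
        /* ... use the fixed-size object ... */
        kmem_cache_free(cache, obj);
    }
    kmem_cache_destroy(cache);
}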
110
111//
112// wait queue routines
113//
114
115void init_waitqueue_head(wait_queue_head_t *q)
116{
117 spin_lock_init(&q->lock);
118 INIT_LIST_HEAD(&q->task_list);
119}
120
121struct __wait_queue *
122wait_queue_create()
123{
124 struct __wait_queue * wait = NULL;
125 wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
126 if (!wait) {
127 return NULL;
128 }
129
130 memset(wait, 0, sizeof(struct __wait_queue));
131    wait->flags = WQ_FLAG_AUTO_REMOVAL;
132    wait->private = (void *)KeGetCurrentThread();
133    INIT_LIST_HEAD(&wait->task_list);
134    KeInitializeEvent(&(wait->event),
135                      SynchronizationEvent,
136                      FALSE);
137
138 return wait;
139}
140
141void
142wait_queue_destroy(struct __wait_queue *wait)
143{
144 kfree(wait);
145}
146
147static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
148{
149 list_add(&new->task_list, &head->task_list);
150}
151
152/*
153 * Used for wake-one threads:
154 */
155static inline void __add_wait_queue_tail(wait_queue_head_t *head,
156                                          struct __wait_queue *new)
157{
158 list_add_tail(&new->task_list, &head->task_list);
159}
160
161static inline void __remove_wait_queue(wait_queue_head_t *head,
162                                       struct __wait_queue *old)
163{
164 list_del(&old->task_list);
165}
166
167void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
168{
169 unsigned long flags;
170 struct __wait_queue *wait = *waiti;
171
172 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
173 spin_lock_irqsave(&q->lock, flags);
174 __add_wait_queue(q, wait);
175    spin_unlock_irqrestore(&q->lock, flags);
176}
177
178void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
179{
180 unsigned long flags;
181 struct __wait_queue *wait = *waiti;
182
183 wait->flags |= WQ_FLAG_EXCLUSIVE;
184 spin_lock_irqsave(&q->lock, flags);
185    __add_wait_queue_tail(q, wait);
186    spin_unlock_irqrestore(&q->lock, flags);
187}
188
189void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
190{
191 unsigned long flags;
192 struct __wait_queue *wait = *waiti;
193
194 spin_lock_irqsave(&q->lock, flags);
195 __remove_wait_queue(q, wait);
196    spin_unlock_irqrestore(&q->lock, flags);
197}
198
199/*
200 * Note: we use "set_current_state()" _after_ the wait-queue add,
201 * because we need a memory barrier there on SMP, so that any
202 * wake-function that tests for the wait-queue being active
203 * will be guaranteed to see waitqueue addition _or_ subsequent
204 * tests in this thread will see the wakeup having taken place.
205 *
206 * The spin_unlock() itself is semi-permeable and only protects
207 * one way (it only protects stuff inside the critical region and
208 * stops them from bleeding out - it would still allow subsequent
209 * loads to move into the critical region).
210 */
211void
212prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
213{
214 unsigned long flags;
215 struct __wait_queue *wait = *waiti;
216
217 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
218 spin_lock_irqsave(&q->lock, flags);
219 if (list_empty(&wait->task_list))
220 __add_wait_queue(q, wait);
221 /*
222 * don't alter the task state if this is just going to
223 * queue an async wait queue callback
224 */
225 if (is_sync_wait(wait))
226        set_current_state(state);
227    spin_unlock_irqrestore(&q->lock, flags);
228}
229
230void
231prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
232{
233 unsigned long flags;
234 struct __wait_queue *wait = *waiti;
235
236 wait->flags |= WQ_FLAG_EXCLUSIVE;
237 spin_lock_irqsave(&q->lock, flags);
238 if (list_empty(&wait->task_list))
239        __add_wait_queue_tail(q, wait);
240    /*
241 * don't alter the task state if this is just going to
242 * queue an async wait queue callback
243 */
244 if (is_sync_wait(wait))
245        set_current_state(state);
246    spin_unlock_irqrestore(&q->lock, flags);
247}
248EXPORT_SYMBOL(prepare_to_wait_exclusive);
249
250void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
251{
252 unsigned long flags;
253 struct __wait_queue *wait = *waiti;
254
255 __set_current_state(TASK_RUNNING);
256 /*
257 * We can check for list emptiness outside the lock
258 * IFF:
259 * - we use the "careful" check that verifies both
260 * the next and prev pointers, so that there cannot
261 * be any half-pending updates in progress on other
262 * CPU's that we haven't seen yet (and that might
263 * still change the stack area).
264 * and
265 * - all other users take the lock (ie we can only
266 * have _one_ other CPU that looks at or modifies
267 * the list).
268 */
269 if (!list_empty_careful(&wait->task_list)) {
270 spin_lock_irqsave(&q->lock, flags);
271 list_del_init(&wait->task_list);
272        spin_unlock_irqrestore(&q->lock, flags);
273    }
274
275 /* free wait */
276 wait_queue_destroy(wait);
277}
278
279int wake_up(wait_queue_head_t *queue)
280{
281 return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
282}
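These routines mirror the Linux wait-queue API, but blocking is expected to happen on the NT event embedded in each entry (wake_up() above is currently a stub). A minimal sketch of the intended calling pattern; the wait loop and the TASK_UNINTERRUPTIBLE constant are assumed from the emulation headers rather than shown in this file:

wait_queue_head_t head;
wait_queue_t      entry;         /* in this port, a pointer to struct __wait_queue */

init_waitqueue_head(&head);
entry = wait_queue_create();     /* allocates the entry and initializes its KEVENT */
if (entry) {
    prepare_to_wait(&head, &entry, TASK_UNINTERRUPTIBLE);  /* state constant assumed from module.h */
    /* a real waiter would block here, e.g. on KeWaitForSingleObject(&entry->event, ...) */
    finish_wait(&head, &entry);  /* dequeues and frees the entry via wait_queue_destroy() */
}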
283
284
285//
286// kernel timer routines
287//
288
289//
290// buffer head routines
291//
292
293struct _EXT2_BUFFER_HEAD {
294    kmem_cache_t *  bh_cache;
295    atomic_t        bh_count;
296    atomic_t        bh_acount;
297} g_jbh = {NULL, ATOMIC_INIT(0), ATOMIC_INIT(0)};
298
299int
300ext2_init_bh()
301{
302 g_jbh.bh_count.counter = 0;
303 g_jbh.bh_acount.counter = 0;
304 g_jbh.bh_cache = kmem_cache_create(
305 "ext2_bh", /* bh */
306 sizeof(struct buffer_head),
307 0, /* offset */
308 SLAB_TEMPORARY, /* flags */
309 NULL); /* ctor */
310 if (g_jbh.bh_cache == NULL) {
311 printk(KERN_EMERG "JBD: failed to create handle cache\n");
312 return -ENOMEM;
313 }
314 return 0;
315}
316
317void
318ext2_destroy_bh()
319{
320 if (g_jbh.bh_cache) {
321 kmem_cache_destroy(g_jbh.bh_cache);
322 g_jbh.bh_cache = NULL;
323 }
324}
325
326struct buffer_head *
327new_buffer_head()
328{
329 struct buffer_head * bh = NULL;
330 bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
331 if (bh) {
332 atomic_inc(&g_jbh.bh_count);
333 atomic_inc(&g_jbh.bh_acount);
334
335 memset(bh, 0, sizeof(struct buffer_head));
336        InitializeListHead(&bh->b_link);
337        KeQuerySystemTime(&bh->b_ts_creat);
338        DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
339 INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
340 }
341
342 return bh;
343}
344
345void
346free_buffer_head(struct buffer_head * bh)
347{
348 if (bh) {
349 if (bh->b_mdl) {
350
351 DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
352 bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
353 if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
354 MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
355 }
356            Ext2DestroyMdl(bh->b_mdl);
357        }
358        if (bh->b_bcb) {
359            CcUnpinDataForThread(bh->b_bcb, (ERESOURCE_THREAD)bh | 0x3);
360        }
361
362 DEBUG(DL_BH, ("bh=%p freed.\n", bh));
363 DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
364 kmem_cache_free(g_jbh.bh_cache, bh);
365 atomic_dec(&g_jbh.bh_count);
366 }
367}
368
369//
370// Red-black tree insert routine.
371//
372
373static struct buffer_head *__buffer_head_search(struct rb_root *root,
374                                                sector_t blocknr)
375{
376 struct rb_node *new = root->rb_node;
377
378 /* Figure out where to put new node */
379 while (new) {
380 struct buffer_head *bh =
381 container_of(new, struct buffer_head, b_rb_node);
382 s64 result = blocknr - bh->b_blocknr;
383
384 if (result < 0)
385 new = new->rb_left;
386 else if (result > 0)
387 new = new->rb_right;
388 else
389 return bh;
390
391 }
392
393 return NULL;
394}
395
396static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
397{
398 struct buffer_head *a_bh, *b_bh;
399 s64 result;
400 a_bh = container_of(a, struct buffer_head, b_rb_node);
401 b_bh = container_of(b, struct buffer_head, b_rb_node);
402 result = a_bh->b_blocknr - b_bh->b_blocknr;
403
404 if (result < 0)
405 return -1;
406 if (result > 0)
407 return 1;
408 return 0;
409}
410
411static struct buffer_head *buffer_head_search(struct block_device *bdev,
412 sector_t blocknr)
413{
414 struct rb_root *root;
415 root = &bdev->bd_bh_root;
416 return __buffer_head_search(root, blocknr);
417}
418
419static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
420{
421    rb_insert(&bdev->bd_bh_root, &bh->b_rb_node, buffer_head_blocknr_cmp);
422}
423
424void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
425{
426 rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
427}
428
429struct buffer_head *
430get_block_bh_mdl(
431    struct block_device *   bdev,
432    sector_t                block,
433 unsigned long size,
434 int zero
435)
436{
437 PEXT2_VCB Vcb = bdev->bd_priv;
438    LARGE_INTEGER offset;
439    PVOID         bcb = NULL;
440 PVOID ptr = NULL;
441
442 struct list_head *entry;
443
444 /* allocate buffer_head and initialize it */
445 struct buffer_head *bh = NULL, *tbh = NULL;
446
447 /* check the block is valid or not */
448 if (block >= TOTAL_BLOCKS) {
449 DbgBreak();
450 goto errorout;
451 }
452
453 /* search the bdev bh list */
454    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
455    tbh = buffer_head_search(bdev, block);
456    if (tbh) {
457        bh = tbh;
458        get_bh(bh);
459        ExReleaseResourceLite(&bdev->bd_bh_lock);
460        goto errorout;
461    }
462    ExReleaseResourceLite(&bdev->bd_bh_lock);
463
464 bh = new_buffer_head();
465 if (!bh) {
466 goto errorout;
467 }
468 bh->b_bdev = bdev;
469 bh->b_blocknr = block;
470 bh->b_size = size;
471 bh->b_data = NULL;
472#ifdef __REACTOS__
473    InitializeListHead(&bh->b_link);
474#endif
475
476again:
477
478 offset.QuadPart = (s64) bh->b_blocknr;
479 offset.QuadPart <<= BLOCK_BITS;
480
481 if (zero) {
482 /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
483 if (!CcPreparePinWrite(Vcb->Volume,
484 &offset,
485 bh->b_size,
486 FALSE,
487 PIN_WAIT /* | PIN_EXCLUSIVE */,
488 &bcb,
489 &ptr)) {
490 Ext2Sleep(100);
491 goto again;
492 }
493 } else {
494 if (!CcPinRead( Vcb->Volume,
495 &offset,
496 bh->b_size,
497 PIN_WAIT,
498 &bcb,
499 &ptr)) {
500 Ext2Sleep(100);
501 goto again;
502 }
503 set_buffer_uptodate(bh);
504 }
505
506    bh->b_mdl = Ext2CreateMdl(ptr, bh->b_size, IoModifyAccess);
507    if (bh->b_mdl) {
508        /* must map the PTE to NonCached zone. journal recovery will
509           access the PTE under spinlock: DISPATCH_LEVEL IRQL */
510        bh->b_data = MmMapLockedPagesSpecifyCache(
511                         bh->b_mdl, KernelMode, MmNonCached,
512                         NULL, FALSE, HighPagePriority);
513        /* bh->b_data = MmMapLockedPages(bh->b_mdl, KernelMode); */
514    }
515    if (!bh->b_mdl || !bh->b_data) {
516        free_buffer_head(bh);
517        bh = NULL;
518 goto errorout;
519 }
520
521 get_bh(bh);
522
523 DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
524 Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));
525
526    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
527    /* do search again here */
528    tbh = buffer_head_search(bdev, block);
529    if (tbh) {
530        free_buffer_head(bh);
531        bh = tbh;
532        get_bh(bh);
533        RemoveEntryList(&bh->b_link);
534        InitializeListHead(&bh->b_link);
535        ExReleaseResourceLite(&bdev->bd_bh_lock);
536        goto errorout;
537    } else {
538        buffer_head_insert(bdev, bh);
539    }
540    ExReleaseResourceLite(&bdev->bd_bh_lock);
541
542 /* we get it */
543errorout:
544
545 if (bcb)
546 CcUnpinData(bcb);
547
548 return bh;
549}
550
551int submit_bh_mdl(int rw, struct buffer_head *bh)
552{
553 struct block_device *bdev = bh->b_bdev;
554 PEXT2_VCB Vcb = bdev->bd_priv;
555    PBCB                 Bcb;
556    PVOID                Buffer;
557    LARGE_INTEGER        Offset;
558
559 ASSERT(Vcb->Identifier.Type == EXT2VCB);
560 ASSERT(bh->b_data);
561
562 if (rw == WRITE) {
563
564 if (IsVcbReadOnly(Vcb)) {
565 goto errorout;
566 }
567
568 SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
569 Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
570
571        /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
572        if (CcPreparePinWrite(
573                    Vcb->Volume,
574                    &Offset,
575                    BLOCK_SIZE,
576                    FALSE,
577                    PIN_WAIT /* | PIN_EXCLUSIVE */,
578                    &Bcb,
579                    &Buffer )) {
580#if 0
581            if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
582                DbgBreak();
583            }
584            memmove(Buffer, bh->b_data, BLOCK_SIZE);
585#endif
586            CcSetDirtyPinnedData(Bcb, NULL);
587            Ext2AddBlockExtent( Vcb, NULL,
588                                (ULONG)bh->b_blocknr,
589                                (ULONG)bh->b_blocknr,
590                                (bh->b_size >> BLOCK_BITS));
591            CcUnpinData(Bcb);
592        } else {
593
594            Ext2AddBlockExtent( Vcb, NULL,
595                                (ULONG)bh->b_blocknr,
596 (ULONG)bh->b_blocknr,
597 (bh->b_size >> BLOCK_BITS));
598 }
599
600 } else {
601 }
602
603errorout:
604
605 unlock_buffer(bh);
606 put_bh(bh);
607 return 0;
608}
609
610struct buffer_head *
611get_block_bh_pin(
612    struct block_device *   bdev,
613    sector_t                block,
614 unsigned long size,
615 int zero
616)
617{
618 PEXT2_VCB Vcb = bdev->bd_priv;
619    LARGE_INTEGER offset;
620
621 struct list_head *entry;
622
623 /* allocate buffer_head and initialize it */
624 struct buffer_head *bh = NULL, *tbh = NULL;
625
626 /* check the block is valid or not */
627 if (block >= TOTAL_BLOCKS) {
628 DbgBreak();
629 goto errorout;
630 }
631
632 /* search the bdev bh list */
633    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
634    tbh = buffer_head_search(bdev, block);
635 if (tbh) {
636 bh = tbh;
637 get_bh(bh);
638        ExReleaseResourceLite(&bdev->bd_bh_lock);
639        goto errorout;
640    }
641    ExReleaseResourceLite(&bdev->bd_bh_lock);
642
643 bh = new_buffer_head();
644 if (!bh) {
645 goto errorout;
646 }
647 bh->b_bdev = bdev;
648 bh->b_blocknr = block;
649 bh->b_size = size;
650 bh->b_data = NULL;
651#ifdef __REACTOS__
652    InitializeListHead(&bh->b_link);
653#endif
654
655again:
656
657 offset.QuadPart = (s64) bh->b_blocknr;
658 offset.QuadPart <<= BLOCK_BITS;
659
660 if (zero) {
661 if (!CcPreparePinWrite(Vcb->Volume,
662 &offset,
663 bh->b_size,
664 FALSE,
665 PIN_WAIT,
666 &bh->b_bcb,
667 (PVOID *)&bh->b_data)) {
668 Ext2Sleep(100);
669 goto again;
670 }
671 } else {
672 if (!CcPinRead( Vcb->Volume,
673 &offset,
674 bh->b_size,
675 PIN_WAIT,
676 &bh->b_bcb,
677 (PVOID *)&bh->b_data)) {
678 Ext2Sleep(100);
679 goto again;
680 }
681 set_buffer_uptodate(bh);
682 }
683
684    if (bh->b_bcb)
685        CcSetBcbOwnerPointer(bh->b_bcb, (PVOID)((ERESOURCE_THREAD)bh | 0x3));
686
687    if (!bh->b_data) {
688        free_buffer_head(bh);
689        bh = NULL;
690 goto errorout;
691 }
692 get_bh(bh);
693
694 DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p ptr=%p.\n",
695 Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_data));
696
697    ExAcquireResourceExclusiveLite(&bdev->bd_bh_lock, TRUE);
698    /* do search again here */
699 tbh = buffer_head_search(bdev, block);
700 if (tbh) {
701        get_bh(tbh);
702        free_buffer_head(bh);
703        bh = tbh;
704        RemoveEntryList(&bh->b_link);
705        InitializeListHead(&bh->b_link);
706        ExReleaseResourceLite(&bdev->bd_bh_lock);
707        goto errorout;
708 } else {
709 buffer_head_insert(bdev, bh);
710 }
711    ExReleaseResourceLite(&bdev->bd_bh_lock);
712
713 /* we get it */
714errorout:
715
716 return bh;
717}
718
719int submit_bh_pin(int rw, struct buffer_head *bh)
720{
721 struct block_device *bdev = bh->b_bdev;
722 PEXT2_VCB Vcb = bdev->bd_priv;
723    PVOID                Buffer;
724    LARGE_INTEGER        Offset;
725
726 ASSERT(Vcb->Identifier.Type == EXT2VCB);
727 ASSERT(bh->b_data && bh->b_bcb);
728
729 if (rw == WRITE) {
730
731 if (IsVcbReadOnly(Vcb)) {
732 goto errorout;
733 }
734
735 SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
736 Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
737
738        CcSetDirtyPinnedData(bh->b_bcb, NULL);
739        Ext2AddBlockExtent( Vcb, NULL,
740                            (ULONG)bh->b_blocknr,
741 (ULONG)bh->b_blocknr,
742 (bh->b_size >> BLOCK_BITS));
743 } else {
744 }
745
746errorout:
747
748 unlock_buffer(bh);
749 put_bh(bh);
750 return 0;
751}
752
753#if 0
754
755struct buffer_head *
756get_block_bh(
757    struct block_device *   bdev,
758    sector_t                block,
759 unsigned long size,
760 int zero
761)
762{
763 return get_block_bh_mdl(bdev, block, size, zero);
764}
765
766int submit_bh(int rw, struct buffer_head *bh)
767{
768 return submit_bh_mdl(rw, bh);
769}
770
771#else
772
773struct buffer_head *
774get_block_bh(
775    struct block_device *   bdev,
776    sector_t                block,
777 unsigned long size,
778 int zero
779)
780{
781 return get_block_bh_pin(bdev, block, size, zero);
782}
783
784int submit_bh(int rw, struct buffer_head *bh)
785{
786 return submit_bh_pin(rw, bh);
787}
788#endif
789
790struct buffer_head *
791__getblk(
792    struct block_device *   bdev,
793    sector_t                block,
794 unsigned long size
795)
796{
797 return get_block_bh(bdev, block, size, 0);
798}
799
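A minimal sketch (hypothetical device and block number) of the lookup/release cycle these helpers serve: __getblk() finds or creates a buffer_head, pins the block through the cache manager and takes a reference; __brelse() writes it back if dirty, drops the reference, and parks the buffer on the per-volume free list for the reaper:

struct buffer_head *bh;

bh = __getblk(bdev, blocknr, BLOCK_SIZE);   /* bdev/blocknr are placeholders */
if (bh) {
    /* ... read or modify bh->b_data ... */
    mark_buffer_dirty(bh);                  /* defer the write; see mark_buffer_dirty() below */
    __brelse(bh);                           /* flushes dirty data, then drops b_count */
}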
800void __brelse(struct buffer_head *bh)
801{
802    struct block_device *bdev = bh->b_bdev;
803    PEXT2_VCB Vcb = (PEXT2_VCB)bdev->bd_priv;
804
805    ASSERT(Vcb->Identifier.Type == EXT2VCB);
806
807    /* write data in case it's dirty */
808    while (buffer_dirty(bh)) {
809        ll_rw_block(WRITE, 1, &bh);
810    }
811
812    ExAcquireResourceExclusiveLite(&Vcb->bd.bd_bh_lock, TRUE);
813    if (atomic_dec_and_test(&bh->b_count)) {
814        ASSERT(0 == atomic_read(&bh->b_count));
815    } else {
816        ExReleaseResourceLite(&Vcb->bd.bd_bh_lock);
817        return;
818    }
819    KeQuerySystemTime(&bh->b_ts_drop);
820#ifdef __REACTOS__
821    if (!IsListEmpty(&bh->b_link))
822#endif
823    RemoveEntryList(&bh->b_link);
824    InsertTailList(&Vcb->bd.bd_bh_free, &bh->b_link);
825    KeClearEvent(&Vcb->bd.bd_bh_notify);
826    ExReleaseResourceLite(&Vcb->bd.bd_bh_lock);
827    KeSetEvent(&Ext2Global->bhReaper.Wait, 0, FALSE);
828
829 DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
830 atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
831 bh->b_blocknr, bh, bh->b_data ));
832}
833
834
835void __bforget(struct buffer_head *bh)
836{
837 clear_buffer_dirty(bh);
838 __brelse(bh);
839}
840
841void __lock_buffer(struct buffer_head *bh)
842{
843}
844
845void unlock_buffer(struct buffer_head *bh)
846{
847    clear_buffer_locked(bh);
848}
849
850void __wait_on_buffer(struct buffer_head *bh)
851{
852}
853
854void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
855{
856 int i;
857
858 for (i = 0; i < nr; i++) {
859
860 struct buffer_head *bh = bhs[i];
861
862 if (rw == SWRITE)
863 lock_buffer(bh);
864 else if (test_set_buffer_locked(bh))
865 continue;
866
867 if (rw == WRITE || rw == SWRITE) {
868 if (test_clear_buffer_dirty(bh)) {
869 get_bh(bh);
870 submit_bh(WRITE, bh);
871 continue;
872 }
873 } else {
874 if (!buffer_uptodate(bh)) {
875 get_bh(bh);
876 submit_bh(rw, bh);
877 continue;
878 }
879 }
880 unlock_buffer(bh);
881 }
882}
883
884int bh_submit_read(struct buffer_head *bh)
885{
886    ll_rw_block(READ, 1, &bh);
887    return 0;
888}
889
890int sync_dirty_buffer(struct buffer_head *bh)
891{
892 int ret = 0;
893
894 ASSERT(atomic_read(&bh->b_count) <= 1);
895 lock_buffer(bh);
896 if (test_clear_buffer_dirty(bh)) {
897 get_bh(bh);
898 ret = submit_bh(WRITE, bh);
899 wait_on_buffer(bh);
900 } else {
901 unlock_buffer(bh);
902 }
903 return ret;
904}
905
906void mark_buffer_dirty(struct buffer_head *bh)
907{
908    set_buffer_dirty(bh);
909}
910
911int sync_blockdev(struct block_device *bdev)
912{
913    PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;
914    Ext2FlushVolume(NULL, Vcb, FALSE);
915    return 0;
916}
917
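A short sketch (hypothetical caller, "bh" stands for a pinned buffer_head) of the synchronous write path built from the routines above: mark the buffer, push that single block with sync_dirty_buffer(), then optionally flush the rest of the volume with sync_blockdev():

int err;

mark_buffer_dirty(bh);                   /* only sets the dirty bit */
err = sync_dirty_buffer(bh);             /* locks, submits the WRITE and waits */
if (err == 0)
    err = sync_blockdev(bh->b_bdev);     /* Ext2FlushVolume() for everything else */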
918/*
919 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
920 * it in the LRU and mark it as accessed. If it is not present then return
921 * NULL
922 */
923struct buffer_head *
924__find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
925{
926 return __getblk(bdev, block, size);
927}
928
929
930//
931// inode block mapping
932//
933
934ULONGLONG bmap(struct inode *i, ULONGLONG b)
935{
936 ULONGLONG lcn = 0;
937 struct super_block *s = i->i_sb;
938
939 PEXT2_MCB Mcb = (PEXT2_MCB)i->i_priv;
940 PEXT2_VCB Vcb = (PEXT2_VCB)s->s_priv;
941    PEXT2_EXTENT  extent = NULL;
942    ULONGLONG     offset = (ULONGLONG)b;
943    NTSTATUS      status;
944
945 if (!Mcb || !Vcb) {
946 goto errorout;
947 }
948
949 offset <<= BLOCK_BITS;
950    status = Ext2BuildExtents(
951                 NULL,
952 Vcb,
953 Mcb,
954 offset,
955                 BLOCK_SIZE,
956                 FALSE,
957 &extent
958 );
959
960 if (!NT_SUCCESS(status)) {
961 goto errorout;
962 }
963
964 if (extent == NULL) {
965 goto errorout;
966 }
967
968 lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);
969
970errorout:
971
972 if (extent) {
973        Ext2FreeExtent(extent);
974    }
975
976 return lcn;
977}
978
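bmap() resolves one file-relative block to a volume-relative block by asking Ext2BuildExtents() for a single-block extent; 0 is returned both for holes and for lookup failures. A small sketch with a hypothetical inode and block number:

ULONGLONG vbn = 1234;                    /* file-relative block number (example value) */
ULONGLONG lbn = bmap(inode, vbn);        /* volume-relative block, or 0; "inode" is a placeholder */

if (lbn != 0) {
    /* byte position on the volume, mirroring the BLOCK_BITS shift above */
    ULONGLONG byte_offset = lbn << BLOCK_BITS;
    /* ... seek or read at byte_offset ... */
}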
979void iget(struct inode *inode)
980{
981    atomic_inc(&inode->i_count);
982}
983
984void iput(struct inode *inode)
985{
986    if (atomic_dec_and_test(&inode->i_count)) {
987 kfree(inode);
988 }
989}
990
991//
992// initializer and destructor
993//
994
995int
996ext2_init_linux()
997{
998 int rc = 0;
999
1000 rc = ext2_init_bh();
1001 if (rc != 0) {
1002 goto errorout;
1003 }
1004
1005errorout:
1006
1007 return rc;
1008}
1009
1010void
1011ext2_destroy_linux()
1012{
1013    ext2_destroy_bh();
1014}
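ext2_init_linux() and ext2_destroy_linux() are the only lifecycle hooks the rest of the driver needs from this file. A sketch of the expected pairing; the surrounding driver-entry/unload code is hypothetical and not part of linux.c:

/* at driver start-up, before any journal or buffer_head use (hypothetical caller) */
if (ext2_init_linux() != 0) {
    /* -ENOMEM from ext2_init_bh(): fail driver initialization */
}

/* ... filesystem runs; buffer heads are carved from g_jbh.bh_cache ... */

/* at unload, after all buffer heads have been released */
ext2_destroy_linux();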