ReactOS 0.4.16-dev-106-g10b08aa
linux.c
Go to the documentation of this file.
1/*
2 * COPYRIGHT: See COPYRIGHT.TXT
3 * PROJECT: Ext2 File System Driver for WinNT/2K/XP
4 * FILE: linux.c
5 * PROGRAMMER: Matt Wu <mattwu@163.com>
6 * HOMEPAGE: http://www.ext2fsd.com
7 * UPDATE HISTORY:
8 */
9
10/* INCLUDES *****************************************************************/
11
12#include <ext2fs.h>
13#include <linux/jbd.h>
14#include <linux/errno.h>
15
16/* GLOBALS ***************************************************************/
17
19
20/* DEFINITIONS *************************************************************/
21
22#ifdef ALLOC_PRAGMA
23#pragma alloc_text(PAGE, kzalloc)
24#endif
25
27 /* pid */ 0,
28 /* tid */ 1,
29 /* comm */ "current\0",
30 /* journal_info */ NULL
31};
33
34void *kzalloc(int size, int flags)
35{
36 void *buffer = kmalloc(size, flags);
37 if (buffer) {
38 memset(buffer, 0, size);
39 }
40 return buffer;
41}
42
43//
44// slab routines
45//
46
49 const char * name,
50 size_t size,
51 size_t offset,
52 unsigned long flags,
54)
55{
56 kmem_cache_t *kc = NULL;
57
58 kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
59 if (kc == NULL) {
60 goto errorout;
61 }
62
63 memset(kc, 0, sizeof(kmem_cache_t));
65 &kc->la,
66 NULL,
67 NULL,
68 0,
69 size,
70 'JBKC',
71 0);
72
73 kc->size = size;
74 strncpy(kc->name, name, 31);
75 kc->constructor = ctor;
76
77errorout:
78
79 return kc;
80}
81
83{
84 ASSERT(kc != NULL);
85
87 kfree(kc);
88
89 return 0;
90}
91
93{
94 PVOID ptr = NULL;
95 ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
96 if (ptr) {
97 atomic_inc(&kc->count);
98 atomic_inc(&kc->acount);
99 }
100 return ptr;
101}
102
104{
105 if (p) {
106 atomic_dec(&kc->count);
107 ExFreeToNPagedLookasideList(&(kc->la), p);
108 }
109}
110
111//
112// wait queue routines
113//
114
116{
117 spin_lock_init(&q->lock);
118 INIT_LIST_HEAD(&q->task_list);
119}
120
121struct __wait_queue *
123{
124 struct __wait_queue * wait = NULL;
125 wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
126 if (!wait) {
127 return NULL;
128 }
129
130 memset(wait, 0, sizeof(struct __wait_queue));
132 wait->private = (void *)KeGetCurrentThread();
134 KeInitializeEvent(&(wait->event),
136 FALSE);
137
138 return wait;
139}
140
141void
143{
144 kfree(wait);
145}
146
147static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
148{
149 list_add(&new->task_list, &head->task_list);
150}
151
152/*
153 * Used for wake-one threads:
154 */
156 struct __wait_queue *new)
157{
158 list_add_tail(&new->task_list, &head->task_list);
159}
160
162 struct __wait_queue *old)
163{
164 list_del(&old->task_list);
165}
166
168{
169 unsigned long flags;
170 struct __wait_queue *wait = *waiti;
171
172 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
173 spin_lock_irqsave(&q->lock, flags);
174 __add_wait_queue(q, wait);
176}
177
179{
180 unsigned long flags;
181 struct __wait_queue *wait = *waiti;
182
183 wait->flags |= WQ_FLAG_EXCLUSIVE;
184 spin_lock_irqsave(&q->lock, flags);
187}
188
190{
191 unsigned long flags;
192 struct __wait_queue *wait = *waiti;
193
194 spin_lock_irqsave(&q->lock, flags);
195 __remove_wait_queue(q, wait);
197}
198
199/*
200 * Note: we use "set_current_state()" _after_ the wait-queue add,
201 * because we need a memory barrier there on SMP, so that any
202 * wake-function that tests for the wait-queue being active
203 * will be guaranteed to see waitqueue addition _or_ subsequent
204 * tests in this thread will see the wakeup having taken place.
205 *
206 * The spin_unlock() itself is semi-permeable and only protects
207 * one way (it only protects stuff inside the critical region and
208 * stops them from bleeding out - it would still allow subsequent
209 * loads to move into the critical region).
210 */
211void
213{
214 unsigned long flags;
215 struct __wait_queue *wait = *waiti;
216
217 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
218 spin_lock_irqsave(&q->lock, flags);
219 if (list_empty(&wait->task_list))
220 __add_wait_queue(q, wait);
221 /*
222 * don't alter the task state if this is just going to
223 * queue an async wait queue callback
224 */
225 if (is_sync_wait(wait))
228}
229
230void
232{
233 unsigned long flags;
234 struct __wait_queue *wait = *waiti;
235
236 wait->flags |= WQ_FLAG_EXCLUSIVE;
237 spin_lock_irqsave(&q->lock, flags);
238 if (list_empty(&wait->task_list))
240 /*
241 * don't alter the task state if this is just going to
242 * queue an async wait queue callback
243 */
244 if (is_sync_wait(wait))
247}
249
251{
252 unsigned long flags;
253 struct __wait_queue *wait = *waiti;
254
255 __set_current_state(TASK_RUNNING);
256 /*
257 * We can check for list emptiness outside the lock
258 * IFF:
259 * - we use the "careful" check that verifies both
260 * the next and prev pointers, so that there cannot
261 * be any half-pending updates in progress on other
262 * CPU's that we haven't seen yet (and that might
263 * still change the stack area.
264 * and
265 * - all other users take the lock (ie we can only
266 * have _one_ other CPU that looks at or modifies
267 * the list).
268 */
269 if (!list_empty_careful(&wait->task_list)) {
270 spin_lock_irqsave(&q->lock, flags);
271 list_del_init(&wait->task_list);
273 }
274
275 /* free wait */
276 wait_queue_destroy(wait);
277}
278
280{
281 return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
282}
283
284
285//
286// kernel timer routines
287//
288
289//
290// buffer head routines
291//
292
298
299int
301{
302 g_jbh.bh_count.counter = 0;
303 g_jbh.bh_acount.counter = 0;
304 g_jbh.bh_cache = kmem_cache_create(
305 "ext2_bh", /* bh */
306 sizeof(struct buffer_head),
307 0, /* offset */
308 SLAB_TEMPORARY, /* flags */
309 NULL); /* ctor */
310 if (g_jbh.bh_cache == NULL) {
311 printk(KERN_EMERG "JBD: failed to create handle cache\n");
312 return -ENOMEM;
313 }
314 return 0;
315}
316
317void
319{
320 if (g_jbh.bh_cache) {
321 kmem_cache_destroy(g_jbh.bh_cache);
322 g_jbh.bh_cache = NULL;
323 }
324}
325
326struct buffer_head *
328{
329 struct buffer_head * bh = NULL;
330 bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
331 if (bh) {
332 atomic_inc(&g_jbh.bh_count);
333 atomic_inc(&g_jbh.bh_acount);
334
335 memset(bh, 0, sizeof(struct buffer_head));
338 DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
339 INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
340 }
341
342 return bh;
343}
344
345void
347{
348 if (bh) {
349 if (bh->b_mdl) {
350
351 DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
352 bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
353 if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
354 MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
355 }
357 }
358 if (bh->b_bcb) {
360 }
361
362 DEBUG(DL_BH, ("bh=%p freed.\n", bh));
363 DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
364 kmem_cache_free(g_jbh.bh_cache, bh);
365 atomic_dec(&g_jbh.bh_count);
366 }
367}
368
369//
370// Red-black tree insert routine.
371//
372
374 sector_t blocknr)
375{
376 struct rb_node *new = root->rb_node;
377
378 /* Figure out where to put new node */
379 while (new) {
380 struct buffer_head *bh =
381 container_of(new, struct buffer_head, b_rb_node);
382 s64 result = blocknr - bh->b_blocknr;
383
384 if (result < 0)
385 new = new->rb_left;
386 else if (result > 0)
387 new = new->rb_right;
388 else
389 return bh;
390
391 }
392
393 return NULL;
394}
395
396static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
397{
398 struct buffer_head *a_bh, *b_bh;
399 s64 result;
400 a_bh = container_of(a, struct buffer_head, b_rb_node);
401 b_bh = container_of(b, struct buffer_head, b_rb_node);
402 result = a_bh->b_blocknr - b_bh->b_blocknr;
403
404 if (result < 0)
405 return -1;
406 if (result > 0)
407 return 1;
408 return 0;
409}
410
411static struct buffer_head *buffer_head_search(struct block_device *bdev,
412 sector_t blocknr)
413{
414 struct rb_root *root;
415 root = &bdev->bd_bh_root;
416 return __buffer_head_search(root, blocknr);
417}
418
419static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
420{
422}
423
424void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
425{
426 rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
427}
428
429struct buffer_head *
431 struct block_device * bdev,
433 unsigned long size,
434 int zero
435)
436{
437 PEXT2_VCB Vcb = bdev->bd_priv;
439 PVOID bcb = NULL;
440 PVOID ptr = NULL;
441
442 struct list_head *entry;
443
444 /* allocate buffer_head and initialize it */
445 struct buffer_head *bh = NULL, *tbh = NULL;
446
447 /* check the block is valid or not */
448 if (block >= TOTAL_BLOCKS) {
449 DbgBreak();
450 goto errorout;
451 }
452
453 /* search the bdev bh list */
455 tbh = buffer_head_search(bdev, block);
456 if (tbh) {
457 bh = tbh;
458 get_bh(bh);
460 goto errorout;
461 }
463
464 bh = new_buffer_head();
465 if (!bh) {
466 goto errorout;
467 }
468 bh->b_bdev = bdev;
469 bh->b_blocknr = block;
470 bh->b_size = size;
471 bh->b_data = NULL;
472#ifdef __REACTOS__
474#endif
475
476again:
477
478 offset.QuadPart = (s64) bh->b_blocknr;
479 offset.QuadPart <<= BLOCK_BITS;
480
481 if (zero) {
482 /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
483 if (!CcPreparePinWrite(Vcb->Volume,
484 &offset,
485 bh->b_size,
486 FALSE,
487 PIN_WAIT /* | PIN_EXCLUSIVE */,
488 &bcb,
489 &ptr)) {
490 Ext2Sleep(100);
491 goto again;
492 }
493 } else {
494 if (!CcPinRead( Vcb->Volume,
495 &offset,
496 bh->b_size,
497 PIN_WAIT,
498 &bcb,
499 &ptr)) {
500 Ext2Sleep(100);
501 goto again;
502 }
503 set_buffer_uptodate(bh);
504 }
505
507 if (bh->b_mdl) {
508 /* muse map the PTE to NonCached zone. journal recovery will
509 access the PTE under spinlock: DISPATCH_LEVEL IRQL */
513 /* bh->b_data = MmMapLockedPages(bh->b_mdl, KernelMode); */
514 }
515 if (!bh->b_mdl || !bh->b_data) {
517 bh = NULL;
518 goto errorout;
519 }
520
521 get_bh(bh);
522
523 DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
524 Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));
525
527 /* do search again here */
528 tbh = buffer_head_search(bdev, block);
529 if (tbh) {
531 bh = tbh;
532 get_bh(bh);
536 goto errorout;
537 } else {
538 buffer_head_insert(bdev, bh);
539 }
541
542 /* we get it */
543errorout:
544
545 if (bcb)
546 CcUnpinData(bcb);
547
548 return bh;
549}
550
551int submit_bh_mdl(int rw, struct buffer_head *bh)
552{
553 struct block_device *bdev = bh->b_bdev;
554 PEXT2_VCB Vcb = bdev->bd_priv;
555 PBCB Bcb;
558
559 ASSERT(Vcb->Identifier.Type == EXT2VCB);
560 ASSERT(bh->b_data);
561
562 if (rw == WRITE) {
563
564 if (IsVcbReadOnly(Vcb)) {
565 goto errorout;
566 }
567
568 SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
569 Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
570
571 /* PIN_EXCLUSIVE disabled, likely to deadlock with volume operations */
573 Vcb->Volume,
574 &Offset,
576 FALSE,
577 PIN_WAIT /* | PIN_EXCLUSIVE */,
578 &Bcb,
579 &Buffer )) {
580#if 0
581 if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
582 DbgBreak();
583 }
585#endif
588 (ULONG)bh->b_blocknr,
589 (ULONG)bh->b_blocknr,
590 (bh->b_size >> BLOCK_BITS));
592 } else {
593
595 (ULONG)bh->b_blocknr,
596 (ULONG)bh->b_blocknr,
597 (bh->b_size >> BLOCK_BITS));
598 }
599
600 } else {
601 }
602
603errorout:
604
605 unlock_buffer(bh);
606 put_bh(bh);
607 return 0;
608}
609
610struct buffer_head *
612 struct block_device * bdev,
614 unsigned long size,
615 int zero
616)
617{
618 PEXT2_VCB Vcb = bdev->bd_priv;
620
621 struct list_head *entry;
622
623 /* allocate buffer_head and initialize it */
624 struct buffer_head *bh = NULL, *tbh = NULL;
625
626 /* check the block is valid or not */
627 if (block >= TOTAL_BLOCKS) {
628 DbgBreak();
629 goto errorout;
630 }
631
632 /* search the bdev bh list */
634 tbh = buffer_head_search(bdev, block);
635 if (tbh) {
636 bh = tbh;
637 get_bh(bh);
639 goto errorout;
640 }
642
643 bh = new_buffer_head();
644 if (!bh) {
645 goto errorout;
646 }
647 bh->b_bdev = bdev;
648 bh->b_blocknr = block;
649 bh->b_size = size;
650 bh->b_data = NULL;
651#ifdef __REACTOS__
653#endif
654
655again:
656
657 offset.QuadPart = (s64) bh->b_blocknr;
658 offset.QuadPart <<= BLOCK_BITS;
659
660 if (zero) {
661 if (!CcPreparePinWrite(Vcb->Volume,
662 &offset,
663 bh->b_size,
664 FALSE,
665 PIN_WAIT,
666 &bh->b_bcb,
667#ifdef __REACTOS__
668 (PVOID *)&bh->b_data)) {
669#else
670 &bh->b_data)) {
671#endif
672 Ext2Sleep(100);
673 goto again;
674 }
675 } else {
676 if (!CcPinRead( Vcb->Volume,
677 &offset,
678 bh->b_size,
679 PIN_WAIT,
680 &bh->b_bcb,
681#ifdef __REACTOS__
682 (PVOID *)&bh->b_data)) {
683#else
684 &bh->b_data)) {
685#endif
686 Ext2Sleep(100);
687 goto again;
688 }
689 set_buffer_uptodate(bh);
690 }
691
692 if (bh->b_bcb)
694
695 if (!bh->b_data) {
697 bh = NULL;
698 goto errorout;
699 }
700 get_bh(bh);
701
702 DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p ptr=%p.\n",
703 Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_data));
704
706 /* do search again here */
707 tbh = buffer_head_search(bdev, block);
708 if (tbh) {
709 get_bh(tbh);
711 bh = tbh;
715 goto errorout;
716 } else {
717 buffer_head_insert(bdev, bh);
718 }
720
721 /* we get it */
722errorout:
723
724 return bh;
725}
726
727int submit_bh_pin(int rw, struct buffer_head *bh)
728{
729 struct block_device *bdev = bh->b_bdev;
730 PEXT2_VCB Vcb = bdev->bd_priv;
733
734 ASSERT(Vcb->Identifier.Type == EXT2VCB);
735 ASSERT(bh->b_data && bh->b_bcb);
736
737 if (rw == WRITE) {
738
739 if (IsVcbReadOnly(Vcb)) {
740 goto errorout;
741 }
742
743 SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
744 Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
745
748 (ULONG)bh->b_blocknr,
749 (ULONG)bh->b_blocknr,
750 (bh->b_size >> BLOCK_BITS));
751 } else {
752 }
753
754errorout:
755
756 unlock_buffer(bh);
757 put_bh(bh);
758 return 0;
759}
760
761#if 0
762
763struct buffer_head *
765 struct block_device * bdev,
767 unsigned long size,
768 int zero
769)
770{
771 return get_block_bh_mdl(bdev, block, size, zero);
772}
773
774int submit_bh(int rw, struct buffer_head *bh)
775{
776 return submit_bh_mdl(rw, bh);
777}
778
779#else
780
781struct buffer_head *
783 struct block_device * bdev,
785 unsigned long size,
786 int zero
787)
788{
789 return get_block_bh_pin(bdev, block, size, zero);
790}
791
/* Public submit entry point: this build routes all buffer I/O through
 * the cache-manager pin implementation (see the #if 0 alternative above
 * for the MDL-based path). */
int submit_bh(int rw, struct buffer_head *bh)
{
    return submit_bh_pin(rw, bh);
}
796#endif
797
798struct buffer_head *
800 struct block_device * bdev,
802 unsigned long size
803)
804{
805 return get_block_bh(bdev, block, size, 0);
806}
807
808void __brelse(struct buffer_head *bh)
809{
810 struct block_device *bdev = bh->b_bdev;
812
813 ASSERT(Vcb->Identifier.Type == EXT2VCB);
814
815 /* write data in case it's dirty */
816 while (buffer_dirty(bh)) {
817 ll_rw_block(WRITE, 1, &bh);
818 }
819
821 if (atomic_dec_and_test(&bh->b_count)) {
822 ASSERT(0 == atomic_read(&bh->b_count));
823 } else {
825 return;
826 }
828#ifdef __REACTOS__
829 if (!IsListEmpty(&bh->b_link))
830#endif
832 InsertTailList(&Vcb->bd.bd_bh_free, &bh->b_link);
833 KeClearEvent(&Vcb->bd.bd_bh_notify);
836
837 DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
838 atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
839 bh->b_blocknr, bh, bh->b_data ));
840}
841
842
/* Drop a reference and discard pending modifications: the dirty bit is
 * cleared first so that __brelse() does not write the block back. */
void __bforget(struct buffer_head *bh)
{
    clear_buffer_dirty(bh);
    __brelse(bh);
}
848
850{
851}
852
854{
855 clear_buffer_locked(bh);
856}
857
859{
860}
861
862void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
863{
864 int i;
865
866 for (i = 0; i < nr; i++) {
867
868 struct buffer_head *bh = bhs[i];
869
870 if (rw == SWRITE)
871 lock_buffer(bh);
872 else if (test_set_buffer_locked(bh))
873 continue;
874
875 if (rw == WRITE || rw == SWRITE) {
876 if (test_clear_buffer_dirty(bh)) {
877 get_bh(bh);
878 submit_bh(WRITE, bh);
879 continue;
880 }
881 } else {
882 if (!buffer_uptodate(bh)) {
883 get_bh(bh);
884 submit_bh(rw, bh);
885 continue;
886 }
887 }
888 unlock_buffer(bh);
889 }
890}
891
893{
894 ll_rw_block(READ, 1, &bh);
895 return 0;
896}
897
899{
900 int ret = 0;
901
902 ASSERT(atomic_read(&bh->b_count) <= 1);
903 lock_buffer(bh);
904 if (test_clear_buffer_dirty(bh)) {
905 get_bh(bh);
906 ret = submit_bh(WRITE, bh);
907 wait_on_buffer(bh);
908 } else {
909 unlock_buffer(bh);
910 }
911 return ret;
912}
913
915{
916 set_buffer_dirty(bh);
917}
918
920{
921 PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;
923 return 0;
924}
925
926/*
927 * Perform a pagecache lookup for the matching buffer. If it's there, refre
928 * it in the LRU and mark it as accessed. If it is not present then return
929 * NULL
930 */
931struct buffer_head *
932__find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
933{
934 return __getblk(bdev, block, size);
935}
936
937
938//
939// inode block mapping
940//
941
943{
944 ULONGLONG lcn = 0;
945 struct super_block *s = i->i_sb;
946
947 PEXT2_MCB Mcb = (PEXT2_MCB)i->i_priv;
948 PEXT2_VCB Vcb = (PEXT2_VCB)s->s_priv;
952
953 if (!Mcb || !Vcb) {
954 goto errorout;
955 }
956
957 offset <<= BLOCK_BITS;
959 NULL,
960 Vcb,
961 Mcb,
962 offset,
964 FALSE,
965 &extent
966 );
967
968 if (!NT_SUCCESS(status)) {
969 goto errorout;
970 }
971
972 if (extent == NULL) {
973 goto errorout;
974 }
975
976 lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);
977
978errorout:
979
980 if (extent) {
982 }
983
984 return lcn;
985}
986
987void iget(struct inode *inode)
988{
990}
991
992void iput(struct inode *inode)
993{
995 kfree(inode);
996 }
997}
998
999//
1000// initialzer and destructor
1001//
1002
1003int
1005{
1006 int rc = 0;
1007
1008 rc = ext2_init_bh();
1009 if (rc != 0) {
1010 goto errorout;
1011 }
1012
1013errorout:
1014
1015 return rc;
1016}
1017
1018void
1020{
1022}
#define ENOMEM
Definition: acclib.h:84
int memcmp(void *Buffer1, void *Buffer2, ACPI_SIZE Count)
Definition: utclib.c:112
char * strncpy(char *DstString, const char *SrcString, ACPI_SIZE Count)
Definition: utclib.c:427
struct outqueuenode * head
Definition: adnsresfilter.c:66
static int state
Definition: maze.c:121
#define atomic_read(v)
Definition: atomic.h:23
static void atomic_inc(atomic_t volatile *v)
Definition: atomic.h:95
#define ATOMIC_INIT(i)
Definition: atomic.h:14
static void atomic_dec(atomic_t volatile *v)
Definition: atomic.h:107
static int atomic_dec_and_test(atomic_t volatile *v)
Definition: atomic.h:121
LONG NTSTATUS
Definition: precomp.h:26
#define DEBUG(args)
Definition: rdesktop.h:129
static int list_empty(struct list_entry *head)
Definition: list.h:58
static void list_add_tail(struct list_entry *head, struct list_entry *entry)
Definition: list.h:83
static void list_add(struct list_entry *entry, struct list_entry *prev, struct list_entry *next)
Definition: list.h:64
struct _root root
VOID NTAPI CcSetDirtyPinnedData(IN PVOID BcbVoid, IN OPTIONAL PLARGE_INTEGER Lsn)
Definition: cachesub.c:121
Definition: bufpool.h:45
Definition: _queue.h:67
#define BLOCK_SIZE
Definition: dlist.c:220
#define NULL
Definition: types.h:112
#define TRUE
Definition: types.h:120
#define FALSE
Definition: types.h:117
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:33
#define BLOCK_BITS
Definition: stream.h:22
#define INIT_LIST_HEAD(ptr)
Definition: list.h:24
static void list_del(struct list_head *entry)
Definition: list.h:89
static void list_del_init(struct list_head *entry)
Definition: list.h:100
#define PS_BUFF_HEAD
Definition: common.h:35
static int list_empty_careful(const struct list_head *head)
Definition: list.h:138
void rb_erase(struct rb_node *, struct rb_root *)
Definition: rbtree.c:223
void rb_insert(struct rb_root *root, struct rb_node *node, int(*cmp)(struct rb_node *, struct rb_node *))
Definition: rbtree.c:392
unsigned __int64 sector_t
Definition: types.h:82
signed long long s64
Definition: linux.h:60
#define RemoveEntryList(Entry)
Definition: env_spec_w32.h:986
#define InsertTailList(ListHead, Entry)
#define IsListEmpty(ListHead)
Definition: env_spec_w32.h:954
#define KeInitializeEvent(pEvt, foo, foo2)
Definition: env_spec_w32.h:477
#define KeSetEvent(pEvt, foo, foo2)
Definition: env_spec_w32.h:476
#define KeQuerySystemTime(t)
Definition: env_spec_w32.h:570
#define ExAcquireResourceExclusiveLite(res, wait)
Definition: env_spec_w32.h:615
#define InitializeListHead(ListHead)
Definition: env_spec_w32.h:944
VOID NTAPI KeClearEvent(IN PKEVENT Event)
Definition: eventobj.c:22
#define SetFlag(_F, _SF)
Definition: ext2fs.h:187
@ EXT2VCB
Definition: ext2fs.h:462
#define IsVcbReadOnly(Vcb)
Definition: ext2fs.h:814
VOID Ext2FreeExtent(IN PEXT2_EXTENT Extent)
Definition: memory.c:505
VOID Ext2Sleep(ULONG ms)
Definition: misc.c:297
#define TOTAL_BLOCKS
Definition: ext2fs.h:101
#define DEC_MEM_COUNT(_i, _p, _s)
Definition: ext2fs.h:608
struct _EXT2_MCB * PEXT2_MCB
Definition: ext2fs.h:484
NTSTATUS Ext2BuildExtents(IN PEXT2_IRP_CONTEXT IrpContext, IN PEXT2_VCB Vcb, IN PEXT2_MCB Mcb, IN ULONGLONG Offset, IN ULONG Size, IN BOOLEAN bAlloc, OUT PEXT2_EXTENT *Chain)
Definition: memory.c:1207
#define DL_BH
Definition: ext2fs.h:1448
#define DbgBreak()
Definition: ext2fs.h:46
struct _EXT2_VCB * PEXT2_VCB
BOOLEAN Ext2AddBlockExtent(IN PEXT2_VCB Vcb, IN PEXT2_MCB Mcb, IN ULONG Start, IN ULONG Block, IN ULONG Number)
Definition: memory.c:1027
#define INC_MEM_COUNT(_i, _p, _s)
Definition: ext2fs.h:607
PMDL Ext2CreateMdl(IN PVOID Buffer, IN ULONG Length, IN LOCK_OPERATION Operation)
Definition: block.c:64
VOID Ext2DestroyMdl(IN PMDL Mdl)
Definition: block.c:97
NTSTATUS Ext2FlushVolume(IN PEXT2_IRP_CONTEXT IrpContext, IN PEXT2_VCB Vcb, IN BOOLEAN bShutDown)
Definition: flush.c:43
#define IsFlagOn(a, b)
Definition: ext2fs.h:177
IN PVCB IN VBO IN ULONG OUT PBCB * Bcb
Definition: fatprocs.h:415
IN PVCB IN ULONG IN OUT PULONG IN BOOLEAN OUT PLARGE_MCB Mcb
Definition: fatprocs.h:349
GLdouble s
Definition: gl.h:2039
GLdouble GLdouble GLdouble GLdouble q
Definition: gl.h:2063
GLsizeiptr size
Definition: glext.h:5919
GLuint buffer
Definition: glext.h:5915
GLboolean GLboolean GLboolean b
Definition: glext.h:6204
GLbitfield flags
Definition: glext.h:7161
GLfloat GLfloat p
Definition: glext.h:8902
GLboolean GLboolean GLboolean GLboolean a
Definition: glext.h:6204
GLuint64EXT * result
Definition: glext.h:11304
GLintptr offset
Definition: glext.h:5920
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
#define container_of(ptr, type, member)
Definition: glue.h:15
#define KeGetCurrentThread
Definition: hal.h:55
uint32_t entry
Definition: isohybrid.c:63
if(dx< 0)
Definition: linetemp.h:194
void free_buffer_head(struct buffer_head *bh)
Definition: linux.c:346
kmem_cache_t * kmem_cache_create(const char *name, size_t size, size_t offset, unsigned long flags, kmem_cache_cb_t ctor)
Definition: linux.c:48
void iget(struct inode *inode)
Definition: linux.c:987
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
Definition: linux.c:189
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
Definition: linux.c:167
struct buffer_head * new_buffer_head()
Definition: linux.c:327
void __brelse(struct buffer_head *bh)
Definition: linux.c:808
static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
Definition: linux.c:419
void iput(struct inode *inode)
Definition: linux.c:992
struct buffer_head * __find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
Definition: linux.c:932
struct task_struct * current
Definition: linux.c:32
void kmem_cache_free(kmem_cache_t *kc, void *p)
Definition: linux.c:103
static struct buffer_head * __buffer_head_search(struct rb_root *root, sector_t blocknr)
Definition: linux.c:373
struct buffer_head * get_block_bh_mdl(struct block_device *bdev, sector_t block, unsigned long size, int zero)
Definition: linux.c:430
PEXT2_GLOBAL Ext2Global
Definition: init.c:16
struct task_struct current_task
Definition: linux.c:26
int submit_bh_mdl(int rw, struct buffer_head *bh)
Definition: linux.c:551
void unlock_buffer(struct buffer_head *bh)
Definition: linux.c:853
struct _EXT2_BUFFER_HEAD g_jbh
struct buffer_head * get_block_bh_pin(struct block_device *bdev, sector_t block, unsigned long size, int zero)
Definition: linux.c:611
struct buffer_head * get_block_bh(struct block_device *bdev, sector_t block, unsigned long size, int zero)
Definition: linux.c:782
struct __wait_queue * wait_queue_create()
Definition: linux.c:122
static void __add_wait_queue_tail(wait_queue_head_t *head, struct __wait_queue *new)
Definition: linux.c:155
static struct buffer_head * buffer_head_search(struct block_device *bdev, sector_t blocknr)
Definition: linux.c:411
struct buffer_head * __getblk(struct block_device *bdev, sector_t block, unsigned long size)
Definition: linux.c:799
void * kmem_cache_alloc(kmem_cache_t *kc, int flags)
Definition: linux.c:92
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
Definition: linux.c:231
int kmem_cache_destroy(kmem_cache_t *kc)
Definition: linux.c:82
void * kzalloc(int size, int flags)
Definition: linux.c:34
static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
Definition: linux.c:396
void ext2_destroy_linux()
Definition: linux.c:1019
int wake_up(wait_queue_head_t *queue)
Definition: linux.c:279
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
Definition: linux.c:212
int bh_submit_read(struct buffer_head *bh)
Definition: linux.c:892
int ext2_init_linux()
Definition: linux.c:1004
static void __remove_wait_queue(wait_queue_head_t *head, struct __wait_queue *old)
Definition: linux.c:161
int submit_bh_pin(int rw, struct buffer_head *bh)
Definition: linux.c:727
int sync_dirty_buffer(struct buffer_head *bh)
Definition: linux.c:898
int submit_bh(int rw, struct buffer_head *bh)
Definition: linux.c:792
void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
Definition: linux.c:424
void ext2_destroy_bh()
Definition: linux.c:318
void __bforget(struct buffer_head *bh)
Definition: linux.c:843
void __wait_on_buffer(struct buffer_head *bh)
Definition: linux.c:858
void init_waitqueue_head(wait_queue_head_t *q)
Definition: linux.c:115
void __lock_buffer(struct buffer_head *bh)
Definition: linux.c:849
void wait_queue_destroy(struct __wait_queue *wait)
Definition: linux.c:142
ULONGLONG bmap(struct inode *i, ULONGLONG b)
Definition: linux.c:942
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
Definition: linux.c:178
static void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
Definition: linux.c:147
int sync_blockdev(struct block_device *bdev)
Definition: linux.c:919
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
Definition: linux.c:862
void mark_buffer_dirty(struct buffer_head *bh)
Definition: linux.c:914
int ext2_init_bh()
Definition: linux.c:300
void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
Definition: linux.c:250
VOID NTAPI ExDeleteNPagedLookasideList(IN PNPAGED_LOOKASIDE_LIST Lookaside)
Definition: lookas.c:170
VOID NTAPI ExInitializeNPagedLookasideList(IN PNPAGED_LOOKASIDE_LIST Lookaside, IN PALLOCATE_FUNCTION Allocate OPTIONAL, IN PFREE_FUNCTION Free OPTIONAL, IN ULONG Flags, IN SIZE_T Size, IN ULONG Tag, IN USHORT Depth)
Definition: lookas.c:218
#define memmove(s1, s2, n)
Definition: mkisofs.h:881
PVOID NTAPI MmMapLockedPagesSpecifyCache(IN PMDL Mdl, IN KPROCESSOR_MODE AccessMode, IN MEMORY_CACHING_TYPE CacheType, IN PVOID BaseAddress, IN ULONG BugCheckOnFailure, IN ULONG Priority)
Definition: mdlsup.c:660
VOID NTAPI MmUnmapLockedPages(IN PVOID BaseAddress, IN PMDL Mdl)
Definition: mdlsup.c:837
#define ASSERT(a)
Definition: mode.c:44
#define EXPORT_SYMBOL(x)
Definition: module.h:272
#define kmalloc(size, gfp)
Definition: module.h:1125
#define SWRITE
Definition: module.h:1178
static void put_bh(struct buffer_head *bh)
Definition: module.h:949
static void lock_buffer(struct buffer_head *bh)
Definition: module.h:1028
#define GFP_KERNEL
Definition: module.h:668
#define GFP_NOFS
Definition: module.h:669
#define SLAB_TEMPORARY
Definition: module.h:1133
#define __set_current_state(state)
Definition: module.h:506
static void get_bh(struct buffer_head *bh)
Definition: module.h:944
#define printk
Definition: module.h:231
#define is_sync_wait(wait)
Definition: module.h:504
static void wait_on_buffer(struct buffer_head *bh)
Definition: module.h:1021
#define WQ_FLAG_EXCLUSIVE
Definition: module.h:472
#define WRITE
Definition: module.h:1176
void(* kmem_cache_cb_t)(void *, kmem_cache_t *, unsigned long)
Definition: module.h:1135
#define spin_lock_init(sl)
Definition: module.h:305
#define kfree(p)
Definition: module.h:1126
#define KERN_EMERG
Definition: module.h:222
#define spin_lock_irqsave(sl, flags)
Definition: module.h:308
#define spin_unlock_irqrestore(sl, flags)
Definition: module.h:309
#define set_current_state(state)
Definition: module.h:505
#define WQ_FLAG_AUTO_REMOVAL
Definition: module.h:473
@ HighPagePriority
Definition: imports.h:55
static PVOID ptr
Definition: dispmode.c:27
ULONG nr
Definition: thread.c:7
LPFNCONSTRUCTOR ctor
Definition: msctf.c:83
#define KernelMode
Definition: asm.h:34
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
@ SynchronizationEvent
#define PIN_WAIT
BOOLEAN NTAPI ExAcquireSharedStarveExclusive(IN PERESOURCE Resource, IN BOOLEAN Wait)
Definition: resource.c:1068
VOID FASTCALL ExReleaseResourceLite(IN PERESOURCE Resource)
Definition: resource.c:1822
#define Vcb
Definition: cdprocs.h:1415
VOID NTAPI CcUnpinData(IN PVOID Bcb)
Definition: pinsup.c:955
VOID NTAPI CcSetBcbOwnerPointer(IN PVOID Bcb, IN PVOID OwnerPointer)
Definition: pinsup.c:979
VOID NTAPI CcUnpinDataForThread(IN PVOID Bcb, IN ERESOURCE_THREAD ResourceThreadId)
Definition: pinsup.c:991
BOOLEAN NTAPI CcPreparePinWrite(IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN Zero, IN ULONG Flags, OUT PVOID *Bcb, OUT PVOID *Buffer)
Definition: pinsup.c:827
BOOLEAN NTAPI CcPinRead(IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG Flags, OUT PVOID *Bcb, OUT PVOID *Buffer)
Definition: pinsup.c:802
#define long
Definition: qsort.c:33
#define rw
Definition: rosglue.h:38
#define memset(x, y, z)
Definition: compat.h:39
int zero
Definition: sehframes.cpp:29
atomic_t bh_acount
Definition: linux.c:296
kmem_cache_t * bh_cache
Definition: linux.c:294
atomic_t bh_count
Definition: linux.c:295
EXT2_REAPER bhReaper
Definition: ext2fs.h:550
KEVENT Wait
Definition: ext2fs.h:506
void * private
Definition: module.h:477
unsigned int flags
Definition: module.h:476
struct list_head task_list
Definition: module.h:479
KEVENT event
Definition: module.h:478
void * bd_priv
Definition: module.h:554
ERESOURCE bd_bh_lock
Definition: module.h:560
struct rb_root bd_bh_root
Definition: module.h:561
struct block_device * b_bdev
Definition: module.h:731
struct rb_node b_rb_node
Definition: module.h:741
size_t b_size
Definition: module.h:734
LIST_ENTRY b_link
Definition: module.h:724
blkcnt_t b_blocknr
Definition: module.h:733
LARGE_INTEGER b_ts_creat
Definition: module.h:743
void * b_bcb
Definition: module.h:728
atomic_t b_count
Definition: module.h:740
PMDL b_mdl
Definition: module.h:727
char * b_data
Definition: module.h:735
LARGE_INTEGER b_ts_drop
Definition: module.h:744
Definition: fs.h:78
atomic_t i_count
Definition: fs.h:90
CHAR name[32]
Definition: module.h:1138
ULONG size
Definition: module.h:1140
atomic_t acount
Definition: module.h:1142
atomic_t count
Definition: module.h:1141
kmem_cache_cb_t constructor
Definition: module.h:1144
NPAGED_LOOKASIDE_LIST la
Definition: module.h:1143
Definition: list.h:15
Definition: name.c:39
Definition: rbtree.h:98
Definition: ps.c:97
Definition: fs.h:64
#define new(TYPE, numElems)
Definition: treelist.c:54
int64_t LONGLONG
Definition: typedefs.h:68
uint32_t ULONG
Definition: typedefs.h:59
uint64_t ULONGLONG
Definition: typedefs.h:67
#define READ(_gif, _buf, _len)
Definition: ungif.c:107
int ret
ULONG_PTR ERESOURCE_THREAD
Definition: extypes.h:208
#define FO_FILE_MODIFIED
Definition: iotypes.h:1788
@ IoModifyAccess
Definition: ketypes.h:865
@ MmNonCached
Definition: mmtypes.h:129
#define MDL_MAPPED_TO_SYSTEM_VA
Definition: mmtypes.h:18
static unsigned int block
Definition: xmlmemory.c:101