ReactOS 0.4.15-dev-7907-g95bf896
expool.c
Go to the documentation of this file.
1/*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9/* INCLUDES *******************************************************************/
10
11#include <ntoskrnl.h>
12#define NDEBUG
13#include <debug.h>
14
15#define MODULE_INVOLVED_IN_ARM3
16#include <mm/ARM3/miarm.h>
17
18#undef ExAllocatePoolWithQuota
19#undef ExAllocatePoolWithQuotaTag
20
21/* GLOBALS ********************************************************************/
22
/* Stored in the Va field of a big-page tracker entry to mark the slot free
 * (the table-reallocation code below seeds fresh tables with this value). */
23#define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25/*
26 * This defines when we shrink or expand the table.
27 * 3 --> keep the number of used entries in the 33%-66% of the table capacity.
28 * 4 --> 25% - 75%
29 * etc.
30 */
31#define POOL_BIG_TABLE_USE_RATE 4
32
/* Context passed to the "Generic DPC" that snapshots the pool tracker table.
 * NOTE(review): the structure members and the closing typedef line (original
 * listing lines 35-58) were lost in extraction; from the callback below it
 * evidently carries at least PoolTrackTable, PoolTrackTableSize and
 * PoolTrackTableSizeExpansion — confirm against the real source. */
33typedef struct _POOL_DPC_CONTEXT
34{
40
59
60/* Pool block/header/list access macros */
/* Header that immediately precedes a user allocation pointer */
61#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* LIST_ENTRY stored in the data area of a free block, right after its header */
62#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Header located i block-size units after header x (i may be negative) */
63#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Neighbors of block x, derived from the BlockSize/PreviousSize fields */
64#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
65#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
66
67/*
68 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
69 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
70 * pool code, but only for checked builds.
71 *
72 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
73 * that these checks are done even on retail builds, due to the increasing
74 * number of kernel-mode attacks which depend on dangling list pointers and other
75 * kinds of list-based attacks.
76 *
77 * For now, I will leave these checks on all the time, but later they are likely
78 * to be DBG-only, at least until there are enough kernel-mode security attacks
79 * against ReactOS to warrant the performance hit.
80 *
81 * For now, these are not made inline, so we can get good stack traces.
82 */
/* ---------------------------------------------------------------------------
 * Encoded pool free-list primitives. Free-list links are stored with bit 0
 * set ("encoded") so that a raw, unencoded pointer written by an attacker or
 * by corruption is immediately distinguishable; every traversal decodes with
 * & ~1 and every store re-encodes with | 1.
 * NOTE(review): all function signature lines in this region (original listing
 * lines 83-85, 90-92, 98-99, 114, 119-121, 128, 137-139, 149-151, 163-164,
 * 178-179) were dropped by the extraction; the bodies below belong —
 * in order — to what the call sites name ExpDecodePoolLink,
 * ExpEncodePoolLink, ExpCheckPoolLinks, ExpInitializePoolListHead,
 * ExpIsPoolListEmpty (presumably), ExpRemovePoolEntryList,
 * ExpRemovePoolHeadList, ExpRemovePoolTailList, ExpInsertPoolTailList and
 * ExpInsertPoolHeadList — confirm against the real source before editing.
 * ------------------------------------------------------------------------- */
/* Decode: strip the low tag bit to recover the real LIST_ENTRY pointer */
86{
87 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
88}
89
/* Encode: set the low tag bit before storing a link */
93{
94 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
95}
96
/* Sanity-check a list head: both neighbors must point back at it.
 * NOTE(review): the 4th/5th KeBugCheckEx arguments (original lines 107-108)
 * are missing from this extraction. */
97VOID
100{
101 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
102 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
103 {
104 KeBugCheckEx(BAD_POOL_HEADER,
105 3,
106 (ULONG_PTR)ListHead,
109 }
110}
111
/* Initialize an empty list head with encoded self-links */
112VOID
113NTAPI
115{
116 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
117}
118
/* TRUE when the (decoded) forward link points back at the head */
120NTAPI
122{
123 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
124}
125
/* Unlink Entry from its list, re-encoding the neighbors' links */
126VOID
127NTAPI
129{
130 PLIST_ENTRY Blink, Flink;
131 Flink = ExpDecodePoolLink(Entry->Flink);
132 Blink = ExpDecodePoolLink(Entry->Blink);
133 Flink->Blink = ExpEncodePoolLink(Blink);
134 Blink->Flink = ExpEncodePoolLink(Flink);
135}
136
/* Pop and return the first entry of the list */
138NTAPI
140{
141 PLIST_ENTRY Entry, Flink;
142 Entry = ExpDecodePoolLink(ListHead->Flink);
143 Flink = ExpDecodePoolLink(Entry->Flink);
144 ListHead->Flink = ExpEncodePoolLink(Flink);
145 Flink->Blink = ExpEncodePoolLink(ListHead);
146 return Entry;
147}
148
/* Pop and return the last entry of the list */
150NTAPI
152{
153 PLIST_ENTRY Entry, Blink;
154 Entry = ExpDecodePoolLink(ListHead->Blink);
155 Blink = ExpDecodePoolLink(Entry->Blink);
156 ListHead->Blink = ExpEncodePoolLink(Blink);
157 Blink->Flink = ExpEncodePoolLink(ListHead);
158 return Entry;
159}
160
/* Append Entry at the tail, validating the head's links before and after */
161VOID
162NTAPI
165{
166 PLIST_ENTRY Blink;
167 ExpCheckPoolLinks(ListHead);
168 Blink = ExpDecodePoolLink(ListHead->Blink);
169 Entry->Flink = ExpEncodePoolLink(ListHead);
170 Entry->Blink = ExpEncodePoolLink(Blink);
171 Blink->Flink = ExpEncodePoolLink(Entry);
172 ListHead->Blink = ExpEncodePoolLink(Entry);
173 ExpCheckPoolLinks(ListHead);
174}
175
/* Insert Entry at the head, validating the head's links before and after */
176VOID
177NTAPI
180{
181 PLIST_ENTRY Flink;
182 ExpCheckPoolLinks(ListHead);
183 Flink = ExpDecodePoolLink(ListHead->Flink);
184 Entry->Flink = ExpEncodePoolLink(Flink);
185 Entry->Blink = ExpEncodePoolLink(ListHead);
186 Flink->Blink = ExpEncodePoolLink(Entry);
187 ListHead->Flink = ExpEncodePoolLink(Entry);
188 ExpCheckPoolLinks(ListHead);
189}
190
/* Validate the invariants of one pool block header against its neighbors:
 * same-page adjacency, PreviousSize/BlockSize reciprocity with the previous
 * and next block, page alignment of the first block on a page, and a nonzero
 * BlockSize. Any violation bugchecks with BAD_POOL_HEADER.
 * NOTE(review): the function signature (original listing line 193) and the
 * final argument of each KeBugCheckEx call (original lines 211, 225, 235,
 * 258, 275, 289) are missing from this extraction. */
149VOID — (kept byte-identical below)
293
/* Verify that a pool allocation P carries the expected Tag and, when
 * PoolType != -1, that it lives in the expected pool. Page-aligned pointers
 * are looked up in the big-page table; everything else is checked via the
 * inline POOL_HEADER. Mismatches bugcheck with BAD_POOL_CALLER.
 * NOTE(review): the signature line (original 296/298/301/303), the big-table
 * lock acquire/release (original 313, 332) and the header-check call
 * (original 354) are missing from this extraction — do not edit blind. */
294VOID
295NTAPI
297 PVOID P,
299 ULONG Tag)
300{
302 ULONG i;
304 POOL_TYPE RealPoolType;
305
306 /* Get the pool header */
307 Entry = ((PPOOL_HEADER)P) - 1;
308
309 /* Check if this is a large allocation */
310 if (PAGE_ALIGN(P) == P)
311 {
312 /* Lock the pool table */
314
315 /* Find the pool tag */
316 for (i = 0; i < PoolBigPageTableSize; i++)
317 {
318 /* Check if this is our allocation */
319 if (PoolBigPageTable[i].Va == P)
320 {
321 /* Make sure the tag is ok */
322 if (PoolBigPageTable[i].Key != Tag)
323 {
324 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
325 }
326
327 break;
328 }
329 }
330
331 /* Release the lock */
333
334 if (i == PoolBigPageTableSize)
335 {
336 /* Did not find the allocation */
337 //ASSERT(FALSE);
338 }
339
340 /* Get Pool type by address */
341 RealPoolType = MmDeterminePoolType(P);
342 }
343 else
344 {
345 /* Verify the tag */
346 if (Entry->PoolTag != Tag)
347 {
348 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
349 &Tag, &Entry->PoolTag, Entry->PoolTag);
350 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
351 }
352
353 /* Check the rest of the header */
355
356 /* Get Pool type from entry */
357 RealPoolType = (Entry->PoolType - 1);
358 }
359
360 /* Should we check the pool type? */
361 if (PoolType != -1)
362 {
363 /* Verify the pool type */
364 if (RealPoolType != PoolType)
365 {
366 DPRINT1("Wrong pool type! Expected %s, got %s\n",
367 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
368 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
369 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
370 }
371 }
372}
373
/* Walk every block on the page containing Block, validating each header and
 * confirming that Block itself is one of them and that the block sizes sum
 * exactly to a page; otherwise bugcheck BAD_POOL_HEADER code 10.
 * NOTE(review): the signature (original 376), the Entry declaration
 * (original 380), the per-block ExpCheckPoolHeader call (original 393-394)
 * and the Entry advance (original 397) are missing from this extraction. */
374VOID
375NTAPI
377{
378 BOOLEAN FoundBlock = FALSE;
379 SIZE_T Size = 0;
381
382 /* Get the first entry for this page, make sure it really is the first */
383 Entry = PAGE_ALIGN(Block);
384 ASSERT(Entry->PreviousSize == 0);
385
386 /* Now scan each entry */
387 while (TRUE)
388 {
389 /* When we actually found our block, remember this */
390 if (Entry == Block) FoundBlock = TRUE;
391
392 /* Now validate this block header */
395 /* And go to the next one, keeping track of our size */
396 Size += Entry->BlockSize;
398
399 /* If we hit the last block, stop */
400 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
401
402 /* If we hit the end of the page, stop */
403 if (PAGE_ALIGN(Entry) == Entry) break;
404 }
405
406 /* We must've found our block, and we must have hit the end of the page */
407 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
408 {
409 /* Otherwise, the blocks are messed up */
410 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
411 }
412}
413
/* Enforce the IRQL contract for pool calls: <= APC_LEVEL for paged pool,
 * <= DISPATCH_LEVEL for nonpaged pool; violations bugcheck BAD_POOL_CALLER.
 * NOTE(review): the signature (original 416-417), the actual IRQL comparison
 * (original 424-426) and two KeBugCheckEx arguments (original 432-433, 435)
 * are missing from this extraction — the name is presumably
 * ExpCheckPoolIrqlLevel; confirm against the real source. */
415VOID
418 IN PVOID Entry)
419{
420 //
421 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
422 // be DISPATCH_LEVEL or lower for Non Paged Pool
423 //
427 {
428 //
429 // Take the system down
430 //
431 KeBugCheckEx(BAD_POOL_CALLER,
434 PoolType,
436 }
437}
438
/* Hash a 4-byte pool tag into a tracker-table bucket: multiply by the prime
 * 40543, XOR the low dword with the high dword, and mask into the table.
 * NOTE(review): the signature lines (original 441 with the Tag parameter)
 * are missing from this extraction. */
440ULONG
442 IN SIZE_T BucketMask)
443{
444 //
445 // Compute the hash by multiplying with a large prime number and then XORing
446 // with the HIDWORD of the result.
447 //
448 // Finally, AND with the bucket mask to generate a valid index/bucket into
449 // the table
450 //
451 ULONGLONG Result = (ULONGLONG)40543 * Tag;
452 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
453}
454
/* Partial hash of an address for the big-page table: fold the page-number
 * bytes together with XOR. The caller applies the bucket mask later, under
 * the expansion pushlock, because the table size may change concurrently.
 * NOTE(review): the signature (original 457) and the Result computation line
 * (original 469, presumably the address shifted by PAGE_SHIFT) are missing
 * from this extraction. */
456ULONG
458{
460 //
461 // Compute the hash by converting the address into a page number, and then
462 // XORing each nibble with the next one.
463 //
464 // We do *NOT* AND with the bucket mask at this point because big table expansion
465 // might happen. Therefore, the final step of the hash must be performed
466 // while holding the expansion pushlock, and this is why we call this a
467 // "partial" hash only.
468 //
470 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
471}
472
473#if DBG
474/*
475 * FORCEINLINE
476 * BOOLEAN
477 * ExpTagAllowPrint(CHAR Tag);
478 */
/* TRUE when the tag byte is a printable ASCII character */
479#define ExpTagAllowPrint(Tag) \
480 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)
481
482#ifdef KDBG
483#include <kdbg/kdb.h>
484#endif
485
482#ifdef-selected print helper: route to the kernel debugger console when dbg
   is set, otherwise to DPRINT1.
   NOTE(review): the KDBG variant is a bare if/else macro — a classic
   dangling-else hazard if used as `if (x) MiDumperPrint(...); else ...`.
   Consider wrapping in do { } while (0) in the real source. */
486#ifdef KDBG
487#define MiDumperPrint(dbg, fmt, ...) \
488 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
489 else DPRINT1(fmt, ##__VA_ARGS__)
490#else
491#define MiDumperPrint(dbg, fmt, ...) \
492 DPRINT1(fmt, ##__VA_ARGS__)
493#endif
494
/* Dump per-tag pool usage (allocs/frees/bytes, paged and nonpaged), either
 * from an out-of-memory path or from the kernel debugger (!poolused).
 * Entries are filtered by caller Tag/Mask; tags with all-printable bytes are
 * shown as 'ABCD', others as hex, and the TAG_NONE bucket prints as "Anon".
 * NOTE(review): the signature with its parameters (original 496: presumably
 * CalledFromDbg, Tag, Mask, Flags), the TableEntry declaration (original
 * 541/543) and the Verbose flag extraction (original 520) are missing from
 * this extraction. Also note the inner `CHAR Tag[4]` shadows the Tag
 * parameter used in the enclosing condition — legal but easy to misread. */
495VOID
497{
498 SIZE_T i;
500
501 //
502 // Only print header if called from OOM situation
503 //
504 if (!CalledFromDbg)
505 {
506 DPRINT1("---------------------\n");
507 DPRINT1("Out of memory dumper!\n");
508 }
509#ifdef KDBG
510 else
511 {
512 KdbpPrint("Pool Used:\n");
513 }
514#endif
515
516 //
517 // Remember whether we'll have to be verbose
518 // This is the only supported flag!
519 //
521
522 //
523 // Print table header
524 //
525 if (Verbose)
526 {
527 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
528 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
529 }
530 else
531 {
532 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
533 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
534 }
535
536 //
537 // We'll extract allocations for all the tracked pools
538 //
539 for (i = 0; i < PoolTrackTableSize; ++i)
540 {
542
544
545 //
546 // We only care about tags which have allocated memory
547 //
548 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
549 {
550 //
551 // If there's a tag, attempt to do a pretty print
552 // only if it matches the caller's tag, or if
553 // any tag is allowed
554 // For checking whether it matches caller's tag,
555 // use the mask to make sure not to mess with the wildcards
556 //
557 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
558 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
559 {
560 CHAR Tag[4];
561
562 //
563 // Extract each 'component' and check whether they are printable
564 //
565 Tag[0] = TableEntry->Key & 0xFF;
566 Tag[1] = TableEntry->Key >> 8 & 0xFF;
567 Tag[2] = TableEntry->Key >> 16 & 0xFF;
568 Tag[3] = TableEntry->Key >> 24 & 0xFF;
569
570 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
571 {
572 //
573 // Print in direct order to make !poolused TAG usage easier
574 //
575 if (Verbose)
576 {
577 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
578 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
579 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
580 TableEntry->PagedAllocs, TableEntry->PagedFrees,
581 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
582 }
583 else
584 {
585 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
586 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
587 TableEntry->PagedAllocs, TableEntry->PagedBytes);
588 }
589 }
590 else
591 {
592 if (Verbose)
593 {
594 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
595 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
596 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
597 TableEntry->PagedAllocs, TableEntry->PagedFrees,
598 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
599 }
600 else
601 {
602 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
603 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
604 TableEntry->PagedAllocs, TableEntry->PagedBytes);
605 }
606 }
607 }
608 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
609 {
610 if (Verbose)
611 {
612 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
613 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
614 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
615 TableEntry->PagedAllocs, TableEntry->PagedFrees,
616 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
617 }
618 else
619 {
620 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
621 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
622 TableEntry->PagedAllocs, TableEntry->PagedBytes);
623 }
624 }
625 }
626 }
627
628 if (!CalledFromDbg)
629 {
630 DPRINT1("---------------------\n");
631 }
632}
633#endif
634
635/* PRIVATE FUNCTIONS **********************************************************/
636
/* Pre-seed the pool tracker table with the 64 most common ("hot") pool tags
 * so their buckets are claimed at boot and lookups for them never probe.
 * Uses open addressing with linear probing (wrap via PoolTrackTableMask);
 * the last bucket is reserved and never seeded.
 * NOTE(review): the function name line, the TrackTable declaration (original
 * 643) and the Hash computation (original 722, presumably
 * ExpComputeHashForTag(Key, PoolTrackTableMask)) are missing from this
 * extraction. */
637CODE_SEG("INIT")
638VOID
639NTAPI
641{
642 ULONG i, Key, Hash, Index;
644 ULONG TagList[] =
645 {
646 ' oI',
647 ' laH',
648 'PldM',
649 'LooP',
650 'tSbO',
651 ' prI',
652 'bdDN',
653 'LprI',
654 'pOoI',
655 ' ldM',
656 'eliF',
657 'aVMC',
658 'dSeS',
659 'CFtN',
660 'looP',
661 'rPCT',
662 'bNMC',
663 'dTeS',
664 'sFtN',
665 'TPCT',
666 'CPCT',
667 ' yeK',
668 'qSbO',
669 'mNoI',
670 'aEoI',
671 'cPCT',
672 'aFtN',
673 '0ftN',
674 'tceS',
675 'SprI',
676 'ekoT',
677 ' eS',
678 'lCbO',
679 'cScC',
680 'lFtN',
681 'cAeS',
682 'mfSF',
683 'kWcC',
684 'miSF',
685 'CdfA',
686 'EdfA',
687 'orSF',
688 'nftN',
689 'PRIU',
690 'rFpN',
691 'RFpN',
692 'aPeS',
693 'sUeS',
694 'FpcA',
695 'MpcA',
696 'cSeS',
697 'mNbO',
698 'sFpN',
699 'uLeS',
700 'DPcS',
701 'nevE',
702 'vrqR',
703 'ldaV',
704 ' pP',
705 'SdaV',
706 ' daV',
707 'LdaV',
708 'FdaV',
709 ' GIB',
710 };
711
712 //
713 // Loop all 64 hot tags
714 //
715 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
716 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
717 {
718 //
719 // Get the current tag, and compute its hash in the tracker table
720 //
721 Key = TagList[i];
723
724 //
725 // Loop all the hashes in this index/bucket
726 //
727 Index = Hash;
728 while (TRUE)
729 {
730 //
731 // Find an empty entry, and make sure this isn't the last hash that
732 // can fit.
733 //
734 // On checked builds, also make sure this is the first time we are
735 // seeding this tag.
736 //
737 ASSERT(TrackTable[Hash].Key != Key);
738 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
739 {
740 //
741 // It has been seeded, move on to the next tag
742 //
743 TrackTable[Hash].Key = Key;
744 break;
745 }
746
747 //
748 // This entry was already taken, compute the next possible hash while
749 // making sure we're not back at our initial index.
750 //
751 ASSERT(TrackTable[Hash].Key != Key);
752 Hash = (Hash + 1) & PoolTrackTableMask;
753 if (Hash == Index) break;
754 }
755 }
756}
757
/* Account a pool free against its tag: find the tag's bucket by linear
 * probing and atomically bump the paged/nonpaged free counters (the byte
 * counters are on the dropped lines). If the whole table is probed without
 * a match, the free is silently untracked (expansion not yet supported).
 * NOTE(review): the signature with Key/NumberOfBytes/PoolType (original
 * 760-762), local declarations (original 764-765), the Table pointer and
 * size loads (original 787, 789-790), the TableEntry computation (original
 * 802), the pool-type test (original 809) and the interlocked byte-count
 * updates (original 812-813, 817-818) are missing from this extraction. */
758VOID
759NTAPI
763{
766 SIZE_T TableMask, TableSize;
767
768 //
769 // Remove the PROTECTED_POOL flag which is not part of the tag
770 //
771 Key &= ~PROTECTED_POOL;
772
773 //
774 // With WinDBG you can set a tag you want to break on when an allocation is
775 // attempted
776 //
777 if (Key == PoolHitTag) DbgBreakPoint();
778
779 //
780 // Why the double indirection? Because normally this function is also used
781 // when doing session pool allocations, which has another set of tables,
782 // sizes, and masks that live in session pool. Now we don't support session
783 // pool so we only ever use the regular tables, but I'm keeping the code this
784 // way so that the day we DO support session pool, it won't require that
785 // many changes
786 //
788 TableMask = PoolTrackTableMask;
791
792 //
793 // Compute the hash for this key, and loop all the possible buckets
794 //
795 Hash = ExpComputeHashForTag(Key, TableMask);
796 Index = Hash;
797 while (TRUE)
798 {
799 //
800 // Have we found the entry for this tag? */
801 //
803 if (TableEntry->Key == Key)
804 {
805 //
806 // Decrement the counters depending on if this was paged or nonpaged
807 // pool
808 //
810 {
811 InterlockedIncrement(&TableEntry->NonPagedFrees);
814 return;
815 }
816 InterlockedIncrement(&TableEntry->PagedFrees);
819 return;
820 }
821
822 //
823 // We should have only ended up with an empty entry if we've reached
824 // the last bucket
825 //
826 if (!TableEntry->Key)
827 {
828 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
829 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
830 ASSERT(Hash == TableMask);
831 }
832
833 //
834 // This path is hit when we don't have an entry, and the current bucket
835 // is full, so we simply try the next one
836 //
837 Hash = (Hash + 1) & TableMask;
838 if (Hash == Index) break;
839 }
840
841 //
842 // And finally this path is hit when all the buckets are full, and we need
843 // some expansion. This path is not yet supported in ReactOS and so we'll
844 // ignore the tag
845 //
846 DPRINT1("Out of pool tag space, ignoring...\n");
847}
848
/* Account a pool allocation against its tag: probe for the tag's bucket,
 * creating it under the tag spinlock on first use (double-checked so only
 * one CPU wins the race), then atomically bump the paged/nonpaged alloc
 * counters. A full table logs and drops the tag (no expansion support).
 * NOTE(review): the signature with Key/NumberOfBytes/PoolType (original
 * 851-853), local declarations (original 855-857), the unsupported-feature
 * ASSERT (original 879), the Table/size loads (original 889, 891-892), the
 * TableEntry computation (original 904), the pool-type test (original 911),
 * the interlocked byte updates (original 914, 918), the spinlock
 * acquire/release (original 931, 941) and line 938 are missing from this
 * extraction. */
849VOID
850NTAPI
854{
858 SIZE_T TableMask, TableSize;
859
860 //
861 // Remove the PROTECTED_POOL flag which is not part of the tag
862 //
863 Key &= ~PROTECTED_POOL;
864
865 //
866 // With WinDBG you can set a tag you want to break on when an allocation is
867 // attempted
868 //
869 if (Key == PoolHitTag) DbgBreakPoint();
870
871 //
872 // There is also an internal flag you can set to break on malformed tags
873 //
874 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
875
876 //
877 // ASSERT on ReactOS features not yet supported
878 //
880
881 //
882 // Why the double indirection? Because normally this function is also used
883 // when doing session pool allocations, which has another set of tables,
884 // sizes, and masks that live in session pool. Now we don't support session
885 // pool so we only ever use the regular tables, but I'm keeping the code this
886 // way so that the day we DO support session pool, it won't require that
887 // many changes
888 //
890 TableMask = PoolTrackTableMask;
893
894 //
895 // Compute the hash for this key, and loop all the possible buckets
896 //
897 Hash = ExpComputeHashForTag(Key, TableMask);
898 Index = Hash;
899 while (TRUE)
900 {
901 //
902 // Do we already have an entry for this tag? */
903 //
905 if (TableEntry->Key == Key)
906 {
907 //
908 // Increment the counters depending on if this was paged or nonpaged
909 // pool
910 //
912 {
913 InterlockedIncrement(&TableEntry->NonPagedAllocs);
915 return;
916 }
917 InterlockedIncrement(&TableEntry->PagedAllocs);
919 return;
920 }
921
922 //
923 // We don't have an entry yet, but we've found a free bucket for it
924 //
925 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
926 {
927 //
928 // We need to hold the lock while creating a new entry, since other
929 // processors might be in this code path as well
930 //
932 if (!PoolTrackTable[Hash].Key)
933 {
934 //
935 // We've won the race, so now create this entry in the bucket
936 //
937 ASSERT(Table[Hash].Key == 0);
939 TableEntry->Key = Key;
940 }
942
943 //
944 // Now we force the loop to run again, and we should now end up in
945 // the code path above which does the interlocked increments...
946 //
947 continue;
948 }
949
950 //
951 // This path is hit when we don't have an entry, and the current bucket
952 // is full, so we simply try the next one
953 //
954 Hash = (Hash + 1) & TableMask;
955 if (Hash == Index) break;
956 }
957
958 //
959 // And finally this path is hit when all the buckets are full, and we need
960 // some expansion. This path is not yet supported in ReactOS and so we'll
961 // ignore the tag
962 //
963 DPRINT1("Out of pool tag space, ignoring...\n");
964}
965
/* One-time (INIT section) setup of a POOL_DESCRIPTOR: record type/index/
 * threshold/lock, zero the accounting counters, clear the pending-free list
 * and initialize all POOL_LISTS_PER_PAGE free-list heads.
 * NOTE(review): the signature with PoolDescriptor/PoolType (original
 * 969-970) and the trailing session-pool statement (original 1014) are
 * missing from this extraction. */
966CODE_SEG("INIT")
967VOID
968NTAPI
971 IN ULONG PoolIndex,
972 IN ULONG Threshold,
973 IN PVOID PoolLock)
974{
975 PLIST_ENTRY NextEntry, LastEntry;
976
977 //
978 // Setup the descriptor based on the caller's request
979 //
980 PoolDescriptor->PoolType = PoolType;
981 PoolDescriptor->PoolIndex = PoolIndex;
982 PoolDescriptor->Threshold = Threshold;
983 PoolDescriptor->LockAddress = PoolLock;
984
985 //
986 // Initialize accounting data
987 //
988 PoolDescriptor->RunningAllocs = 0;
989 PoolDescriptor->RunningDeAllocs = 0;
990 PoolDescriptor->TotalPages = 0;
991 PoolDescriptor->TotalBytes = 0;
992 PoolDescriptor->TotalBigPages = 0;
993
994 //
995 // Nothing pending for now
996 //
997 PoolDescriptor->PendingFrees = NULL;
998 PoolDescriptor->PendingFreeDepth = 0;
999
1000 //
1001 // Loop all the descriptor's allocation lists and initialize them
1002 //
1003 NextEntry = PoolDescriptor->ListHeads;
1004 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
1005 while (NextEntry < LastEntry)
1006 {
1007 ExpInitializePoolListHead(NextEntry);
1008 NextEntry++;
1009 }
1010
1011 //
1012 // Note that ReactOS does not support Session Pool Yet
1013 //
1015}
1016
/* Boot-time pool initialization (presumably InitializePool — the name line
 * is missing). NonPagedPool path: size the tag tracker table (registry
 * override validated as a power of two, default 2048, minimum 64) and the
 * big-page table (default 4096, minimum 64), allocating each with
 * halve-and-retry on failure and bugchecking MUST_SUCCEED_POOL_EMPTY at
 * size 1; seed hot tags; register the generic 'Pool' tracker; initialize the
 * tag spinlock and the nonpaged descriptor. PagedPool path: allocate the
 * paged descriptor + guarded mutex, bugcheck on failure, set them up and
 * register the nonpaged generic tracker.
 * NOTE(review): many lines are missing from this extraction — the signature
 * (original 1020), local declarations (original 1023-1024), TableSize loads
 * (original 1040, 1122), the MiAllocatePoolPages calls (original 1085,
 * 1158-1159), mask/zeroing statements (original 1108-1112, 1152-1154, 1172,
 * 1179-1181, 1184), DPRINT arguments (original 1191, 1193), tracker size
 * argument (original 1199), spinlock init (original 1211), descriptor setup
 * calls (original 1216-1218, 1252-1256, 1260) and tracker arguments
 * (original 1266). Do not edit this function from the extraction alone. */
1017CODE_SEG("INIT")
1018VOID
1019NTAPI
1021 IN ULONG Threshold)
1022{
1025 ULONG i;
1026
1027 //
1028 // Check what kind of pool this is
1029 //
1030 if (PoolType == NonPagedPool)
1031 {
1032 //
1033 // Compute the track table size and convert it from a power of two to an
1034 // actual byte size
1035 //
1036 // NOTE: On checked builds, we'll assert if the registry table size was
1037 // invalid, while on retail builds we'll just break out of the loop at
1038 // that point.
1039 //
1041 for (i = 0; i < 32; i++)
1042 {
1043 if (TableSize & 1)
1044 {
1045 ASSERT((TableSize & ~1) == 0);
1046 if (!(TableSize & ~1)) break;
1047 }
1048 TableSize >>= 1;
1049 }
1050
1051 //
1052 // If we hit bit 32, than no size was defined in the registry, so
1053 // we'll use the default size of 2048 entries.
1054 //
1055 // Otherwise, use the size from the registry, as long as it's not
1056 // smaller than 64 entries.
1057 //
1058 if (i == 32)
1059 {
1060 PoolTrackTableSize = 2048;
1061 }
1062 else
1063 {
1064 PoolTrackTableSize = max(1 << i, 64);
1065 }
1066
1067 //
1068 // Loop trying with the biggest specified size first, and cut it down
1069 // by a power of two each iteration in case not enough memory exist
1070 //
1071 while (TRUE)
1072 {
1073 //
1074 // Do not allow overflow
1075 //
1076 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1077 {
1078 PoolTrackTableSize >>= 1;
1079 continue;
1080 }
1081
1082 //
1083 // Allocate the tracker table and exit the loop if this worked
1084 //
1086 (PoolTrackTableSize + 1) *
1087 sizeof(POOL_TRACKER_TABLE));
1088 if (PoolTrackTable) break;
1089
1090 //
1091 // Otherwise, as long as we're not down to the last bit, keep
1092 // iterating
1093 //
1094 if (PoolTrackTableSize == 1)
1095 {
1096 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1097 TableSize,
1098 0xFFFFFFFF,
1099 0xFFFFFFFF,
1100 0xFFFFFFFF);
1101 }
1102 PoolTrackTableSize >>= 1;
1103 }
1104
1105 //
1106 // Add one entry, compute the hash, and zero the table
1107 //
1110
1113
1114 //
1115 // Finally, add the most used tags to speed up those allocations
1116 //
1118
1119 //
1120 // We now do the exact same thing with the tracker table for big pages
1121 //
1123 for (i = 0; i < 32; i++)
1124 {
1125 if (TableSize & 1)
1126 {
1127 ASSERT((TableSize & ~1) == 0);
1128 if (!(TableSize & ~1)) break;
1129 }
1130 TableSize >>= 1;
1131 }
1132
1133 //
1134 // For big pages, the default tracker table is 4096 entries, while the
1135 // minimum is still 64
1136 //
1137 if (i == 32)
1138 {
1139 PoolBigPageTableSize = 4096;
1140 }
1141 else
1142 {
1143 PoolBigPageTableSize = max(1 << i, 64);
1144 }
1145
1146 //
1147 // Again, run the exact same loop we ran earlier, but this time for the
1148 // big pool tracker instead
1149 //
1150 while (TRUE)
1151 {
1153 {
1155 continue;
1156 }
1157
1160 sizeof(POOL_TRACKER_BIG_PAGES));
1161 if (PoolBigPageTable) break;
1162
1163 if (PoolBigPageTableSize == 1)
1164 {
1165 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1166 TableSize,
1167 0xFFFFFFFF,
1168 0xFFFFFFFF,
1169 0xFFFFFFFF);
1170 }
1171
1173 }
1174
1175 //
1176 // An extra entry is not needed for for the big pool tracker, so just
1177 // compute the hash and zero it
1178 //
1182 for (i = 0; i < PoolBigPageTableSize; i++)
1183 {
1185 }
1186
1187 //
1188 // During development, print this out so we can see what's happening
1189 //
1190 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1192 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1194
1195 //
1196 // Insert the generic tracker for all of big pool
1197 //
1198 ExpInsertPoolTracker('looP',
1200 sizeof(POOL_TRACKER_BIG_PAGES)),
1201 NonPagedPool);
1202
1203 //
1204 // No support for NUMA systems at this time
1205 //
1206 ASSERT(KeNumberNodes == 1);
1207
1208 //
1209 // Initialize the tag spinlock
1210 //
1212
1213 //
1214 // Initialize the nonpaged pool descriptor
1215 //
1219 0,
1220 Threshold,
1221 NULL);
1222 }
1223 else
1224 {
1225 //
1226 // No support for NUMA systems at this time
1227 //
1228 ASSERT(KeNumberNodes == 1);
1229
1230 //
1231 // Allocate the pool descriptor
1232 //
1234 sizeof(KGUARDED_MUTEX) +
1235 sizeof(POOL_DESCRIPTOR),
1236 'looP');
1237 if (!Descriptor)
1238 {
1239 //
1240 // This is really bad...
1241 //
1242 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1243 0,
1244 -1,
1245 -1,
1246 -1);
1247 }
1248
1249 //
1250 // Setup the vector and guarded mutex for paged pool
1251 //
1257 PagedPool,
1258 0,
1259 Threshold,
1261
1262 //
1263 // Insert the generic tracker for all of nonpaged pool
1264 //
1265 ExpInsertPoolTracker('looP',
1267 NonPagedPool);
1268 }
1269}
1270
/* Pool descriptor lock/unlock pair. Nonpaged pool uses a queued spinlock
 * (returning the previous IRQL); paged pool uses the descriptor's guarded
 * mutex and reports APC_LEVEL.
 * NOTE(review): the function name lines (original 1273, 1297-1298) and the
 * actual queued-spinlock acquire/release statements (original 1283, 1308)
 * are missing from this extraction — presumably ExLockPool/ExUnlockPool
 * around KeAcquire/ReleaseQueuedSpinLock; confirm against the real source. */
1272KIRQL
1274{
1275 //
1276 // Check if this is nonpaged pool
1277 //
1278 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1279 {
1280 //
1281 // Use the queued spin lock
1282 //
1284 }
1285 else
1286 {
1287 //
1288 // Use the guarded mutex
1289 //
1290 KeAcquireGuardedMutex(Descriptor->LockAddress);
1291 return APC_LEVEL;
1292 }
1293}
1294
/* Release the lock taken above, restoring the recorded IRQL for nonpaged */
1296VOID
1299{
1300 //
1301 // Check if this is nonpaged pool
1302 //
1303 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1304 {
1305 //
1306 // Use the queued spin lock
1307 //
1309 }
1310 else
1311 {
1312 //
1313 // Use the guarded mutex
1314 //
1315 KeReleaseGuardedMutex(Descriptor->LockAddress);
1316 }
1317}
1318
/* Per-processor "Generic DPC" callback used by the pool-tag query below:
 * exactly one CPU wins a race and copies the live tracker table into the
 * context buffer; all CPUs then synchronize and signal completion.
 * NOTE(review): the signature (original 1321-1324), the Context cast
 * (original 1326-1328), the race-winning test (original 1333, presumably an
 * interlocked decrement/exchange), the copy-source argument (original 1336)
 * and the stall/barrier calls (original 1350-1351) are missing from this
 * extraction — confirm against the real source. */
1319VOID
1320NTAPI
1325{
1329
1330 //
1331 // Make sure we win the race, and if we did, copy the data atomically
1332 //
1334 {
1335 RtlCopyMemory(Context->PoolTrackTable,
1337 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1338
1339 //
1340 // This is here because ReactOS does not yet support expansion
1341 //
1342 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1343 }
1344
1345 //
1346 // Regardless of whether we won or not, we must now synchronize and then
1347 // decrement the barrier since this is one more processor that has completed
1348 // the callback.
1349 //
1352}
1353
/* Service a SystemPoolTagInformation query: snapshot the tracker table via
 * a Generic DPC, then copy each non-empty tag entry into the caller's
 * SYSTEM_POOLTAG_INFORMATION buffer. Count/CurrentLength keep growing even
 * after the buffer overflows so the caller learns the required size.
 * NOTE(review): the return type and parameter lines (original 1354, 1356,
 * 1358 — SystemInformation and optional ReturnLength), locals (original
 * 1362, 1365-1366 including Status), the buffer allocation with tag 'ofnI'
 * (original 1389-1390), the KeGenericCallDpc invocation (original 1399) and
 * the too-small status assignment (original 1427, presumably
 * STATUS_INFO_LENGTH_MISMATCH) are missing from this extraction. */
1355NTAPI
1357 IN ULONG SystemInformationLength,
1359{
1360 ULONG TableSize, CurrentLength;
1361 ULONG EntryCount;
1363 PSYSTEM_POOLTAG TagEntry;
1364 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1367
1368 //
1369 // Keep track of how much data the caller's buffer must hold
1370 //
1371 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1372
1373 //
1374 // Initialize the caller's buffer
1375 //
1376 TagEntry = &SystemInformation->TagInfo[0];
1377 SystemInformation->Count = 0;
1378
1379 //
1380 // Capture the number of entries, and the total size needed to make a copy
1381 // of the table
1382 //
1383 EntryCount = (ULONG)PoolTrackTableSize;
1384 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1385
1386 //
1387 // Allocate the "Generic DPC" temporary buffer
1388 //
1391
1392 //
1393 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1394 //
1395 Context.PoolTrackTable = Buffer;
1396 Context.PoolTrackTableSize = PoolTrackTableSize;
1397 Context.PoolTrackTableExpansion = NULL;
1398 Context.PoolTrackTableSizeExpansion = 0;
1400
1401 //
1402 // Now parse the results
1403 //
1404 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1405 {
1406 //
1407 // If the entry is empty, skip it
1408 //
1409 if (!TrackerEntry->Key) continue;
1410
1411 //
1412 // Otherwise, add one more entry to the caller's buffer, and ensure that
1413 // enough space has been allocated in it
1414 //
1415 SystemInformation->Count++;
1416 CurrentLength += sizeof(*TagEntry);
1417 if (SystemInformationLength < CurrentLength)
1418 {
1419 //
1420 // The caller's buffer is too small, so set a failure code. The
1421 // caller will know the count, as well as how much space is needed.
1422 //
1423 // We do NOT break out of the loop, because we want to keep incrementing
1424 // the Count as well as CurrentLength so that the caller can know the
1425 // final numbers
1426 //
1428 }
1429 else
1430 {
1431 //
1432 // Small sanity check that our accounting is working correctly
1433 //
1434 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1435 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1436
1437 //
1438 // Return the data into the caller's buffer
1439 //
1440 TagEntry->TagUlong = TrackerEntry->Key;
1441 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1442 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1443 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1444 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1445 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1446 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1447 TagEntry++;
1448 }
1449 }
1450
1451 //
1452 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1453 //
1454 ExFreePoolWithTag(Buffer, 'ofnI');
1455 if (ReturnLength) *ReturnLength = CurrentLength;
1456 return Status;
1457}
1458
/*
 * ExpReallocateBigPageTable
 *
 * Resizes the big-pool tracker hash table: halves it when Shrink is TRUE,
 * doubles it otherwise, then rehashes every in-use entry into the new table.
 * The caller must hold ExpLargePoolTableLock (see the comment below); the
 * lock is released before the old table is freed. Returns TRUE on success
 * (or when no resize was needed), FALSE when size math overflows or the new
 * table cannot be allocated.
 *
 * NOTE(review): several lines are elided in this extraction (the lock
 * assert, the minimum-size clamp in the shrink path, the lock releases on
 * the early-out paths) -- confirm the exact failure-path unlock behavior
 * against the full source.
 */
1460static
1461BOOLEAN
1462ExpReallocateBigPageTable(
1464    _In_ BOOLEAN Shrink)
1465{
1466 SIZE_T OldSize = PoolBigPageTableSize;
1467 SIZE_T NewSize, NewSizeInBytes;
1468 PPOOL_TRACKER_BIG_PAGES NewTable;
1469 PPOOL_TRACKER_BIG_PAGES OldTable;
1470 ULONG i;
1471 ULONG PagesFreed;
1472 ULONG Hash;
1473 ULONG HashMask;
1474
1475 /* Must be holding ExpLargePoolTableLock */
1477
1478 /* Make sure we don't overflow */
1479 if (Shrink)
1480 {
1481 NewSize = OldSize / 2;
1482
1483 /* Make sure we don't shrink too much. */
1485
1487 ASSERT(NewSize <= OldSize);
1488
1489 /* If there is only one page left, then keep it around. Not a failure either. */
1490 if (NewSize == OldSize)
1491 {
1494 return TRUE;
1495 }
1496 }
1497 else
1498 {
1499 if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
1500 {
1501 DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
1503 return FALSE;
1504 }
1505
1506 /* Make sure we don't stupidly waste pages */
1508 ASSERT(NewSize > OldSize);
1509 }
1510
1511 if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
1512 {
1513 DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
1515 return FALSE;
1516 }
1517
1518 NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1519 if (NewTable == NULL)
1520 {
1521 DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
1523 return FALSE;
1524 }
1525
1526 DPRINT("%s big pool tracker table to %lu entries\n", Shrink ? "Shrinking" : "Expanding", NewSize);
1527
1528 /* Initialize the new table */
1529 RtlZeroMemory(NewTable, NewSizeInBytes);
1530 for (i = 0; i < NewSize; i++)
1531 {
/* Mark every slot free; POOL_BIG_TABLE_ENTRY_FREE is kept in the Va low bit */
1532 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1533 }
1534
1535 /* Copy over all items */
1536 OldTable = PoolBigPageTable;
1537 HashMask = NewSize - 1;
1538 for (i = 0; i < OldSize; i++)
1539 {
1540 /* Skip over empty items */
1541 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1542 {
1543 continue;
1544 }
1545
1546 /* Recalculate the hash due to the new table size */
/* NOTE(review): "% HashMask" with HashMask == NewSize - 1 never yields
 * bucket NewSize-1 directly (only linear probing can reach it); a
 * power-of-two table would normally use "& HashMask" -- verify against
 * ExpComputePartialHashForAddress's callers in the full source. */
1547 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask;
1548
1549 /* Find the location in the new table */
1550 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1551 {
1552 if (++Hash == NewSize)
1553 Hash = 0;
1554 }
1555
1556 /* We must have space */
1558
1559 /* Finally, copy the item */
1560 NewTable[Hash] = OldTable[i];
1561 }
1562
1563 /* Activate the new table */
1564 PoolBigPageTable = NewTable;
1567
1568 /* Release the lock, we're done changing global state */
1570
1571 /* Free the old table and update our tracker */
1572 PagesFreed = MiFreePoolPages(OldTable);
1573 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1574 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1575
1576 return TRUE;
1577}
1578
/*
 * ExpAddTagForBigPages (function-name/parameter lines partially elided in
 * this extraction -- confirm the full signature against the source).
 *
 * Records a big-page allocation (Va, pool tag Key, NumberOfPages) in the
 * expandable big-pool tracker hash table. Probes linearly from the hash
 * bucket, claiming a free slot via an interlocked compare-exchange on Va.
 * May proactively expand the table when it fills up (or after repeated
 * collisions). Returns TRUE once the entry is inserted, FALSE only when a
 * required table expansion fails.
 */
1579BOOLEAN
1580NTAPI
1582 IN ULONG Key,
1583 IN ULONG NumberOfPages,
1585{
1586 ULONG Hash, i = 0;
1587 PVOID OldVa;
1588 KIRQL OldIrql;
1590 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1593
1594 //
1595 // As the table is expandable, these values must only be read after acquiring
1596 // the lock to avoid a torn access during an expansion
1597 // NOTE: Windows uses a special reader/writer SpinLock to improve
1598 // performance in the common case (add/remove a tracker entry)
1599 //
1600Retry:
1605
1606 //
1607 // We loop from the current hash bucket to the end of the table, and then
1608 // rollover to hash bucket 0 and keep going from there. If we return back
1609 // to the beginning, then we attempt expansion at the bottom of the loop
1610 //
1611 EntryStart = Entry = &PoolBigPageTable[Hash];
1612 EntryEnd = &PoolBigPageTable[TableSize];
1613 do
1614 {
1615 //
1616 // Make sure that this is a free entry and attempt to atomically make the
1617 // entry busy now
1618 // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1619 //
1620 OldVa = Entry->Va;
1621 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1622 (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1623 {
1624 //
1625 // We now own this entry, write down the size and the pool tag
1626 //
1627 Entry->Key = Key;
1628 Entry->NumberOfPages = NumberOfPages;
1629
1630 //
1631 // Add one more entry to the count, and see if we're getting within
1632 // 75% of the table size, at which point we'll do an expansion now
1633 // to avoid blocking too hard later on.
1634 //
1635 // Note that we only do this if it's also been the 16th time that we
1636 // keep losing the race or that we are not finding a free entry anymore,
1637 // which implies a massive number of concurrent big pool allocations.
1638 //
/* NOTE(review): the expansion condition itself is elided here; only the
 * body (DPRINT + expand + return TRUE) is visible. */
1641 {
1642 DPRINT("Attempting expansion since we now have %lu entries\n",
1645 ExpReallocateBigPageTable(OldIrql, FALSE);
1646 return TRUE;
1647 }
1648
1649 //
1650 // We have our entry, return
1651 //
1653 return TRUE;
1654 }
1655
1656 //
1657 // We don't have our entry yet, so keep trying, making the entry list
1658 // circular if we reach the last entry. We'll eventually break out of
1659 // the loop once we've rolled over and returned back to our original
1660 // hash bucket
1661 //
1662 i++;
1663 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1664 } while (Entry != EntryStart);
1665
1666 //
1667 // This means there's no free hash buckets whatsoever, so we now have
1668 // to attempt expanding the table
1669 //
1671 if (ExpReallocateBigPageTable(OldIrql, FALSE))
1672 {
1673 goto Retry;
1674 }
1676 DPRINT1("Big pool table expansion failed\n");
1677 return FALSE;
1678}
1679
/*
 * ExpFindAndRemoveTagBigPages (function-name/parameter lines partially
 * elided in this extraction -- confirm the full signature against the
 * source).
 *
 * Looks up a big-page allocation by its Va in the tracker table, captures
 * its page count into *BigPages and its pool tag, frees the slot, and may
 * shrink the table when utilization drops low enough. If the Va was never
 * tracked, returns the special ' GIB' tag with *BigPages = 0 so the caller
 * asks Mm for the page count instead.
 */
1680ULONG
1681NTAPI
1683 OUT PULONG_PTR BigPages,
1685{
1688 KIRQL OldIrql;
1693
1694 //
1695 // As the table is expandable, these values must only be read after acquiring
1696 // the lock to avoid a torn access during an expansion
1697 //
1702
1703 //
1704 // Loop while trying to find this big page allocation
1705 //
1706 while (PoolBigPageTable[Hash].Va != Va)
1707 {
1708 //
1709 // Increment the size until we go past the end of the table
1710 //
1711 if (++Hash >= TableSize)
1712 {
1713 //
1714 // Is this the second time we've tried?
1715 //
1716 if (!FirstTry)
1717 {
1718 //
1719 // This means it was never inserted into the pool table and it
1720 // received the special "BIG" tag -- return that and return 0
1721 // so that the code can ask Mm for the page count instead
1722 //
1724 *BigPages = 0;
1725 return ' GIB';
1726 }
1727
1728 //
1729 // The first time this happens, reset the hash index and try again
1730 //
1731 Hash = 0;
1732 FirstTry = FALSE;
1733 }
1734 }
1735
1736 //
1737 // Now capture all the information we need from the entry, since after we
1738 // release the lock, the data can change
1739 //
1741 *BigPages = Entry->NumberOfPages;
1742 PoolTag = Entry->Key;
1743
1744 //
1745 // Set the free bit, and decrement the number of allocations. Finally, release
1746 // the lock and return the tag that was located
1747 //
1749
1751
1752 /* If reaching 12.5% of the size (or whatever integer rounding gets us to),
1753 * halve the allocation size, which will get us to 25% of space used. */
1755 {
1756 /* Shrink the table. */
1757 ExpReallocateBigPageTable(OldIrql, TRUE);
1758 }
1759 else
1760 {
/* NOTE(review): the else body (plain lock release) is elided here. */
1762 }
1763 return PoolTag;
1764}
1765
/*
 * ExQueryPoolUsage (function-name/first-parameter lines elided in this
 * extraction -- confirm against the source).
 *
 * Tallies executive-pool usage statistics into the caller's OUT
 * parameters: total pages and running allocation/free counts for every
 * paged pool descriptor and the non-paged pool descriptor, plus the
 * cumulative AllocateHits of the system lookaside lists, split by pool
 * type.
 */
1766VOID
1767NTAPI
1769 OUT PULONG NonPagedPoolPages,
1770 OUT PULONG PagedPoolAllocs,
1771 OUT PULONG PagedPoolFrees,
1772 OUT PULONG PagedPoolLookasideHits,
1773 OUT PULONG NonPagedPoolAllocs,
1774 OUT PULONG NonPagedPoolFrees,
1775 OUT PULONG NonPagedPoolLookasideHits)
1776{
1777 ULONG i;
1778 PPOOL_DESCRIPTOR PoolDesc;
1779
1780 //
1781 // Assume all failures
1782 //
1783 *PagedPoolPages = 0;
1784 *PagedPoolAllocs = 0;
1785 *PagedPoolFrees = 0;
1786
1787 //
1788 // Tally up the totals for all the paged pool
1789 //
1790 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1791 {
1792 PoolDesc = ExpPagedPoolDescriptor[i];
1793 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1794 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1795 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1796 }
1797
1798 //
1799 // The first non-paged pool has a hardcoded well-known descriptor name
1800 //
1801 PoolDesc = &NonPagedPoolDescriptor;
1802 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1803 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1804 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1805
1806 //
1807 // If the system has more than one non-paged pool, copy the other descriptor
1808 // totals as well
1809 //
1810#if 0
1811 if (ExpNumberOfNonPagedPools > 1)
1812 {
1813 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1814 {
1815 PoolDesc = ExpNonPagedPoolDescriptor[i];
1816 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1817 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1818 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1819 }
1820 }
1821#endif
1822
1823 //
1824 // Get the amount of hits in the system lookaside lists
1825 //
1827 {
1828 PLIST_ENTRY ListEntry;
1829
1830 for (ListEntry = ExPoolLookasideListHead.Flink;
1831 ListEntry != &ExPoolLookasideListHead;
1832 ListEntry = ListEntry->Flink)
1833 {
1835
1836 Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1837
1838 if (Lookaside->Type == NonPagedPool)
1839 {
1840 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1841 }
1842 else
1843 {
1844 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1845 }
1846 }
1847 }
1848}
1849
/*
 * NOTE(review): the definition line(s) of this routine are elided in this
 * extraction. From the body this appears to be the pool-quota return path
 * for a single allocation P (likely ExReturnPoolQuota / an Exp* helper) --
 * confirm the name and parameters against the full source.
 *
 * For a quota-charged pool block it reads the owning-process pointer stored
 * in the last pointer-sized slot of the block, validates that it really is
 * a process object (bugchecking with BAD_POOL_CALLER otherwise), clears the
 * stored pointer, and returns the block's quota charge (the elided call at
 * the end takes BlockSize * POOL_BLOCK_SIZE as an argument).
 */
1850VOID
1851NTAPI
1853{
1856 USHORT BlockSize;
1858
1861 {
1862 return;
1863 }
1864
/* Step back from the caller's pointer to the POOL_HEADER preceding it */
1865 Entry = P;
1866 Entry--;
1868
1869 PoolType = Entry->PoolType - 1;
1870 BlockSize = Entry->BlockSize;
1871
1873 {
/* The owning process pointer lives in the last PVOID slot of the block */
1874 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1875 ASSERT(Process != NULL);
1876 if (Process)
1877 {
1878 if (Process->Pcb.Header.Type != ProcessObject)
1879 {
1880 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1881 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1882 KeBugCheckEx(BAD_POOL_CALLER,
1884 (ULONG_PTR)P,
1885 Entry->PoolTag,
1887 }
1888 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1891 BlockSize * POOL_BLOCK_SIZE);
1893 }
1894 }
1895}
1896
1897/* PUBLIC FUNCTIONS ***********************************************************/
1898
1899/*
1900 * @implemented
1901 */
/*
 * ExAllocatePoolWithTag (function-name/parameter lines elided in this
 * extraction -- confirm the full signature against the source).
 *
 * Main executive pool allocator. Requests larger than the small-block
 * limit go straight to MiAllocatePoolPages and are recorded in the
 * big-page tracker table (falling back to the generic ' GIB' tag if the
 * tracker insert fails). Small requests are rounded up to POOL_HEADER
 * block units and served, in order, from: the per-CPU lookaside list, the
 * global lookaside list, the descriptor's free lists (splitting an
 * oversized free block into the allocation plus a leftover fragment), or
 * finally a freshly allocated pool page. Allocation failure may bugcheck
 * (must-succeed pool), print/break, or raise, depending on flags in
 * OriginalType and the pool debug settings; otherwise NULL is returned.
 */
1902PVOID
1903NTAPI
1906 IN ULONG Tag)
1907{
1908 PPOOL_DESCRIPTOR PoolDesc;
1909 PLIST_ENTRY ListHead;
1910 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1911 KIRQL OldIrql;
1912 USHORT BlockSize, i;
1913 ULONG OriginalType;
1914 PKPRCB Prcb = KeGetCurrentPrcb();
1916
1917 //
1918 // Some sanity checks
1919 //
1920 ASSERT(Tag != 0);
1921 ASSERT(Tag != ' GIB');
1922 ASSERT(NumberOfBytes != 0);
1924
1925 //
1926 // Not supported in ReactOS
1927 //
1929
1930 //
1931 // Check if verifier or special pool is enabled
1932 //
1934 {
1935 //
1936 // For verifier, we should call the verification routine
1937 //
1939 {
1940 DPRINT1("Driver Verifier is not yet supported\n");
1941 }
1942
1943 //
1944 // For special pool, we check if this is a suitable allocation and do
1945 // the special allocation if needed
1946 //
1948 {
1949 //
1950 // Check if this is a special pool allocation
1951 //
1953 {
1954 //
1955 // Try to allocate using special pool
1956 //
1958 if (Entry) return Entry;
1959 }
1960 }
1961 }
1962
1963 //
1964 // Get the pool type and its corresponding vector for this request
1965 //
1966 OriginalType = PoolType;
1968 PoolDesc = PoolVector[PoolType];
1969 ASSERT(PoolDesc != NULL);
1970
1971 //
1972 // Check if this is a big page allocation
1973 //
1975 {
1976 //
1977 // Allocate pages for it
1978 //
1979 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1980 if (!Entry)
1981 {
1982#if DBG
1983 //
1984 // Out of memory, display current consumption
1985 // Let's consider that if the caller wanted more
1986 // than a hundred pages, that's a bogus caller
1987 // and we are not out of memory. Dump at most
1988 // once a second to avoid spamming the log.
1989 //
1990 if (NumberOfBytes < 100 * PAGE_SIZE &&
1992 {
1993 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1995 }
1996#endif
1997
1998 //
1999 // Must succeed pool is deprecated, but still supported. These allocation
2000 // failures must cause an immediate bugcheck
2001 //
2002 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2003 {
2004 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2008 0);
2009 }
2010
2011 //
2012 // Internal debugging
2013 //
2015
2016 //
2017 // This flag requests printing failures, and can also further specify
2018 // breaking on failures
2019 //
2021 {
2022 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2024 OriginalType);
2026 }
2027
2028 //
2029 // Finally, this flag requests an exception, which we are more than
2030 // happy to raise!
2031 //
2032 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2033 {
2035 }
2036
2037 return NULL;
2038 }
2039
2040 //
2041 // Increment required counters
2042 //
2047
2048 //
2049 // Add a tag for the big page allocation and switch to the generic "BIG"
2050 // tag if we failed to do so, then insert a tracker for this allocation.
2051 //
2053 Tag,
2055 OriginalType))
2056 {
2057 Tag = ' GIB';
2058 }
2060 return Entry;
2061 }
2062
2063 //
2064 // Should never request 0 bytes from the pool, but since so many drivers do
2065 // it, we'll just assume they want 1 byte, based on NT's similar behavior
2066 //
2067 if (!NumberOfBytes) NumberOfBytes = 1;
2068
2069 //
2070 // A pool allocation is defined by its data, a linked list to connect it to
2071 // the free list (if necessary), and a pool header to store accounting info.
2072 // Calculate this size, then convert it into a block size (units of pool
2073 // headers)
2074 //
2075 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2076 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2077 // the direct allocation of pages.
2078 //
2079 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2080 / POOL_BLOCK_SIZE);
2082
2083 //
2084 // Handle lookaside list optimization for both paged and nonpaged pool
2085 //
2087 {
2088 //
2089 // Try popping it from the per-CPU lookaside list
2090 //
2092 Prcb->PPPagedLookasideList[i - 1].P :
2093 Prcb->PPNPagedLookasideList[i - 1].P;
2094 LookasideList->TotalAllocates++;
2096 if (!Entry)
2097 {
2098 //
2099 // We failed, try popping it from the global list
2100 //
2102 Prcb->PPPagedLookasideList[i - 1].L :
2103 Prcb->PPNPagedLookasideList[i - 1].L;
2104 LookasideList->TotalAllocates++;
2106 }
2107
2108 //
2109 // If we were able to pop it, update the accounting and return the block
2110 //
2111 if (Entry)
2112 {
2113 LookasideList->AllocateHits++;
2114
2115 //
2116 // Get the real entry, write down its pool type, and track it
2117 //
2118 Entry--;
2119 Entry->PoolType = OriginalType + 1;
2121 Entry->BlockSize * POOL_BLOCK_SIZE,
2122 OriginalType);
2123
2124 //
2125 // Return the pool allocation
2126 //
2127 Entry->PoolTag = Tag;
2128 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2129 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2130 return POOL_FREE_BLOCK(Entry);
2131 }
2132 }
2133
2134 //
2135 // Loop in the free lists looking for a block of this size. Start with the
2136 // list optimized for this kind of size lookup
2137 //
2138 ListHead = &PoolDesc->ListHeads[i];
2139 do
2140 {
2141 //
2142 // Are there any free entries available on this list?
2143 //
2144 if (!ExpIsPoolListEmpty(ListHead))
2145 {
2146 //
2147 // Acquire the pool lock now
2148 //
2149 OldIrql = ExLockPool(PoolDesc);
2150
2151 //
2152 // And make sure the list still has entries
2153 //
2154 if (ExpIsPoolListEmpty(ListHead))
2155 {
2156 //
2157 // Someone raced us (and won) before we had a chance to acquire
2158 // the lock.
2159 //
2160 // Try again!
2161 //
2162 ExUnlockPool(PoolDesc, OldIrql);
2163 continue;
2164 }
2165
2166 //
2167 // Remove a free entry from the list
2168 // Note that due to the way we insert free blocks into multiple lists
2169 // there is a guarantee that any block on this list will either be
2170 // of the correct size, or perhaps larger.
2171 //
2172 ExpCheckPoolLinks(ListHead);
2174 ExpCheckPoolLinks(ListHead);
2176 ASSERT(Entry->BlockSize >= i);
2177 ASSERT(Entry->PoolType == 0);
2178
2179 //
2180 // Check if this block is larger than what we need. The block could
2181 // not possibly be smaller, due to the reason explained above (and
2182 // we would've asserted on a checked build if this was the case).
2183 //
2184 if (Entry->BlockSize != i)
2185 {
2186 //
2187 // Is there an entry before this one?
2188 //
2189 if (Entry->PreviousSize == 0)
2190 {
2191 //
2192 // There isn't anyone before us, so take the next block and
2193 // turn it into a fragment that contains the leftover data
2194 // that we don't need to satisfy the caller's request
2195 //
2196 FragmentEntry = POOL_BLOCK(Entry, i);
2197 FragmentEntry->BlockSize = Entry->BlockSize - i;
2198
2199 //
2200 // And make it point back to us
2201 //
2202 FragmentEntry->PreviousSize = i;
2203
2204 //
2205 // Now get the block that follows the new fragment and check
2206 // if it's still on the same page as us (and not at the end)
2207 //
2208 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2209 if (PAGE_ALIGN(NextEntry) != NextEntry)
2210 {
2211 //
2212 // Adjust this next block to point to our newly created
2213 // fragment block
2214 //
2215 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2216 }
2217 }
2218 else
2219 {
2220 //
2221 // There is a free entry before us, which we know is smaller
2222 // so we'll make this entry the fragment instead
2223 //
2224 FragmentEntry = Entry;
2225
2226 //
2227 // And then we'll remove from it the actual size required.
2228 // Now the entry is a leftover free fragment
2229 //
2230 Entry->BlockSize -= i;
2231
2232 //
2233 // Now let's go to the next entry after the fragment (which
2234 // used to point to our original free entry) and make it
2235 // reference the new fragment entry instead.
2236 //
2237 // This is the entry that will actually end up holding the
2238 // allocation!
2239 //
2241 Entry->PreviousSize = FragmentEntry->BlockSize;
2242
2243 //
2244 // And now let's go to the entry after that one and check if
2245 // it's still on the same page, and not at the end
2246 //
2247 NextEntry = POOL_BLOCK(Entry, i);
2248 if (PAGE_ALIGN(NextEntry) != NextEntry)
2249 {
2250 //
2251 // Make it reference the allocation entry
2252 //
2253 NextEntry->PreviousSize = i;
2254 }
2255 }
2256
2257 //
2258 // Now our (allocation) entry is the right size
2259 //
2260 Entry->BlockSize = i;
2261
2262 //
2263 // And the next entry is now the free fragment which contains
2264 // the remaining difference between how big the original entry
2265 // was, and the actual size the caller needs/requested.
2266 //
2267 FragmentEntry->PoolType = 0;
2268 BlockSize = FragmentEntry->BlockSize;
2269
2270 //
2271 // Now check if enough free bytes remained for us to have a
2272 // "full" entry, which contains enough bytes for a linked list
2273 // and thus can be used for allocations (up to 8 bytes...)
2274 //
2275 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2276 if (BlockSize != 1)
2277 {
2278 //
2279 // Insert the free entry into the free list for this size
2280 //
2281 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2282 POOL_FREE_BLOCK(FragmentEntry));
2283 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2284 }
2285 }
2286
2287 //
2288 // We have found an entry for this allocation, so set the pool type
2289 // and release the lock since we're done
2290 //
2291 Entry->PoolType = OriginalType + 1;
2293 ExUnlockPool(PoolDesc, OldIrql);
2294
2295 //
2296 // Increment required counters
2297 //
2300
2301 //
2302 // Track this allocation
2303 //
2305 Entry->BlockSize * POOL_BLOCK_SIZE,
2306 OriginalType);
2307
2308 //
2309 // Return the pool allocation
2310 //
2311 Entry->PoolTag = Tag;
2312 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2313 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2314 return POOL_FREE_BLOCK(Entry);
2315 }
2316 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2317
2318 //
2319 // There were no free entries left, so we have to allocate a new fresh page
2320 //
2321 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2322 if (!Entry)
2323 {
2324#if DBG
2325 //
2326 // Out of memory, display current consumption
2327 // Let's consider that if the caller wanted more
2328 // than a hundred pages, that's a bogus caller
2329 // and we are not out of memory. Dump at most
2330 // once a second to avoid spamming the log.
2331 //
2332 if (NumberOfBytes < 100 * PAGE_SIZE &&
2334 {
2335 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2337 }
2338#endif
2339
2340 //
2341 // Must succeed pool is deprecated, but still supported. These allocation
2342 // failures must cause an immediate bugcheck
2343 //
2344 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2345 {
2346 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2347 PAGE_SIZE,
2350 0);
2351 }
2352
2353 //
2354 // Internal debugging
2355 //
2357
2358 //
2359 // This flag requests printing failures, and can also further specify
2360 // breaking on failures
2361 //
2363 {
2364 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2366 OriginalType);
2368 }
2369
2370 //
2371 // Finally, this flag requests an exception, which we are more than
2372 // happy to raise!
2373 //
2374 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2375 {
2377 }
2378
2379 //
2380 // Return NULL to the caller in all other cases
2381 //
2382 return NULL;
2383 }
2384
2385 //
2386 // Setup the entry data
2387 //
2388 Entry->Ulong1 = 0;
2389 Entry->BlockSize = i;
2390 Entry->PoolType = OriginalType + 1;
2391
2392 //
2393 // This page will have two entries -- one for the allocation (which we just
2394 // created above), and one for the remaining free bytes, which we're about
2395 // to create now. The free bytes are the whole page minus what was allocated
2396 // and then converted into units of block headers.
2397 //
2398 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2399 FragmentEntry = POOL_BLOCK(Entry, i);
2400 FragmentEntry->Ulong1 = 0;
2401 FragmentEntry->BlockSize = BlockSize;
2402 FragmentEntry->PreviousSize = i;
2403
2404 //
2405 // Increment required counters
2406 //
2409
2410 //
2411 // Now check if enough free bytes remained for us to have a "full" entry,
2412 // which contains enough bytes for a linked list and thus can be used for
2413 // allocations (up to 8 bytes...)
2414 //
2415 if (FragmentEntry->BlockSize != 1)
2416 {
2417 //
2418 // Excellent -- acquire the pool lock
2419 //
2420 OldIrql = ExLockPool(PoolDesc);
2421
2422 //
2423 // And insert the free entry into the free list for this block size
2424 //
2425 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2426 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2427 POOL_FREE_BLOCK(FragmentEntry));
2428 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2429
2430 //
2431 // Release the pool lock
2432 //
2434 ExUnlockPool(PoolDesc, OldIrql);
2435 }
2436 else
2437 {
2438 //
2439 // Simply do a sanity check
2440 //
2442 }
2443
2444 //
2445 // Increment performance counters and track this allocation
2446 //
2449 Entry->BlockSize * POOL_BLOCK_SIZE,
2450 OriginalType);
2451
2452 //
2453 // And return the pool allocation
2454 //
2456 Entry->PoolTag = Tag;
2457 return POOL_FREE_BLOCK(Entry);
2458}
2459
2460/*
2461 * @implemented
2462 */
/*
 * Tag-less allocation wrapper (function-name/parameter lines elided in
 * this extraction). Forwards to the tagged allocator using the generic
 * TAG_NONE tag; the forwarding call itself is on an elided line. The
 * disabled (#if 0 && DBG) path would instead derive a 4-character tag from
 * the calling driver's BaseDllName, padding with spaces.
 */
2463PVOID
2464NTAPI
2467{
2468 ULONG Tag = TAG_NONE;
2469#if 0 && DBG
2470 PLDR_DATA_TABLE_ENTRY LdrEntry;
2471
2472 /* Use the first four letters of the driver name, or "None" if unavailable */
2473 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2475 : NULL;
2476 if (LdrEntry)
2477 {
2478 ULONG i;
2479 Tag = 0;
2480 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2481 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2482 for (; i < 4; i++)
2483 Tag = Tag >> 8 | ' ' << 24;
2484 }
2485#endif
2487}
2488
2489/*
2490 * @implemented
2491 */
/*
 * ExFreePoolWithTag (function-name/parameter lines elided in this
 * extraction -- confirm the full signature against the source).
 *
 * Frees a pool allocation P, optionally verifying the caller-supplied
 * TagToFree against the stored tag (bugchecking BAD_POOL_CALLER 0x0A on
 * DBG builds when they differ). Page-aligned pointers are big-page
 * allocations: their tracker entry is looked up and removed, counters
 * updated, and the pages returned via MiFreePoolPages. Small blocks have
 * their quota returned (if quota-charged), are pushed onto the per-CPU or
 * global lookaside list when small enough and depth allows, and otherwise
 * are coalesced with free neighbor blocks -- releasing the whole page back
 * to Mm if the merged block spans it -- before being inserted on the
 * descriptor's free list.
 */
2492VOID
2493NTAPI
2495 IN ULONG TagToFree)
2496{
2497 PPOOL_HEADER Entry, NextEntry;
2498 USHORT BlockSize;
2499 KIRQL OldIrql;
2501 PPOOL_DESCRIPTOR PoolDesc;
2502 ULONG Tag;
2503 BOOLEAN Combined = FALSE;
2504 PFN_NUMBER PageCount, RealPageCount;
2505 PKPRCB Prcb = KeGetCurrentPrcb();
2508
2509 //
2510 // Check if any of the debug flags are enabled
2511 //
2518 {
2519 //
2520 // Check if special pool is enabled
2521 //
2523 {
2524 //
2525 // Check if it was allocated from a special pool
2526 //
2528 {
2529 //
2530 // Was deadlock verification also enabled? We can do some extra
2531 // checks at this point
2532 //
2534 {
2535 DPRINT1("Verifier not yet supported\n");
2536 }
2537
2538 //
2539 // It is, so handle it via special pool free routine
2540 //
2542 return;
2543 }
2544 }
2545
2546 //
2547 // For non-big page allocations, we'll do a bunch of checks in here
2548 //
2549 if (PAGE_ALIGN(P) != P)
2550 {
2551 //
2552 // Get the entry for this pool allocation
2553 // The pointer math here may look wrong or confusing, but it is quite right
2554 //
2555 Entry = P;
2556 Entry--;
2557
2558 //
2559 // Get the pool type
2560 //
2561 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2562
2563 //
2564 // FIXME: Many other debugging checks go here
2565 //
2567 }
2568 }
2569
2570 //
2571 // Check if this is a big page allocation
2572 //
2573 if (PAGE_ALIGN(P) == P)
2574 {
2575 //
2576 // We need to find the tag for it, so first we need to find out what
2577 // kind of allocation this was (paged or nonpaged), then we can go
2578 // ahead and try finding the tag for it. Remember to get rid of the
2579 // PROTECTED_POOL tag if it's found.
2580 //
2581 // Note that if at insertion time, we failed to add the tag for a big
2582 // pool allocation, we used a special tag called 'BIG' to identify the
2583 // allocation, and we may get this tag back. In this scenario, we must
2584 // manually get the size of the allocation by actually counting through
2585 // the PFN database.
2586 //
2589 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2590 if (!Tag)
2591 {
2592 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2593 ASSERT(Tag == ' GIB');
2594 PageCount = 1; // We are going to lie! This might screw up accounting?
2595 }
2596 else if (Tag & PROTECTED_POOL)
2597 {
2598 Tag &= ~PROTECTED_POOL;
2599 }
2600
2601 //
2602 // Check block tag
2603 //
2604 if (TagToFree && TagToFree != Tag)
2605 {
2606 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2607#if DBG
2608 /* Do not bugcheck in case this is a big allocation for which we didn't manage to insert the tag */
2609 if (Tag != ' GIB')
2610 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2611#endif
2612 }
2613
2614 //
2615 // We have our tag and our page count, so we can go ahead and remove this
2616 // tracker now
2617 //
2619
2620 //
2621 // Check if any of the debug flags are enabled
2622 //
2627 {
2628 //
2629 // Was deadlock verification also enabled? We can do some extra
2630 // checks at this point
2631 //
2633 {
2634 DPRINT1("Verifier not yet supported\n");
2635 }
2636
2637 //
2638 // FIXME: Many debugging checks go here
2639 //
2640 }
2641
2642 //
2643 // Update counters
2644 //
2645 PoolDesc = PoolVector[PoolType];
2648 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2649
2650 //
2651 // Do the real free now and update the last counter with the big page count
2652 //
2653 RealPageCount = MiFreePoolPages(P);
2654 ASSERT(RealPageCount == PageCount);
2656 -(LONG)RealPageCount);
2657 return;
2658 }
2659
2660 //
2661 // Get the entry for this pool allocation
2662 // The pointer math here may look wrong or confusing, but it is quite right
2663 //
2664 Entry = P;
2665 Entry--;
2667
2668 //
2669 // Get the size of the entry, and its pool type, then load the descriptor
2670 // for this pool type
2671 //
2672 BlockSize = Entry->BlockSize;
2673 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2674 PoolDesc = PoolVector[PoolType];
2675
2676 //
2677 // Make sure that the IRQL makes sense
2678 //
2680
2681 //
2682 // Get the pool tag and get rid of the PROTECTED_POOL flag
2683 //
2684 Tag = Entry->PoolTag;
2685 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2686
2687 //
2688 // Check block tag
2689 //
2690 if (TagToFree && TagToFree != Tag)
2691 {
2692 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2693#if DBG
2694 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2695#endif
2696 }
2697
2698 //
2699 // Track the removal of this allocation
2700 //
2702 BlockSize * POOL_BLOCK_SIZE,
2703 Entry->PoolType - 1);
2704
2705 //
2706 // Release pool quota, if any
2707 //
2708 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2709 {
/* The owning process pointer lives in the last PVOID slot of the block */
2710 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2711 if (Process)
2712 {
2713 if (Process->Pcb.Header.Type != ProcessObject)
2714 {
2715 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2716 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2717 KeBugCheckEx(BAD_POOL_CALLER,
2719 (ULONG_PTR)P,
2720 Tag,
2722 }
2725 }
2726 }
2727
2728 //
2729 // Is this allocation small enough to have come from a lookaside list?
2730 //
2731 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2732 {
2733 //
2734 // Try pushing it into the per-CPU lookaside list
2735 //
2737 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2738 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2739 LookasideList->TotalFrees++;
2740 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2741 {
2742 LookasideList->FreeHits++;
2744 return;
2745 }
2746
2747 //
2748 // We failed, try to push it into the global lookaside list
2749 //
2751 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2752 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2753 LookasideList->TotalFrees++;
2754 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2755 {
2756 LookasideList->FreeHits++;
2758 return;
2759 }
2760 }
2761
2762 //
2763 // Get the pointer to the next entry
2764 //
2765 NextEntry = POOL_BLOCK(Entry, BlockSize);
2766
2767 //
2768 // Update performance counters
2769 //
2771 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2772
2773 //
2774 // Acquire the pool lock
2775 //
2776 OldIrql = ExLockPool(PoolDesc);
2777
2778 //
2779 // Check if the next allocation is at the end of the page
2780 //
2782 if (PAGE_ALIGN(NextEntry) != NextEntry)
2783 {
2784 //
2785 // We may be able to combine the block if it's free
2786 //
2787 if (NextEntry->PoolType == 0)
2788 {
2789 //
2790 // The next block is free, so we'll do a combine
2791 //
2792 Combined = TRUE;
2793
2794 //
2795 // Make sure there's actual data in the block -- anything smaller
2796 // than this means we only have the header, so there's no linked list
2797 // for us to remove
2798 //
2799 if ((NextEntry->BlockSize != 1))
2800 {
2801 //
2802 // The block is at least big enough to have a linked list, so go
2803 // ahead and remove it
2804 //
2809 }
2810
2811 //
2812 // Our entry is now combined with the next entry
2813 //
2814 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2815 }
2816 }
2817
2818 //
2819 // Now check if there was a previous entry on the same page as us
2820 //
2821 if (Entry->PreviousSize)
2822 {
2823 //
2824 // Great, grab that entry and check if it's free
2825 //
2826 NextEntry = POOL_PREV_BLOCK(Entry);
2827 if (NextEntry->PoolType == 0)
2828 {
2829 //
2830 // It is, so we can do a combine
2831 //
2832 Combined = TRUE;
2833
2834 //
2835 // Make sure there's actual data in the block -- anything smaller
2836 // than this means we only have the header so there's no linked list
2837 // for us to remove
2838 //
2839 if ((NextEntry->BlockSize != 1))
2840 {
2841 //
2842 // The block is at least big enough to have a linked list, so go
2843 // ahead and remove it
2844 //
2849 }
2850
2851 //
2852 // Combine our original block (which might've already been combined
2853 // with the next block), into the previous block
2854 //
2855 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2856
2857 //
2858 // And now we'll work with the previous block instead
2859 //
2860 Entry = NextEntry;
2861 }
2862 }
2863
2864 //
2865 // By now, it may have been possible for our combined blocks to actually
2866 // have made up a full page (if there were only 2-3 allocations on the
2867 // page, they could've all been combined).
2868 //
2869 if ((PAGE_ALIGN(Entry) == Entry) &&
2871 {
2872 //
2873 // In this case, release the pool lock, update the performance counter,
2874 // and free the page
2875 //
2876 ExUnlockPool(PoolDesc, OldIrql);
2877 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2879 return;
2880 }
2881
2882 //
2883 // Otherwise, we now have a free block (or a combination of 2 or 3)
2884 //
2885 Entry->PoolType = 0;
2886 BlockSize = Entry->BlockSize;
2887 ASSERT(BlockSize != 1);
2888
2889 //
2890 // Check if we actually did combine it with anyone
2891 //
2892 if (Combined)
2893 {
2894 //
2895 // Get the first combined block (either our original to begin with, or
2896 // the one after the original, depending if we combined with the previous)
2897 //
2898 NextEntry = POOL_NEXT_BLOCK(Entry);
2899
2900 //
2901 // As long as the next block isn't on a page boundary, have it point
2902 // back to us
2903 //
2904 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2905 }
2906
2907 //
2908 // Insert this new free block, and release the pool lock
2909 //
2910 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2912 ExUnlockPool(PoolDesc, OldIrql);
2913}
2914
2915/*
2916 * @implemented
2917 */
2918VOID
2919NTAPI
2921{
2922 //
2923 // Just free without checking for the tag
2924 //
2925 ExFreePoolWithTag(P, 0);
2926}
2927
2928/*
2929 * @unimplemented
2930 */
2931SIZE_T
2932NTAPI
2935{
2936 //
2937 // Not implemented
2938 //
2940 return FALSE;
2941}
2942
2943/*
2944 * @implemented
2945 */
2946
2947PVOID
2948NTAPI
2951{
2952 //
2953 // Allocate the pool
2954 //
2956}
2957
2958/*
2959 * @implemented
2960 */
2961PVOID
2962NTAPI
2965 IN ULONG Tag,
2967{
2968 PVOID Buffer;
2969
2970 //
2971 // Allocate the pool
2972 //
2974 if (Buffer == NULL)
2975 {
2977 }
2978
2979 return Buffer;
2980}
2981
2982/*
2983 * @implemented
2984 */
2985PVOID
2986NTAPI
2989 IN ULONG Tag)
2990{
2991 BOOLEAN Raise = TRUE;
2992 PVOID Buffer;
2996
2997 //
2998 // Check if we should fail instead of raising an exception
2999 //
3001 {
3002 Raise = FALSE;
3003 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
3004 }
3005
3006 //
3007 // Inject the pool quota mask
3008 //
3010
3011 //
3012 // Check if we have enough space to add the quota owner process, as long as
3013 // this isn't the system process, which never gets charged quota
3014 //
3015 ASSERT(NumberOfBytes != 0);
3016 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
3018 {
3019 //
3020 // Add space for our EPROCESS pointer
3021 //
3022 NumberOfBytes += sizeof(PEPROCESS);
3023 }
3024 else
3025 {
3026 //
3027 // We won't be able to store the pointer, so don't use quota for this
3028 //
3030 }
3031
3032 //
3033 // Allocate the pool buffer now
3034 //
3036
3037 //
3038 // If the buffer is page-aligned, this is a large page allocation and we
3039 // won't touch it
3040 //
3041 if (PAGE_ALIGN(Buffer) != Buffer)
3042 {
3043 //
3044 // Also if special pool is enabled, and this was allocated from there,
3045 // we won't touch it either
3046 //
3049 {
3050 return Buffer;
3051 }
3052
3053 //
3054 // If it wasn't actually allocated with quota charges, ignore it too
3055 //
3056 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3057
3058 //
3059 // If this is the system process, we don't charge quota, so ignore
3060 //
3061 if (Process == PsInitialSystemProcess) return Buffer;
3062
3063 //
3064 // Actually go and charge quota for the process now
3065 //
3069 Entry->BlockSize * POOL_BLOCK_SIZE);
3070 if (!NT_SUCCESS(Status))
3071 {
3072 //
3073 // Quota failed, back out the allocation, clear the owner, and fail
3074 //
3075 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3077 if (Raise) RtlRaiseStatus(Status);
3078 return NULL;
3079 }
3080
3081 //
3082 // Quota worked, write the owner and then reference it before returning
3083 //
3084 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3086 }
3087 else if (!(Buffer) && (Raise))
3088 {
3089 //
3090 // The allocation failed, raise an error if we are in raise mode
3091 //
3093 }
3094
3095 //
3096 // Return the allocated buffer
3097 //
3098 return Buffer;
3099}
3100
3101/* EOF */
#define CODE_SEG(...)
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define ALIGN_DOWN_BY(size, align)
#define ALIGN_UP_BY(size, align)
unsigned char BOOLEAN
#define InterlockedIncrement
Definition: armddk.h:53
LONG NTSTATUS
Definition: precomp.h:26
#define DPRINT1
Definition: precomp.h:8
BOOL Verbose
Definition: chkdsk.c:72
LONG_PTR SSIZE_T
Definition: basetsd.h:181
#define MAXULONG_PTR
Definition: basetsd.h:103
#define UNIMPLEMENTED
Definition: debug.h:115
Definition: bufpool.h:45
_In_ PSCSI_REQUEST_BLOCK _Out_ NTSTATUS _Inout_ BOOLEAN * Retry
Definition: classpnp.h:312
#define NULL
Definition: types.h:112
#define TRUE
Definition: types.h:120
#define FALSE
Definition: types.h:117
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:32
#define P(row, col)
static int Hash(const char *)
Definition: reader.c:2257
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:984
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1026
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:1013
#define ULONG_PTR
Definition: config.h:101
#define _IRQL_restores_
Definition: driverspecs.h:233
#define _IRQL_requires_(irql)
Definition: driverspecs.h:229
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:43
#define ExAllocatePoolWithTag(hernya, size, tag)
Definition: env_spec_w32.h:350
#define IsListEmpty(ListHead)
Definition: env_spec_w32.h:954
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
UCHAR KIRQL
Definition: env_spec_w32.h:591
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
#define APC_LEVEL
Definition: env_spec_w32.h:695
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define NonPagedPool
Definition: env_spec_w32.h:307
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
#define KeInitializeSpinLock(sl)
Definition: env_spec_w32.h:604
#define PagedPool
Definition: env_spec_w32.h:308
ULONG ExPoolFailures
Definition: expool.c:57
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:55
#define POOL_BLOCK(x, i)
Definition: expool.c:63
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:114
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:296
ULONG ExpBigTableExpansionFailed
Definition: expool.c:48
#define POOL_FREE_BLOCK(x)
Definition: expool.c:62
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:376
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:441
BOOLEAN ExStopBadTags
Definition: expool.c:53
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1768
#define POOL_BIG_TABLE_USE_RATE
Definition: expool.c:31
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:760
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1852
VOID NTAPI ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:969
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:44
#define POOL_PREV_BLOCK(x)
Definition: expool.c:65
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1356
SIZE_T PoolTrackTableMask
Definition: expool.c:46
VOID NTAPI InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:1020
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:163
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:193
SIZE_T PoolBigPageTableSize
Definition: expool.c:47
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:457
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1321
ULONG ExpNumberOfPagedPools
Definition: expool.c:41
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:99
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2933
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:50
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:45
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:416
#define POOL_ENTRY(x)
Definition: expool.c:61
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:139
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2963
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:85
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:51
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:128
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:178
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1297
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:64
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:42
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:49
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:43
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1581
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
ULONG PoolHitTag
Definition: expool.c:52
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1273
VOID NTAPI ExpSeedHotTags(VOID)
Definition: expool.c:640
SIZE_T PoolTrackTableSize
Definition: expool.c:46
ULONG ExpPoolFlags
Definition: expool.c:56
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:54
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2949
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:151
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:851
ULONGLONG MiLastPoolDumpTime
Definition: expool.c:58
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1682
SIZE_T PoolBigPageTableHash
Definition: expool.c:47
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:92
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:121
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
#define ExAllocatePool(type, size)
Definition: fbtusb.h:44
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2554
_Must_inspect_result_ _In_ USHORT NewSize
Definition: fltkernel.h:975
unsigned int Mask
Definition: fpcontrol.c:82
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:223
Status
Definition: gdiplustypes.h:25
ASMGENDATA Table[]
Definition: genincdata.c:61
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
NTSYSAPI void WINAPI DbgBreakPoint(void)
#define InterlockedExchangeAdd
Definition: interlocked.h:181
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
#define _ReturnAddress()
Definition: intrin_arm.h:35
VOID KdbpPrint(_In_ PSTR Format, _In_ ...)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:3082
struct _POOL_HEADER * PPOOL_HEADER
if(dx< 0)
Definition: linetemp.h:194
LIST_ENTRY ExPoolLookasideListHead
Definition: lookas.c:22
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:305
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:271
#define POOL_BILLED_PROCESS_INVALID
Definition: miarm.h:306
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:279
#define POOL_BLOCK_SIZE
Definition: miarm.h:269
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:284
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:285
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:280
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:278
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:283
#define POOL_MAX_ALLOC
Definition: miarm.h:273
#define POOL_FLAG_VERIFIER
Definition: miarm.h:281
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:282
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:304
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3517
#define ASSERT(a)
Definition: mode.c:44
#define ExFreePoolWithTag(_P, _T)
Definition: module.h:1109
#define min(a, b)
Definition: monoChain.cc:55
#define _In_
Definition: ms_sal.h:308
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:356
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1146
@ ProcessObject
Definition: ketypes.h:409
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:319
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:317
@ FirstTry
Definition: copy.c:25
UCHAR KeNumberNodes
Definition: krnlinit.c:40
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:408
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:422
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:119
#define SESSION_POOL_MASK
Definition: mm.h:122
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:918
#define ExRaiseStatus
Definition: ntoskrnl.h:114
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
VOID MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
long LONG
Definition: pedump.c:60
unsigned short USHORT
Definition: pedump.c:61
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
NTSTATUS NTAPI PsChargeProcessPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Charges the process' quota pool. The type of quota to be charged depends upon the PoolType parameter.
Definition: quota.c:872
VOID NTAPI PsReturnPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Returns the pool quota that the process was taking up.
Definition: quota.c:907
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:108
#define KeQueryInterruptTime()
Definition: ke.h:37
ULONG PFN_NUMBER
Definition: ke.h:9
#define STATUS_SUCCESS
Definition: shellext.h:65
#define DPRINT
Definition: sndvol32.h:71
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
base of all file and directory entries
Definition: entries.h:83
Definition: ketypes.h:699
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:694
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:695
Definition: btrfs_drv.h:1876
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:145
Definition: typedefs.h:120
struct _LIST_ENTRY * Blink
Definition: typedefs.h:122
struct _LIST_ENTRY * Flink
Definition: typedefs.h:121
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:323
ULONG RunningAllocs
Definition: miarm.h:313
ULONG TotalPages
Definition: miarm.h:315
SIZE_T TotalBytes
Definition: miarm.h:321
ULONG RunningDeAllocs
Definition: miarm.h:314
ULONG TotalBigPages
Definition: miarm.h:316
SIZE_T PoolTrackTableSize
Definition: expool.c:36
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:37
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:35
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:38
USHORT PreviousSize
LONG NonPagedAllocs
Definition: miarm.h:370
SIZE_T NonPagedBytes
Definition: miarm.h:372
LONG NonPagedFrees
Definition: miarm.h:371
SIZE_T PagedBytes
Definition: miarm.h:375
SIZE_T PagedUsed
Definition: extypes.h:1143
ULONG TagUlong
Definition: extypes.h:1139
ULONG PagedFrees
Definition: extypes.h:1142
ULONG PagedAllocs
Definition: extypes.h:1141
ULONG NonPagedAllocs
Definition: extypes.h:1144
SIZE_T NonPagedUsed
Definition: extypes.h:1146
ULONG NonPagedFrees
Definition: extypes.h:1145
#define max(a, b)
Definition: svc.c:63
#define TAG_NONE
Definition: tag.h:110
uint32_t * PULONG_PTR
Definition: typedefs.h:65
uint32_t * PULONG
Definition: typedefs.h:59
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:255
unsigned char * PBOOLEAN
Definition: typedefs.h:53
INT POOL_TYPE
Definition: typedefs.h:78
#define NTAPI
Definition: typedefs.h:36
void * PVOID
Definition: typedefs.h:50
ULONG_PTR SIZE_T
Definition: typedefs.h:80
#define RtlCopyMemory(Destination, Source, Length)
Definition: typedefs.h:263
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
uint32_t ULONG_PTR
Definition: typedefs.h:65
#define IN
Definition: typedefs.h:39
int32_t * PLONG
Definition: typedefs.h:58
#define CONTAINING_RECORD(address, type, field)
Definition: typedefs.h:260
uint32_t ULONG
Definition: typedefs.h:59
uint64_t ULONGLONG
Definition: typedefs.h:67
#define OUT
Definition: typedefs.h:40
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
static int Link(const char **args)
Definition: vfdcmd.c:2414
_In_ WDFCOLLECTION _In_ ULONG Index
_Must_inspect_result_ _In_ WDFDEVICE _In_ BOOLEAN _In_opt_ PVOID Tag
Definition: wdfdevice.h:4065
_Must_inspect_result_ _In_ WDFDEVICE _In_ DEVICE_REGISTRY_PROPERTY _In_ _Strict_type_match_ POOL_TYPE PoolType
Definition: wdfdevice.h:3815
_Must_inspect_result_ _In_ WDFDEVICE _In_ PWDF_DEVICE_PROPERTY_DATA _In_ DEVPROPTYPE _In_ ULONG Size
Definition: wdfdevice.h:4533
_Must_inspect_result_ _In_ PWDF_DPC_CONFIG _In_ PWDF_OBJECT_ATTRIBUTES _Out_ WDFDPC * Dpc
Definition: wdfdpc.h:112
_In_ WDFINTERRUPT _In_ WDF_INTERRUPT_POLICY _In_ WDF_INTERRUPT_PRIORITY Priority
Definition: wdfinterrupt.h:655
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _In_ _Strict_type_match_ POOL_TYPE _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_opt_ ULONG _Out_ WDFLOOKASIDE * Lookaside
Definition: wdfmemory.h:414
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _Strict_type_match_ POOL_TYPE _In_opt_ ULONG PoolTag
Definition: wdfmemory.h:164
_Must_inspect_result_ _In_ WDFIORESLIST _In_ PIO_RESOURCE_DESCRIPTOR Descriptor
Definition: wdfresource.h:342
#define FORCEINLINE
Definition: wdftypes.h:67
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:409
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
#define ExReleaseSpinLock(Lock, OldIrql)
#define ExAllocatePoolWithQuotaTag(a, b, c)
Definition: exfuncs.h:530
#define ExAcquireSpinLock(Lock, OldIrql)
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:1153
#define PROTECTED_POOL
Definition: extypes.h:340
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE GENERAL_LOOKASIDE
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:4327
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:4303
#define POOL_RAISE_IF_ALLOCATION_FAILURE
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _Inout_ PLARGE_INTEGER NumberOfBytes
Definition: iotypes.h:1036
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:778
@ PagedPoolSession
Definition: ketypes.h:893
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:688
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:687
@ LockQueueNonPagedPoolLock
Definition: ketypes.h:664
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:689
#define ROUND_TO_PAGES(Size)
#define BYTES_TO_PAGES(Size)
#define PAGE_ALIGN(Va)
#define ObDereferenceObject
Definition: obfuncs.h:203
#define ObReferenceObject
Definition: obfuncs.h:204
#define PsGetCurrentProcess
Definition: psfuncs.h:17
#define NT_VERIFY(exp)
Definition: rtlfuncs.h:3287
#define InterlockedPushEntrySList(SListHead, SListEntry)
Definition: rtlfuncs.h:3389
#define InterlockedPopEntrySList(SListHead)
Definition: rtlfuncs.h:3392
__wchar_t WCHAR
Definition: xmlstorage.h:180
char CHAR
Definition: xmlstorage.h:175