ReactOS 0.4.15-dev-6644-g539123c
expool.c
Go to the documentation of this file.
1/*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9/* INCLUDES *******************************************************************/
10
11#include <ntoskrnl.h>
12#define NDEBUG
13#include <debug.h>
14
15#define MODULE_INVOLVED_IN_ARM3
16#include <mm/ARM3/miarm.h>
17
18#undef ExAllocatePoolWithQuota
19#undef ExAllocatePoolWithQuotaTag
20
21/* GLOBALS ********************************************************************/
22
23#define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25/*
26 * This defines when we shrink or expand the table.
27 * 3 --> keep the number of used entries in the 33%-66% of the table capacity.
28 * 4 --> 25% - 75%
29 * etc.
30 */
31#define POOL_BIG_TABLE_USE_RATE 4
32
33typedef struct _POOL_DPC_CONTEXT
34{
40
59
60/* Pool block/header/list access macros */
61#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
62#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
63#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
64#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
65#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
66
67/*
68 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
69 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
70 * pool code, but only for checked builds.
71 *
72 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
73 * that these checks are done even on retail builds, due to the increasing
74 * number of kernel-mode attacks which depend on dangling list pointers and other
75 * kinds of list-based attacks.
76 *
77 * For now, I will leave these checks on all the time, but later they are likely
78 * to be DBG-only, at least until there are enough kernel-mode security attacks
79 * against ReactOS to warrant the performance hit.
80 *
81 * For now, these are not made inline, so we can get good stack traces.
82 */
/*
 * ExpDecodePoolLink — clears the low bit of an encoded pool list pointer
 * to recover the real PLIST_ENTRY address. Pool list links are stored with
 * bit 0 set (see ExpEncodePoolLink) as a corruption-detection tag.
 * NOTE(review): the function's signature line is missing from this listing
 * (extraction dropped original lines 83-85) — presumably
 * "PLIST_ENTRY ExpDecodePoolLink(IN PLIST_ENTRY Link)"; confirm upstream.
 */
86{
87 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
88}
89
/*
 * ExpEncodePoolLink — sets the low bit of a pool list pointer before it is
 * stored in a Flink/Blink field. The tag bit is stripped again by
 * ExpDecodePoolLink; a link without the bit indicates tampering.
 * NOTE(review): the signature line is missing from this listing — presumably
 * "PLIST_ENTRY ExpEncodePoolLink(IN PLIST_ENTRY Link)"; confirm upstream.
 */
93{
94 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
95}
96
/*
 * ExpCheckPoolLinks — sanity-checks a pool free-list head: after decoding,
 * the head's forward link's Blink and backward link's Flink must both point
 * back at the head. On mismatch the system is taken down with
 * BAD_POOL_HEADER, subcode 3.
 * NOTE(review): the signature line and the last two KeBugCheckEx argument
 * lines (original 107-108) are missing from this listing — confirm against
 * the original expool.c before relying on the argument list shown here.
 */
97VOID
100{
101 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
102 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
103 {
104 KeBugCheckEx(BAD_POOL_HEADER,
105 3,
106 (ULONG_PTR)ListHead,
109 }
110}
111
/*
 * ExpInitializePoolListHead — makes the list empty: both links of the head
 * are set to the (encoded) head itself.
 * NOTE(review): the signature line (original 114) is missing from this
 * listing — presumably takes "IN PLIST_ENTRY ListHead"; confirm upstream.
 */
112VOID
113NTAPI
115{
116 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
117}
118
/*
 * ExpIsPoolListEmpty — a list is empty when its decoded forward link points
 * back at the head (mirror of ExpInitializePoolListHead above).
 * NOTE(review): the return-type and signature lines are missing from this
 * listing — presumably "BOOLEAN ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)";
 * confirm upstream.
 */
120NTAPI
122{
123 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
124}
125
/*
 * ExpRemovePoolEntryList — unlinks Entry from its list by splicing its
 * decoded neighbors together; all stored links are re-encoded. Entry's own
 * links are left untouched (standard RemoveEntryList semantics with
 * encode/decode added).
 * NOTE(review): the signature line (original 128) is missing from this
 * listing — presumably takes "IN PLIST_ENTRY Entry"; confirm upstream.
 */
126VOID
127NTAPI
129{
130 PLIST_ENTRY Blink, Flink;
131 Flink = ExpDecodePoolLink(Entry->Flink);
132 Blink = ExpDecodePoolLink(Entry->Blink);
133 Flink->Blink = ExpEncodePoolLink(Blink);
134 Blink->Flink = ExpEncodePoolLink(Flink);
135}
136
/*
 * ExpRemovePoolHeadList — pops and returns the first entry after the head
 * (encoded-link variant of RemoveHeadList). The head is re-pointed at the
 * popped entry's successor.
 * NOTE(review): the return-type and signature lines are missing from this
 * listing — presumably "PLIST_ENTRY ExpRemovePoolHeadList(IN PLIST_ENTRY
 * ListHead)"; confirm upstream. Behavior on an empty list is not guarded
 * here — callers presumably check ExpIsPoolListEmpty first.
 */
138NTAPI
140{
141 PLIST_ENTRY Entry, Flink;
142 Entry = ExpDecodePoolLink(ListHead->Flink);
143 Flink = ExpDecodePoolLink(Entry->Flink);
144 ListHead->Flink = ExpEncodePoolLink(Flink);
145 Flink->Blink = ExpEncodePoolLink(ListHead);
146 return Entry;
147}
148
/*
 * ExpRemovePoolTailList — pops and returns the last entry before the head
 * (encoded-link variant of RemoveTailList), mirror image of
 * ExpRemovePoolHeadList above.
 * NOTE(review): the return-type and signature lines are missing from this
 * listing — presumably "PLIST_ENTRY ExpRemovePoolTailList(IN PLIST_ENTRY
 * ListHead)"; confirm upstream.
 */
150NTAPI
152{
153 PLIST_ENTRY Entry, Blink;
154 Entry = ExpDecodePoolLink(ListHead->Blink);
155 Blink = ExpDecodePoolLink(Entry->Blink);
156 ListHead->Blink = ExpEncodePoolLink(Blink);
157 Blink->Flink = ExpEncodePoolLink(ListHead);
158 return Entry;
159}
160
/*
 * ExpInsertPoolTailList — inserts Entry before the head (i.e. at the tail),
 * validating the head's link integrity both before and after the splice via
 * ExpCheckPoolLinks. All stored links are encoded.
 * NOTE(review): the signature lines (original 163-164) are missing from this
 * listing — presumably "(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)";
 * confirm upstream.
 */
161VOID
162NTAPI
165{
166 PLIST_ENTRY Blink;
167 ExpCheckPoolLinks(ListHead);
168 Blink = ExpDecodePoolLink(ListHead->Blink);
169 Entry->Flink = ExpEncodePoolLink(ListHead);
170 Entry->Blink = ExpEncodePoolLink(Blink);
171 Blink->Flink = ExpEncodePoolLink(Entry);
172 ListHead->Blink = ExpEncodePoolLink(Entry);
173 ExpCheckPoolLinks(ListHead);
174}
175
/*
 * ExpInsertPoolHeadList — inserts Entry right after the head, mirror image
 * of ExpInsertPoolTailList above, with the same before/after link checks.
 * NOTE(review): the signature lines (original 178-179) are missing from this
 * listing — presumably "(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)";
 * confirm upstream.
 */
176VOID
177NTAPI
180{
181 PLIST_ENTRY Flink;
182 ExpCheckPoolLinks(ListHead);
183 Flink = ExpDecodePoolLink(ListHead->Flink);
184 Entry->Flink = ExpEncodePoolLink(Flink);
185 Entry->Blink = ExpEncodePoolLink(ListHead);
186 Flink->Blink = ExpEncodePoolLink(Entry);
187 ListHead->Flink = ExpEncodePoolLink(Entry);
188 ExpCheckPoolLinks(ListHead);
189}
190
/*
 * ExpCheckPoolHeader — validates a small-pool block header against its
 * neighbors on the same page. Checks performed (each failure bugchecks with
 * BAD_POOL_HEADER and a distinct subcode):
 *   6 - previous block not on the same page as this one
 *   5 - size fields of adjacent blocks disagree (both directions)
 *   7 - block claims no predecessor but is not page-aligned
 *   8 - block has a zero size
 *   9 - next block not on the same page as this one
 * NOTE(review): the signature line (original 193, presumably
 * "ExpCheckPoolHeader(IN PPOOL_HEADER Entry)") and the final argument line
 * of each KeBugCheckEx call are missing from this listing — confirm against
 * the original expool.c.
 */
191VOID
192NTAPI
194{
195 PPOOL_HEADER PreviousEntry, NextEntry;
196
197 /* Is there a block before this one? */
198 if (Entry->PreviousSize)
199 {
200 /* Get it */
201 PreviousEntry = POOL_PREV_BLOCK(Entry);
202
203 /* The two blocks must be on the same page! */
204 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
205 {
206 /* Something is awry */
207 KeBugCheckEx(BAD_POOL_HEADER,
208 6,
209 (ULONG_PTR)PreviousEntry,
210 __LINE__,
212 }
213
214 /* This block should also indicate that it's as large as we think it is */
215 if (PreviousEntry->BlockSize != Entry->PreviousSize)
216 {
217 /* Otherwise, someone corrupted one of the sizes */
218 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
219 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
220 Entry->PreviousSize, (char *)&Entry->PoolTag);
221 KeBugCheckEx(BAD_POOL_HEADER,
222 5,
223 (ULONG_PTR)PreviousEntry,
224 __LINE__,
226 }
227 }
228 else if (PAGE_ALIGN(Entry) != Entry)
229 {
230 /* If there's no block before us, we are the first block, so we should be on a page boundary */
231 KeBugCheckEx(BAD_POOL_HEADER,
232 7,
233 0,
234 __LINE__,
236 }
237
238 /* This block must have a size */
239 if (!Entry->BlockSize)
240 {
241 /* Someone must've corrupted this field */
242 if (Entry->PreviousSize)
243 {
244 PreviousEntry = POOL_PREV_BLOCK(Entry);
245 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
246 (char *)&PreviousEntry->PoolTag,
247 (char *)&Entry->PoolTag);
248 }
249 else
250 {
251 DPRINT1("Entry tag %.4s\n",
252 (char *)&Entry->PoolTag);
253 }
254 KeBugCheckEx(BAD_POOL_HEADER,
255 8,
256 0,
257 __LINE__,
259 }
260
261 /* Okay, now get the next block */
262 NextEntry = POOL_NEXT_BLOCK(Entry);
263
264 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
265 if (PAGE_ALIGN(NextEntry) != NextEntry)
266 {
267 /* The two blocks must be on the same page! */
268 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
269 {
270 /* Something is messed up */
271 KeBugCheckEx(BAD_POOL_HEADER,
272 9,
273 (ULONG_PTR)NextEntry,
274 __LINE__,
276 }
277
278 /* And this block should think we are as large as we truly are */
279 if (NextEntry->PreviousSize != Entry->BlockSize)
280 {
281 /* Otherwise, someone corrupted the field */
282 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
283 Entry->BlockSize, (char *)&Entry->PoolTag,
284 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
285 KeBugCheckEx(BAD_POOL_HEADER,
286 5,
287 (ULONG_PTR)NextEntry,
288 __LINE__,
290 }
291 }
292}
293
/*
 * ExpCheckPoolAllocation — verifies that the allocation at P carries the
 * expected pool tag and (optionally) lives in the expected pool type.
 *   - Page-aligned P: treated as a big-page allocation; the big-page table
 *     is scanned for P and the stored key compared against Tag
 *     (mismatch -> BAD_POOL_CALLER 0x0A). Pool type comes from
 *     MmDeterminePoolType.
 *   - Otherwise: the in-band POOL_HEADER immediately preceding P is checked
 *     (tag mismatch -> BAD_POOL_CALLER 0x0A); pool type is derived from the
 *     header's PoolType field (stored biased by +1, hence the -1).
 *   - If PoolType != -1, a type mismatch bugchecks with BAD_POOL_CALLER 0xCC.
 * NOTE(review): several lines are missing from this listing — the full
 * signature (a PoolType parameter is clearly used below), the big-page
 * table lock acquire/release (original 313/332), and the header check call
 * (original 354, presumably ExpCheckPoolHeader(Entry)). Confirm upstream.
 */
294VOID
295NTAPI
297 PVOID P,
299 ULONG Tag)
300{
302 ULONG i;
304 POOL_TYPE RealPoolType;
305
306 /* Get the pool header */
307 Entry = ((PPOOL_HEADER)P) - 1;
308
309 /* Check if this is a large allocation */
310 if (PAGE_ALIGN(P) == P)
311 {
312 /* Lock the pool table */
314
315 /* Find the pool tag */
316 for (i = 0; i < PoolBigPageTableSize; i++)
317 {
318 /* Check if this is our allocation */
319 if (PoolBigPageTable[i].Va == P)
320 {
321 /* Make sure the tag is ok */
322 if (PoolBigPageTable[i].Key != Tag)
323 {
324 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
325 }
326
327 break;
328 }
329 }
330
331 /* Release the lock */
333
334 if (i == PoolBigPageTableSize)
335 {
336 /* Did not find the allocation */
337 //ASSERT(FALSE);
338 }
339
340 /* Get Pool type by address */
341 RealPoolType = MmDeterminePoolType(P);
342 }
343 else
344 {
345 /* Verify the tag */
346 if (Entry->PoolTag != Tag)
347 {
348 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
349 &Tag, &Entry->PoolTag, Entry->PoolTag);
350 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
351 }
352
353 /* Check the rest of the header */
355
356 /* Get Pool type from entry */
357 RealPoolType = (Entry->PoolType - 1);
358 }
359
360 /* Should we check the pool type? */
361 if (PoolType != -1)
362 {
363 /* Verify the pool type */
364 if (RealPoolType != PoolType)
365 {
366 DPRINT1("Wrong pool type! Expected %s, got %s\n",
367 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
368 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
369 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
370 }
371 }
372}
373
/*
 * ExpCheckPoolBlocks — walks every small-pool block on the page containing
 * Block, from the page start to the page end, validating the chain and
 * confirming that Block itself is one of the blocks. Accumulated BlockSize
 * units (POOL_BLOCK_SIZE granularity) bound the walk to one page. If the
 * walk doesn't end exactly at a page boundary, or Block was never seen, the
 * system is taken down with BAD_POOL_HEADER subcode 10.
 * NOTE(review): missing from this listing — the signature (original 376,
 * presumably "IN PVOID Block"), the Entry declaration (original 380/381),
 * the per-entry header validation call (original 393-394, presumably
 * ExpCheckPoolHeader(Entry)), and the advance to the next block (original
 * 397, presumably Entry = POOL_NEXT_BLOCK(Entry)). Confirm upstream.
 */
374VOID
375NTAPI
377{
378 BOOLEAN FoundBlock = FALSE;
379 SIZE_T Size = 0;
381
382 /* Get the first entry for this page, make sure it really is the first */
383 Entry = PAGE_ALIGN(Block);
384 ASSERT(Entry->PreviousSize == 0);
385
386 /* Now scan each entry */
387 while (TRUE)
388 {
389 /* When we actually found our block, remember this */
390 if (Entry == Block) FoundBlock = TRUE;
391
392 /* Now validate this block header */
395 /* And go to the next one, keeping track of our size */
396 Size += Entry->BlockSize;
398
399 /* If we hit the last block, stop */
400 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
401
402 /* If we hit the end of the page, stop */
403 if (PAGE_ALIGN(Entry) == Entry) break;
404 }
405
406 /* We must've found our block, and we must have hit the end of the page */
407 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
408 {
409 /* Otherwise, the blocks are messed up */
410 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
411 }
412}
413
/*
 * ExpCheckPoolIrqlLevel — enforces the IRQL contract for pool operations:
 * per the comment below, paged pool requires <= APC_LEVEL and nonpaged pool
 * requires <= DISPATCH_LEVEL. A violation bugchecks with BAD_POOL_CALLER.
 * NOTE(review): the signature (originals 416-417, PoolType/NumberOfBytes
 * parameters plus the visible "IN PVOID Entry"), the actual IRQL condition
 * (originals 424-426), and two KeBugCheckEx argument lines are missing from
 * this listing — confirm against the original expool.c.
 */
415VOID
418 IN PVOID Entry)
419{
420 //
421 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
422 // be DISPATCH_LEVEL or lower for Non Paged Pool
423 //
427 {
428 //
429 // Take the system down
430 //
431 KeBugCheckEx(BAD_POOL_CALLER,
434 PoolType,
436 }
437}
438
/*
 * ExpComputeHashForTag — hashes a 4-byte pool tag into a tracker-table
 * bucket index: multiply by the prime 40543, XOR the low dword with the
 * high dword of the 64-bit product, then mask with BucketMask.
 * NOTE(review): the signature lines (original 441, presumably
 * "ExpComputeHashForTag(IN ULONG Tag, ...)" plus the visible BucketMask
 * parameter) are missing from this listing — confirm upstream.
 */
440ULONG
442 IN SIZE_T BucketMask)
443{
444 //
445 // Compute the hash by multiplying with a large prime number and then XORing
446 // with the HIDWORD of the result.
447 //
448 // Finally, AND with the bucket mask to generate a valid index/bucket into
449 // the table
450 //
451 ULONGLONG Result = (ULONGLONG)40543 * Tag;
452 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
453}
454
/*
 * ExpComputePartialHashForAddress — partial hash for the big-page tracker:
 * the address is reduced (per the comment, to a page number) and its bytes
 * are XOR-folded together. The bucket mask is deliberately NOT applied here;
 * the caller masks under the expansion pushlock because the table may be
 * resized concurrently.
 * NOTE(review): the signature (original 457, presumably "IN PVOID BaseAddress")
 * and the Result computation line (original 469, presumably
 * Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT)) are missing from
 * this listing — confirm upstream.
 */
456ULONG
458{
460 //
461 // Compute the hash by converting the address into a page number, and then
462 // XORing each nibble with the next one.
463 //
464 // We do *NOT* AND with the bucket mask at this point because big table expansion
465 // might happen. Therefore, the final step of the hash must be performed
466 // while holding the expansion pushlock, and this is why we call this a
467 // "partial" hash only.
468 //
470 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
471}
472
473#if DBG
474/*
475 * FORCEINLINE
476 * BOOLEAN
477 * ExpTagAllowPrint(CHAR Tag);
478 */
479#define ExpTagAllowPrint(Tag) \
480 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)
481
482#ifdef KDBG
483#include <kdbg/kdb.h>
484#endif
485
486#ifdef KDBG
487#define MiDumperPrint(dbg, fmt, ...) \
488 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
489 else DPRINT1(fmt, ##__VA_ARGS__)
490#else
491#define MiDumperPrint(dbg, fmt, ...) \
492 DPRINT1(fmt, ##__VA_ARGS__)
493#endif
494
/*
 * MiDumpPoolConsumers (DBG only) — dumps per-tag pool usage from the
 * tracker table, either to the debug log (out-of-memory path) or to the
 * kernel debugger (KDBG path), optionally verbose (allocs/frees/diff/used
 * for both pools vs. just allocs/used). Tags are matched against the
 * caller-supplied Tag through Mask so wildcard queries work; printable tags
 * are shown as 'abcd', others as hex, and untagged entries as "Anon".
 * NOTE(review): missing from this listing — the signature (original 496,
 * presumably CalledFromDbg/Tag/Mask/Flags parameters), the TableEntry
 * declaration (original 499), the Verbose flag derivation from Flags
 * (original 520), and the per-iteration TableEntry assignment (originals
 * 541-543). Confirm against the original expool.c.
 */
495VOID
497{
498 SIZE_T i;
500
501 //
502 // Only print header if called from OOM situation
503 //
504 if (!CalledFromDbg)
505 {
506 DPRINT1("---------------------\n");
507 DPRINT1("Out of memory dumper!\n");
508 }
509#ifdef KDBG
510 else
511 {
512 KdbpPrint("Pool Used:\n");
513 }
514#endif
515
516 //
517 // Remember whether we'll have to be verbose
518 // This is the only supported flag!
519 //
521
522 //
523 // Print table header
524 //
525 if (Verbose)
526 {
527 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
528 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
529 }
530 else
531 {
532 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
533 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
534 }
535
536 //
537 // We'll extract allocations for all the tracked pools
538 //
539 for (i = 0; i < PoolTrackTableSize; ++i)
540 {
542
544
545 //
546 // We only care about tags which have allocated memory
547 //
548 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
549 {
550 //
551 // If there's a tag, attempt to do a pretty print
552 // only if it matches the caller's tag, or if
553 // any tag is allowed
554 // For checking whether it matches caller's tag,
555 // use the mask to make sure not to mess with the wildcards
556 //
557 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
558 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
559 {
560 CHAR Tag[4];
561
562 //
563 // Extract each 'component' and check whether they are printable
564 //
565 Tag[0] = TableEntry->Key & 0xFF;
566 Tag[1] = TableEntry->Key >> 8 & 0xFF;
567 Tag[2] = TableEntry->Key >> 16 & 0xFF;
568 Tag[3] = TableEntry->Key >> 24 & 0xFF;
569
570 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
571 {
572 //
573 // Print in direct order to make !poolused TAG usage easier
574 //
575 if (Verbose)
576 {
577 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
578 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
579 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
580 TableEntry->PagedAllocs, TableEntry->PagedFrees,
581 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
582 }
583 else
584 {
585 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
586 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
587 TableEntry->PagedAllocs, TableEntry->PagedBytes);
588 }
589 }
590 else
591 {
592 if (Verbose)
593 {
594 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
595 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
596 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
597 TableEntry->PagedAllocs, TableEntry->PagedFrees,
598 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
599 }
600 else
601 {
602 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
603 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
604 TableEntry->PagedAllocs, TableEntry->PagedBytes);
605 }
606 }
607 }
608 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
609 {
610 if (Verbose)
611 {
612 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
613 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
614 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
615 TableEntry->PagedAllocs, TableEntry->PagedFrees,
616 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
617 }
618 else
619 {
620 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
621 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
622 TableEntry->PagedAllocs, TableEntry->PagedBytes);
623 }
624 }
625 }
626 }
627
628 if (!CalledFromDbg)
629 {
630 DPRINT1("---------------------\n");
631 }
632}
633#endif
634
635/* PRIVATE FUNCTIONS **********************************************************/
636
/*
 * ExpSeedHotTags (INIT-only) — pre-populates the pool tracker table with 64
 * frequently-used ("hot") pool tags so their buckets are claimed early and
 * later allocations with these tags hit the fast path. Each tag is hashed
 * into the table and placed in the first free bucket found by linear
 * probing (wrapping with PoolTrackTableMask), never using the last entry of
 * the table. Tags appear byte-reversed ('looP' == "Pool") because tags are
 * stored as little-endian ULONGs.
 * NOTE(review): missing from this listing — the function signature (original
 * 640), the TrackTable local declaration (original 643, presumably
 * initialized from PoolTrackTable), and the Hash computation (original 722,
 * presumably Hash = ExpComputeHashForTag(Key, PoolTrackTableMask)). Confirm
 * upstream.
 */
637CODE_SEG("INIT")
638VOID
639NTAPI
641{
642 ULONG i, Key, Hash, Index;
644 ULONG TagList[] =
645 {
646 ' oI',
647 ' laH',
648 'PldM',
649 'LooP',
650 'tSbO',
651 ' prI',
652 'bdDN',
653 'LprI',
654 'pOoI',
655 ' ldM',
656 'eliF',
657 'aVMC',
658 'dSeS',
659 'CFtN',
660 'looP',
661 'rPCT',
662 'bNMC',
663 'dTeS',
664 'sFtN',
665 'TPCT',
666 'CPCT',
667 ' yeK',
668 'qSbO',
669 'mNoI',
670 'aEoI',
671 'cPCT',
672 'aFtN',
673 '0ftN',
674 'tceS',
675 'SprI',
676 'ekoT',
677 ' eS',
678 'lCbO',
679 'cScC',
680 'lFtN',
681 'cAeS',
682 'mfSF',
683 'kWcC',
684 'miSF',
685 'CdfA',
686 'EdfA',
687 'orSF',
688 'nftN',
689 'PRIU',
690 'rFpN',
691 'RFpN',
692 'aPeS',
693 'sUeS',
694 'FpcA',
695 'MpcA',
696 'cSeS',
697 'mNbO',
698 'sFpN',
699 'uLeS',
700 'DPcS',
701 'nevE',
702 'vrqR',
703 'ldaV',
704 ' pP',
705 'SdaV',
706 ' daV',
707 'LdaV',
708 'FdaV',
709 ' GIB',
710 };
711
712 //
713 // Loop all 64 hot tags
714 //
715 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
716 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
717 {
718 //
719 // Get the current tag, and compute its hash in the tracker table
720 //
721 Key = TagList[i];
723
724 //
725 // Loop all the hashes in this index/bucket
726 //
727 Index = Hash;
728 while (TRUE)
729 {
730 //
731 // Find an empty entry, and make sure this isn't the last hash that
732 // can fit.
733 //
734 // On checked builds, also make sure this is the first time we are
735 // seeding this tag.
736 //
737 ASSERT(TrackTable[Hash].Key != Key);
738 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
739 {
740 //
741 // It has been seeded, move on to the next tag
742 //
743 TrackTable[Hash].Key = Key;
744 break;
745 }
746
747 //
748 // This entry was already taken, compute the next possible hash while
749 // making sure we're not back at our initial index.
750 //
751 ASSERT(TrackTable[Hash].Key != Key);
752 Hash = (Hash + 1) & PoolTrackTableMask;
753 if (Hash == Index) break;
754 }
755 }
756}
757
/*
 * ExpRemovePoolTracker — accounts a pool free against the tracker table:
 * finds the bucket for Key (linear probing from its hash) and increments
 * the NonPagedFrees or PagedFrees counter; byte counters are presumably
 * decremented on the missing lines (originals 812-813 / 817-818) — confirm
 * upstream. PROTECTED_POOL is stripped from the tag first, and hitting
 * PoolHitTag triggers a debugger break. An empty bucket before the table's
 * last slot indicates inconsistent tracking (debug print + ASSERT); table
 * expansion is unsupported, so exhausting all buckets just logs and drops
 * the update.
 * NOTE(review): also missing from this listing — the signature (originals
 * 760-762, presumably Key/PoolType/NumberOfBytes), local declarations
 * (originals 764-765), the Table pointer setup (originals 787/789-790), the
 * TableEntry assignment in the loop (original 802), and the pool-type test
 * (original 809, presumably a BASE_POOL_TYPE_MASK check). Confirm upstream.
 */
758VOID
759NTAPI
763{
766 SIZE_T TableMask, TableSize;
767
768 //
769 // Remove the PROTECTED_POOL flag which is not part of the tag
770 //
771 Key &= ~PROTECTED_POOL;
772
773 //
774 // With WinDBG you can set a tag you want to break on when an allocation is
775 // attempted
776 //
777 if (Key == PoolHitTag) DbgBreakPoint();
778
779 //
780 // Why the double indirection? Because normally this function is also used
781 // when doing session pool allocations, which has another set of tables,
782 // sizes, and masks that live in session pool. Now we don't support session
783 // pool so we only ever use the regular tables, but I'm keeping the code this
784 // way so that the day we DO support session pool, it won't require that
785 // many changes
786 //
788 TableMask = PoolTrackTableMask;
791
792 //
793 // Compute the hash for this key, and loop all the possible buckets
794 //
795 Hash = ExpComputeHashForTag(Key, TableMask);
796 Index = Hash;
797 while (TRUE)
798 {
799 //
800 // Have we found the entry for this tag? */
801 //
803 if (TableEntry->Key == Key)
804 {
805 //
806 // Decrement the counters depending on if this was paged or nonpaged
807 // pool
808 //
810 {
811 InterlockedIncrement(&TableEntry->NonPagedFrees);
814 return;
815 }
816 InterlockedIncrement(&TableEntry->PagedFrees);
819 return;
820 }
821
822 //
823 // We should have only ended up with an empty entry if we've reached
824 // the last bucket
825 //
826 if (!TableEntry->Key)
827 {
828 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
829 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
830 ASSERT(Hash == TableMask);
831 }
832
833 //
834 // This path is hit when we don't have an entry, and the current bucket
835 // is full, so we simply try the next one
836 //
837 Hash = (Hash + 1) & TableMask;
838 if (Hash == Index) break;
839 }
840
841 //
842 // And finally this path is hit when all the buckets are full, and we need
843 // some expansion. This path is not yet supported in ReactOS and so we'll
844 // ignore the tag
845 //
846 DPRINT1("Out of pool tag space, ignoring...\n");
847}
848
/*
 * ExpInsertPoolTracker — accounts a pool allocation against the tracker
 * table: finds (or creates) the bucket for Key via linear probing from its
 * hash and increments the NonPagedAllocs or PagedAllocs counter; the byte
 * counters are presumably updated on the missing lines (originals 915/919)
 * — confirm upstream. PROTECTED_POOL is stripped first; PoolHitTag breaks
 * into the debugger; ExStopBadTags asserts on tags with fewer than two
 * printable characters. New buckets are claimed under a lock (the
 * acquire/release lines, originals 932/942, are missing from this listing)
 * with a re-check to handle the race, then the loop re-runs so the winner's
 * increment path executes. The last table slot is never claimed; if all
 * buckets are full the update is logged and dropped (no expansion support).
 * NOTE(review): also missing — the signature (originals 851-853), local
 * declarations (855-857), ASSERT lines (879-880), Table setup (890/892-893),
 * the TableEntry assignment (905), the pool-type test (912), and the
 * Table[Hash] re-fetch (939). Confirm against the original expool.c.
 */
849VOID
850NTAPI
854{
858 SIZE_T TableMask, TableSize;
859
860 //
861 // Remove the PROTECTED_POOL flag which is not part of the tag
862 //
863 Key &= ~PROTECTED_POOL;
864
865 //
866 // With WinDBG you can set a tag you want to break on when an allocation is
867 // attempted
868 //
869 if (Key == PoolHitTag) DbgBreakPoint();
870
871 //
872 // There is also an internal flag you can set to break on malformed tags
873 //
874 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
875
876 //
877 // ASSERT on ReactOS features not yet supported
878 //
881
882 //
883 // Why the double indirection? Because normally this function is also used
884 // when doing session pool allocations, which has another set of tables,
885 // sizes, and masks that live in session pool. Now we don't support session
886 // pool so we only ever use the regular tables, but I'm keeping the code this
887 // way so that the day we DO support session pool, it won't require that
888 // many changes
889 //
891 TableMask = PoolTrackTableMask;
894
895 //
896 // Compute the hash for this key, and loop all the possible buckets
897 //
898 Hash = ExpComputeHashForTag(Key, TableMask);
899 Index = Hash;
900 while (TRUE)
901 {
902 //
903 // Do we already have an entry for this tag? */
904 //
906 if (TableEntry->Key == Key)
907 {
908 //
909 // Increment the counters depending on if this was paged or nonpaged
910 // pool
911 //
913 {
914 InterlockedIncrement(&TableEntry->NonPagedAllocs);
916 return;
917 }
918 InterlockedIncrement(&TableEntry->PagedAllocs);
920 return;
921 }
922
923 //
924 // We don't have an entry yet, but we've found a free bucket for it
925 //
926 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
927 {
928 //
929 // We need to hold the lock while creating a new entry, since other
930 // processors might be in this code path as well
931 //
933 if (!PoolTrackTable[Hash].Key)
934 {
935 //
936 // We've won the race, so now create this entry in the bucket
937 //
938 ASSERT(Table[Hash].Key == 0);
940 TableEntry->Key = Key;
941 }
943
944 //
945 // Now we force the loop to run again, and we should now end up in
946 // the code path above which does the interlocked increments...
947 //
948 continue;
949 }
950
951 //
952 // This path is hit when we don't have an entry, and the current bucket
953 // is full, so we simply try the next one
954 //
955 Hash = (Hash + 1) & TableMask;
956 if (Hash == Index) break;
957 }
958
959 //
960 // And finally this path is hit when all the buckets are full, and we need
961 // some expansion. This path is not yet supported in ReactOS and so we'll
962 // ignore the tag
963 //
964 DPRINT1("Out of pool tag space, ignoring...\n");
965}
966
/*
 * ExInitializePoolDescriptor (INIT-only) — fills in a POOL_DESCRIPTOR:
 * records the caller's type/index/threshold/lock, zeroes all accounting
 * counters, clears the pending-free (lock-free deferred free) state, and
 * initializes each of the POOL_LISTS_PER_PAGE free-list heads.
 * NOTE(review): the signature lines (originals 970-971, presumably
 * PoolDescriptor and PoolType parameters ahead of the visible
 * PoolIndex/Threshold/PoolLock) and original line 1015 (per the comment, a
 * session-pool-related statement) are missing from this listing — confirm
 * upstream.
 */
967CODE_SEG("INIT")
968VOID
969NTAPI
972 IN ULONG PoolIndex,
973 IN ULONG Threshold,
974 IN PVOID PoolLock)
975{
976 PLIST_ENTRY NextEntry, LastEntry;
977
978 //
979 // Setup the descriptor based on the caller's request
980 //
981 PoolDescriptor->PoolType = PoolType;
982 PoolDescriptor->PoolIndex = PoolIndex;
983 PoolDescriptor->Threshold = Threshold;
984 PoolDescriptor->LockAddress = PoolLock;
985
986 //
987 // Initialize accounting data
988 //
989 PoolDescriptor->RunningAllocs = 0;
990 PoolDescriptor->RunningDeAllocs = 0;
991 PoolDescriptor->TotalPages = 0;
992 PoolDescriptor->TotalBytes = 0;
993 PoolDescriptor->TotalBigPages = 0;
994
995 //
996 // Nothing pending for now
997 //
998 PoolDescriptor->PendingFrees = NULL;
999 PoolDescriptor->PendingFreeDepth = 0;
1000
1001 //
1002 // Loop all the descriptor's allocation lists and initialize them
1003 //
1004 NextEntry = PoolDescriptor->ListHeads;
1005 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
1006 while (NextEntry < LastEntry)
1007 {
1008 ExpInitializePoolListHead(NextEntry);
1009 NextEntry++;
1010 }
1011
1012 //
1013 // Note that ReactOS does not support Session Pool Yet
1014 //
1016}
1017
/*
 * InitializePool (INIT-only) — called once per base pool type during boot.
 *
 * NonPagedPool path: sizes the pool tracker table from a (registry-derived)
 * power-of-two value, defaulting to 2048 entries (minimum 64), allocates it
 * with retry-by-halving (bugchecking MUST_SUCCEED_POOL_EMPTY at size 1),
 * zeroes it, seeds the hot tags, then repeats the same sizing/allocation
 * dance for the big-page tracker table (default 4096, minimum 64, entries
 * pre-marked free), inserts the generic 'Pool' tracker for the big table's
 * own memory, and initializes the nonpaged pool descriptor.
 *
 * PagedPool path: allocates a combined guarded-mutex + descriptor vector
 * (bugchecking on failure) and initializes the paged pool descriptor, then
 * inserts a generic 'Pool' tracker.
 *
 * NOTE(review): many lines are missing from this listing — the signature
 * (original 1021, presumably PoolType ahead of the visible Threshold), local
 * declarations (1024-1025), the TableSize seeds from registry values (1041 /
 * 1123), the MiAllocatePoolPages calls' first lines (1086 / 1159-1160), the
 * overflow guard in the big-table loop (1153/1155), the mask computation and
 * RtlZeroMemory lines (1109-1113, 1180-1182, 1185), the DPRINT argument
 * lines (1192/1194), tracker-size arguments (1200, 1267), the spinlock init
 * (1212), and the descriptor/mutex setup lines (1217-1219, 1234, 1253-1257,
 * 1261). Confirm all of these against the original expool.c.
 */
1018CODE_SEG("INIT")
1019VOID
1020NTAPI
1022 IN ULONG Threshold)
1023{
1026 ULONG i;
1027
1028 //
1029 // Check what kind of pool this is
1030 //
1031 if (PoolType == NonPagedPool)
1032 {
1033 //
1034 // Compute the track table size and convert it from a power of two to an
1035 // actual byte size
1036 //
1037 // NOTE: On checked builds, we'll assert if the registry table size was
1038 // invalid, while on retail builds we'll just break out of the loop at
1039 // that point.
1040 //
1042 for (i = 0; i < 32; i++)
1043 {
1044 if (TableSize & 1)
1045 {
1046 ASSERT((TableSize & ~1) == 0);
1047 if (!(TableSize & ~1)) break;
1048 }
1049 TableSize >>= 1;
1050 }
1051
1052 //
1053 // If we hit bit 32, than no size was defined in the registry, so
1054 // we'll use the default size of 2048 entries.
1055 //
1056 // Otherwise, use the size from the registry, as long as it's not
1057 // smaller than 64 entries.
1058 //
1059 if (i == 32)
1060 {
1061 PoolTrackTableSize = 2048;
1062 }
1063 else
1064 {
1065 PoolTrackTableSize = max(1 << i, 64);
1066 }
1067
1068 //
1069 // Loop trying with the biggest specified size first, and cut it down
1070 // by a power of two each iteration in case not enough memory exist
1071 //
1072 while (TRUE)
1073 {
1074 //
1075 // Do not allow overflow
1076 //
1077 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1078 {
1079 PoolTrackTableSize >>= 1;
1080 continue;
1081 }
1082
1083 //
1084 // Allocate the tracker table and exit the loop if this worked
1085 //
1087 (PoolTrackTableSize + 1) *
1088 sizeof(POOL_TRACKER_TABLE));
1089 if (PoolTrackTable) break;
1090
1091 //
1092 // Otherwise, as long as we're not down to the last bit, keep
1093 // iterating
1094 //
1095 if (PoolTrackTableSize == 1)
1096 {
1097 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1098 TableSize,
1099 0xFFFFFFFF,
1100 0xFFFFFFFF,
1101 0xFFFFFFFF);
1102 }
1103 PoolTrackTableSize >>= 1;
1104 }
1105
1106 //
1107 // Add one entry, compute the hash, and zero the table
1108 //
1111
1114
1115 //
1116 // Finally, add the most used tags to speed up those allocations
1117 //
1119
1120 //
1121 // We now do the exact same thing with the tracker table for big pages
1122 //
1124 for (i = 0; i < 32; i++)
1125 {
1126 if (TableSize & 1)
1127 {
1128 ASSERT((TableSize & ~1) == 0);
1129 if (!(TableSize & ~1)) break;
1130 }
1131 TableSize >>= 1;
1132 }
1133
1134 //
1135 // For big pages, the default tracker table is 4096 entries, while the
1136 // minimum is still 64
1137 //
1138 if (i == 32)
1139 {
1140 PoolBigPageTableSize = 4096;
1141 }
1142 else
1143 {
1144 PoolBigPageTableSize = max(1 << i, 64);
1145 }
1146
1147 //
1148 // Again, run the exact same loop we ran earlier, but this time for the
1149 // big pool tracker instead
1150 //
1151 while (TRUE)
1152 {
1154 {
1156 continue;
1157 }
1158
1161 sizeof(POOL_TRACKER_BIG_PAGES));
1162 if (PoolBigPageTable) break;
1163
1164 if (PoolBigPageTableSize == 1)
1165 {
1166 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1167 TableSize,
1168 0xFFFFFFFF,
1169 0xFFFFFFFF,
1170 0xFFFFFFFF);
1171 }
1172
1174 }
1175
1176 //
1177 // An extra entry is not needed for for the big pool tracker, so just
1178 // compute the hash and zero it
1179 //
1183 for (i = 0; i < PoolBigPageTableSize; i++)
1184 {
1186 }
1187
1188 //
1189 // During development, print this out so we can see what's happening
1190 //
1191 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1193 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1195
1196 //
1197 // Insert the generic tracker for all of big pool
1198 //
1199 ExpInsertPoolTracker('looP',
1201 sizeof(POOL_TRACKER_BIG_PAGES)),
1202 NonPagedPool);
1203
1204 //
1205 // No support for NUMA systems at this time
1206 //
1207 ASSERT(KeNumberNodes == 1);
1208
1209 //
1210 // Initialize the tag spinlock
1211 //
1213
1214 //
1215 // Initialize the nonpaged pool descriptor
1216 //
1220 0,
1221 Threshold,
1222 NULL);
1223 }
1224 else
1225 {
1226 //
1227 // No support for NUMA systems at this time
1228 //
1229 ASSERT(KeNumberNodes == 1);
1230
1231 //
1232 // Allocate the pool descriptor
1233 //
1235 sizeof(KGUARDED_MUTEX) +
1236 sizeof(POOL_DESCRIPTOR),
1237 'looP');
1238 if (!Descriptor)
1239 {
1240 //
1241 // This is really bad...
1242 //
1243 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1244 0,
1245 -1,
1246 -1,
1247 -1);
1248 }
1249
1250 //
1251 // Setup the vector and guarded mutex for paged pool
1252 //
1258 PagedPool,
1259 0,
1260 Threshold,
1262
1263 //
1264 // Insert the generic tracker for all of nonpaged pool
1265 //
1266 ExpInsertPoolTracker('looP',
1268 NonPagedPool);
1269 }
1270}
1271
/*
 * ExLockPool — acquires the lock protecting a pool descriptor and returns
 * the IRQL to restore on unlock. Nonpaged pool uses a queued spinlock (the
 * acquire line, original 1284, is missing from this listing — confirm
 * upstream); paged pool uses the descriptor's guarded mutex and reports
 * APC_LEVEL as the restore value.
 * NOTE(review): the signature line (original 1274, presumably
 * "IN PPOOL_DESCRIPTOR Descriptor") is also missing — confirm upstream.
 */
1273KIRQL
1275{
1276 //
1277 // Check if this is nonpaged pool
1278 //
1279 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1280 {
1281 //
1282 // Use the queued spin lock
1283 //
1285 }
1286 else
1287 {
1288 //
1289 // Use the guarded mutex
1290 //
1291 KeAcquireGuardedMutex(Descriptor->LockAddress);
1292 return APC_LEVEL;
1293 }
1294}
1295
/*
 * ExUnlockPool — releases the lock taken by ExLockPool: queued spinlock for
 * nonpaged pool (the release line, original 1309, is missing from this
 * listing — it presumably restores the OldIrql returned by ExLockPool;
 * confirm upstream), guarded mutex for paged pool.
 * NOTE(review): the signature lines (originals 1298-1299, presumably
 * Descriptor plus an OldIrql parameter) are also missing — confirm upstream.
 */
1297VOID
1300{
1301 //
1302 // Check if this is nonpaged pool
1303 //
1304 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1305 {
1306 //
1307 // Use the queued spin lock
1308 //
1310 }
1311 else
1312 {
1313 //
1314 // Use the guarded mutex
1315 //
1316 KeReleaseGuardedMutex(Descriptor->LockAddress);
1317 }
1318}
1319
/*
 * ExpGetPoolTagInfoTarget — per-processor "Generic DPC" callback used by
 * ExGetPoolTagInfo: exactly one processor wins a race (the winner-selection
 * line, original 1334, is missing from this listing — confirm upstream) and
 * copies the live tracker table into the context's snapshot buffer; all
 * processors then synchronize and decrement the completion barrier (the
 * synchronize/decrement lines, originals 1351-1352, are also missing).
 * NOTE(review): the signature (originals 1322-1325, a KDPC plus context
 * arguments per the generic-DPC convention) and the Context local
 * declaration (originals 1327-1329) are missing from this listing as well —
 * confirm against the original expool.c.
 */
1320VOID
1321NTAPI
1326{
1330
1331 //
1332 // Make sure we win the race, and if we did, copy the data atomically
1333 //
1335 {
1336 RtlCopyMemory(Context->PoolTrackTable,
1338 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1339
1340 //
1341 // This is here because ReactOS does not yet support expansion
1342 //
1343 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1344 }
1345
1346 //
1347 // Regardless of whether we won or not, we must now synchronize and then
1348 // decrement the barrier since this is one more processor that has completed
1349 // the callback.
1350 //
1353}
1354
/*
 * ExGetPoolTagInfo — services the pool-tag system-information query:
 * snapshots the tracker table atomically via a "Generic DPC"
 * (ExpGetPoolTagInfoTarget) into a temporary 'Info' buffer, then converts
 * every non-empty tracker entry into a SYSTEM_POOLTAG record in the
 * caller's buffer. Count and CurrentLength keep growing even after the
 * caller's buffer overflows, so the caller learns the full size needed
 * (the status assignment on overflow, original 1428, is missing from this
 * listing — presumably STATUS_INFO_LENGTH_MISMATCH; confirm upstream).
 * Returns the final length through ReturnLength when supplied.
 * NOTE(review): also missing — the return type and signature (originals
 * 1355/1357/1359, presumably NTSTATUS with SystemInformation and an
 * optional ReturnLength), locals including Status and Context (1363,
 * 1366-1367), the temporary-buffer allocation (1390-1391, tag 'ofnI' per
 * the free below), and the KeGenericCallDpc invocation (1400). Confirm
 * against the original expool.c.
 */
1356NTAPI
1358 IN ULONG SystemInformationLength,
1360{
1361 ULONG TableSize, CurrentLength;
1362 ULONG EntryCount;
1364 PSYSTEM_POOLTAG TagEntry;
1365 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1368
1369 //
1370 // Keep track of how much data the caller's buffer must hold
1371 //
1372 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1373
1374 //
1375 // Initialize the caller's buffer
1376 //
1377 TagEntry = &SystemInformation->TagInfo[0];
1378 SystemInformation->Count = 0;
1379
1380 //
1381 // Capture the number of entries, and the total size needed to make a copy
1382 // of the table
1383 //
1384 EntryCount = (ULONG)PoolTrackTableSize;
1385 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1386
1387 //
1388 // Allocate the "Generic DPC" temporary buffer
1389 //
1392
1393 //
1394 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1395 //
1396 Context.PoolTrackTable = Buffer;
1397 Context.PoolTrackTableSize = PoolTrackTableSize;
1398 Context.PoolTrackTableExpansion = NULL;
1399 Context.PoolTrackTableSizeExpansion = 0;
1401
1402 //
1403 // Now parse the results
1404 //
1405 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1406 {
1407 //
1408 // If the entry is empty, skip it
1409 //
1410 if (!TrackerEntry->Key) continue;
1411
1412 //
1413 // Otherwise, add one more entry to the caller's buffer, and ensure that
1414 // enough space has been allocated in it
1415 //
1416 SystemInformation->Count++;
1417 CurrentLength += sizeof(*TagEntry);
1418 if (SystemInformationLength < CurrentLength)
1419 {
1420 //
1421 // The caller's buffer is too small, so set a failure code. The
1422 // caller will know the count, as well as how much space is needed.
1423 //
1424 // We do NOT break out of the loop, because we want to keep incrementing
1425 // the Count as well as CurrentLength so that the caller can know the
1426 // final numbers
1427 //
1429 }
1430 else
1431 {
1432 //
1433 // Small sanity check that our accounting is working correctly
1434 //
1435 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1436 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1437
1438 //
1439 // Return the data into the caller's buffer
1440 //
1441 TagEntry->TagUlong = TrackerEntry->Key;
1442 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1443 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1444 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1445 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1446 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1447 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1448 TagEntry++;
1449 }
1450 }
1451
1452 //
1453 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1454 //
1455 ExFreePoolWithTag(Buffer, 'ofnI');
1456 if (ReturnLength) *ReturnLength = CurrentLength;
1457 return Status;
1458}
1459
//
// ExpReallocateBigPageTable - grows (doubles) or shrinks (halves) the big-page
// tracker hash table, rehashes every live entry into the new table, swaps the
// global PoolBigPageTable pointer, then frees the old table and adjusts the
// 'Pool' tracker accounting.
//
// NOTE(review): this is a doxygen-extracted listing; embedded numbers are the
// original source line numbers. Gaps in the numbering are lines lost in
// extraction — here they include the first parameter (original line 1464,
// presumably the held-lock/IRQL argument), the shrink lower-bound check
// (1485-1487), the early-out lock release (1493-1494), several lock-release
// statements on the failure paths, and the table-size/lock updates at
// 1566-1567 and 1570. Do not treat this listing as compilable source.
//
1461static
1462BOOLEAN
1463ExpReallocateBigPageTable(
1465 _In_ BOOLEAN Shrink)
1466{
1467 SIZE_T OldSize = PoolBigPageTableSize;
1468 SIZE_T NewSize, NewSizeInBytes;
1469 PPOOL_TRACKER_BIG_PAGES NewTable;
1470 PPOOL_TRACKER_BIG_PAGES OldTable;
1471 ULONG i;
1472 ULONG PagesFreed;
1473 ULONG Hash;
1474 ULONG HashMask;
1475
1476 /* Must be holding ExpLargePoolTableLock */
1478
1479 /* Make sure we don't overflow */
1480 if (Shrink)
1481 {
1482 NewSize = OldSize / 2;
1483
1484 /* Make sure we don't shrink too much. */
1486
1488 ASSERT(NewSize <= OldSize);
1489
1490 /* If there is only one page left, then keep it around. Not a failure either. */
1491 if (NewSize == OldSize)
1492 {
1495 return TRUE;
1496 }
1497 }
1498 else
1499 {
1500 if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
1501 {
1502 DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
1504 return FALSE;
1505 }
1506
1507 /* Make sure we don't stupidly waste pages */
1509 ASSERT(NewSize > OldSize);
1510 }
1511
1512 if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
1513 {
1514 DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
1516 return FALSE;
1517 }
1518
1519 NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1520 if (NewTable == NULL)
1521 {
1522 DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
1524 return FALSE;
1525 }
1526
1527 DPRINT("%s big pool tracker table to %lu entries\n", Shrink ? "Shrinking" : "Expanding", NewSize);
1528
1529 /* Initialize the new table */
1530 RtlZeroMemory(NewTable, NewSizeInBytes);
1531 for (i = 0; i < NewSize; i++)
1532 {
1533 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1534 }
1535
1536 /* Copy over all items */
1537 OldTable = PoolBigPageTable;
1538 HashMask = NewSize - 1;
1539 for (i = 0; i < OldSize; i++)
1540 {
1541 /* Skip over empty items */
1542 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1543 {
1544 continue;
1545 }
1546
1547 /* Recalculate the hash due to the new table size */
// NOTE(review): '% HashMask' with HashMask == NewSize - 1 means the last
// bucket is never selected as an initial probe position; the linear probing
// below still reaches it, so this works, but '& HashMask' (power-of-two
// mask) may have been the intent — confirm against upstream.
1548 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask;
1549
1550 /* Find the location in the new table */
1551 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1552 {
1553 if (++Hash == NewSize)
1554 Hash = 0;
1555 }
1556
1557 /* We must have space */
1559
1560 /* Finally, copy the item */
1561 NewTable[Hash] = OldTable[i];
1562 }
1563
1564 /* Activate the new table */
1565 PoolBigPageTable = NewTable;
1568
1569 /* Release the lock, we're done changing global state */
1571
1572 /* Free the old table and update our tracker */
1573 PagesFreed = MiFreePoolPages(OldTable);
1574 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1575 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1576
1577 return TRUE;
1578}
1579
//
// Inserts a (Va, Key, NumberOfPages) record into the big-page tracker hash
// table, probing linearly from the hash bucket and claiming a free slot with
// an interlocked compare-exchange on the Va field. If the whole table is
// occupied (or the near-full heuristic fires), the table is expanded via
// ExpReallocateBigPageTable and the insertion is retried.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided line 1582 carried the function
// name (ExpAddTagForBigPages per the callers below) and the 'IN PVOID Va'
// parameter; other gaps include local declarations (1590-1593), the
// lock-acquire/hash computation at 1602-1605, and the expansion-threshold
// condition at 1640-1641. Do not treat this listing as compilable source.
//
1580BOOLEAN
1581NTAPI
1583 IN ULONG Key,
1584 IN ULONG NumberOfPages,
1586{
1587 ULONG Hash, i = 0;
1588 PVOID OldVa;
1589 KIRQL OldIrql;
1591 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1594
1595 //
1596 // As the table is expandable, these values must only be read after acquiring
1597 // the lock to avoid a teared access during an expansion
1598 // NOTE: Windows uses a special reader/writer SpinLock to improve
1599 // performance in the common case (add/remove a tracker entry)
1600 //
1601Retry:
1606
1607 //
1608 // We loop from the current hash bucket to the end of the table, and then
1609 // rollover to hash bucket 0 and keep going from there. If we return back
1610 // to the beginning, then we attempt expansion at the bottom of the loop
1611 //
1612 EntryStart = Entry = &PoolBigPageTable[Hash];
1613 EntryEnd = &PoolBigPageTable[TableSize];
1614 do
1615 {
1616 //
1617 // Make sure that this is a free entry and attempt to atomically make the
1618 // entry busy now
1619 // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1620 //
1621 OldVa = Entry->Va;
1622 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1623 (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1624 {
1625 //
1626 // We now own this entry, write down the size and the pool tag
1627 //
1628 Entry->Key = Key;
1629 Entry->NumberOfPages = NumberOfPages;
1630
1631 //
1632 // Add one more entry to the count, and see if we're getting within
1633 // 75% of the table size, at which point we'll do an expansion now
1634 // to avoid blocking too hard later on.
1635 //
1636 // Note that we only do this if it's also been the 16th time that we
1637 // keep losing the race or that we are not finding a free entry anymore,
1638 // which implies a massive number of concurrent big pool allocations.
1639 //
1642 {
1643 DPRINT("Attempting expansion since we now have %lu entries\n",
1646 ExpReallocateBigPageTable(OldIrql, FALSE);
1647 return TRUE;
1648 }
1649
1650 //
1651 // We have our entry, return
1652 //
1654 return TRUE;
1655 }
1656
1657 //
1658 // We don't have our entry yet, so keep trying, making the entry list
1659 // circular if we reach the last entry. We'll eventually break out of
1660 // the loop once we've rolled over and returned back to our original
1661 // hash bucket
1662 //
1663 i++;
1664 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1665 } while (Entry != EntryStart);
1666
1667 //
1668 // This means there's no free hash buckets whatsoever, so we now have
1669 // to attempt expanding the table
1670 //
1672 if (ExpReallocateBigPageTable(OldIrql, FALSE))
1673 {
1674 goto Retry;
1675 }
1677 DPRINT1("Big pool table expansion failed\n");
1678 return FALSE;
1679}
1680
//
// Looks up a big-page allocation's tracker entry by Va (linear probe with one
// wrap-around pass), captures its page count and pool tag, marks the slot
// free, and — if occupancy has dropped far enough — shrinks the table.
// Returns the pool tag, or ' GIB' with *BigPages = 0 when the Va was never
// inserted (caller must then ask Mm for the page count).
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided line 1683 carried the function
// name (ExpFindAndRemoveTagBigPages per the caller in ExFreePoolWithTag) and
// the 'IN PVOID Va' parameter; other gaps include the FirstTry/PoolTag/Entry
// declarations, the lock acquire + hash computation (1699-1703), the entry
// capture at 1741, the free-bit store at 1749-1751, and the lock releases.
//
1681ULONG
1682NTAPI
1684 OUT PULONG_PTR BigPages,
1686{
1689 KIRQL OldIrql;
1694
1695 //
1696 // As the table is expandable, these values must only be read after acquiring
1697 // the lock to avoid a teared access during an expansion
1698 //
1703
1704 //
1705 // Loop while trying to find this big page allocation
1706 //
1707 while (PoolBigPageTable[Hash].Va != Va)
1708 {
1709 //
1710 // Increment the size until we go past the end of the table
1711 //
1712 if (++Hash >= TableSize)
1713 {
1714 //
1715 // Is this the second time we've tried?
1716 //
1717 if (!FirstTry)
1718 {
1719 //
1720 // This means it was never inserted into the pool table and it
1721 // received the special "BIG" tag -- return that and return 0
1722 // so that the code can ask Mm for the page count instead
1723 //
1725 *BigPages = 0;
1726 return ' GIB';
1727 }
1728
1729 //
1730 // The first time this happens, reset the hash index and try again
1731 //
1732 Hash = 0;
1733 FirstTry = FALSE;
1734 }
1735 }
1736
1737 //
1738 // Now capture all the information we need from the entry, since after we
1739 // release the lock, the data can change
1740 //
1742 *BigPages = Entry->NumberOfPages;
1743 PoolTag = Entry->Key;
1744
1745 //
1746 // Set the free bit, and decrement the number of allocations. Finally, release
1747 // the lock and return the tag that was located
1748 //
1750
1752
1753 /* If reaching 12.5% of the size (or whatever integer rounding gets us to),
1754 * halve the allocation size, which will get us to 25% of space used. */
1756 {
1757 /* Shrink the table. */
1758 ExpReallocateBigPageTable(OldIrql, TRUE);
1759 }
1760 else
1761 {
1763 }
1764 return PoolTag;
1765}
1766
//
// Sums page counts, allocation counts, and free counts across all paged pool
// descriptors and the nonpaged pool descriptor, and tallies lookaside-list
// allocate hits, returning everything through the OUT parameters.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided line 1769 carried the function
// name (presumably ExQueryPoolUsage) and the 'OUT PULONG PagedPoolPages'
// parameter; the elided 1827 carried the guard condition for the lookaside
// loop, and 1835 the PGENERAL_LOOKASIDE declaration.
//
1767VOID
1768NTAPI
1770 OUT PULONG NonPagedPoolPages,
1771 OUT PULONG PagedPoolAllocs,
1772 OUT PULONG PagedPoolFrees,
1773 OUT PULONG PagedPoolLookasideHits,
1774 OUT PULONG NonPagedPoolAllocs,
1775 OUT PULONG NonPagedPoolFrees,
1776 OUT PULONG NonPagedPoolLookasideHits)
1777{
1778 ULONG i;
1779 PPOOL_DESCRIPTOR PoolDesc;
1780
1781 //
1782 // Assume all failures
1783 //
1784 *PagedPoolPages = 0;
1785 *PagedPoolAllocs = 0;
1786 *PagedPoolFrees = 0;
1787
1788 //
1789 // Tally up the totals for all the paged pool
1790 //
1791 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1792 {
1793 PoolDesc = ExpPagedPoolDescriptor[i];
1794 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1795 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1796 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1797 }
1798
1799 //
1800 // The first non-paged pool has a hardcoded well-known descriptor name
1801 //
1802 PoolDesc = &NonPagedPoolDescriptor;
1803 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1804 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1805 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1806
1807 //
1808 // If the system has more than one non-paged pool, copy the other descriptor
1809 // totals as well
1810 //
1811#if 0
1812 if (ExpNumberOfNonPagedPools > 1)
1813 {
1814 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1815 {
1816 PoolDesc = ExpNonPagedPoolDescriptor[i];
1817 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1818 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1819 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1820 }
1821 }
1822#endif
1823
1824 //
1825 // Get the amount of hits in the system lookaside lists
1826 //
1828 {
1829 PLIST_ENTRY ListEntry;
1830
1831 for (ListEntry = ExPoolLookasideListHead.Flink;
1832 ListEntry != &ExPoolLookasideListHead;
1833 ListEntry = ListEntry->Flink)
1834 {
1836
1837 Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1838
1839 if (Lookaside->Type == NonPagedPool)
1840 {
1841 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1842 }
1843 else
1844 {
1845 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1846 }
1847 }
1848 }
1849}
1850
//
// Returns the pool quota that was charged to a process for the allocation at
// P: reads the EPROCESS pointer stashed in the last pointer-sized slot of the
// block, sanity-checks it really is a process object (bugchecking with
// BAD_POOL_CALLER otherwise), clears the slot, and returns the quota.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided line 1853 carried the function
// name (presumably ExReturnPoolQuota) and the 'IN PVOID P' parameter; other
// gaps include local declarations (1855-1856, 1858), the early-out guard
// condition (1860-1861), the QUOTA_POOL_MASK test at 1873, the
// PsReturnPoolQuota call around 1890-1891, and the object dereference at
// 1893. Do not treat this listing as compilable source.
//
1851VOID
1852NTAPI
1854{
1857 USHORT BlockSize;
1859
1862 {
1863 return;
1864 }
1865
1866 Entry = P;
1867 Entry--;
1869
1870 PoolType = Entry->PoolType - 1;
1871 BlockSize = Entry->BlockSize;
1872
1874 {
1875 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1876 ASSERT(Process != NULL);
1877 if (Process)
1878 {
1879 if (Process->Pcb.Header.Type != ProcessObject)
1880 {
1881 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1882 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1883 KeBugCheckEx(BAD_POOL_CALLER,
1885 (ULONG_PTR)P,
1886 Entry->PoolTag,
1888 }
1889 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1892 BlockSize * POOL_BLOCK_SIZE);
1894 }
1895 }
1896}
1897
1898/* PUBLIC FUNCTIONS ***********************************************************/
1899
1900/*
1901 * @implemented
1902 */
//
// ExAllocatePoolWithTag - allocates NumberOfBytes of pool of the requested
// PoolType and charges the allocation to Tag. Visible paths, in order:
//  1. verifier / special-pool hook,
//  2. big-page path: MiAllocatePoolPages + big-page tag tracking (falls back
//     to the generic ' GIB' tag when the tracker insert fails),
//  3. lookaside fast path: per-CPU then global per-processor lists,
//  4. free-list scan (splitting an oversized free block into the allocation
//     plus a leftover fragment),
//  5. fresh-page slow path: allocate a page and carve allocation + fragment.
// Out-of-memory handling honors MUST_SUCCEED_POOL_MASK (bugcheck) and
// POOL_RAISE_IF_ALLOCATION_FAILURE (exception); otherwise returns NULL.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided lines 1905-1906 carried the
// function name (ExAllocatePoolWithTag per the debug strings below) and the
// 'IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes' parameters. Many guard
// conditions, counter updates, lookaside pops, and list-removal calls are
// likewise elided (e.g. 1916, 1924, 1929, 1934, 1939, 1948, 1953, 1958,
// 1968, 1975, 2044-2047, 2053-2055, 2082, 2087, 2092, 2096, 2102, 2106,
// 2121, 2174-2176, 2241, 2293, 2299-2300, 2305, 2334, 2408-2409, 2434,
// 2442, 2448-2449, 2456). Do not treat this listing as compilable source.
//
1903PVOID
1904NTAPI
1907 IN ULONG Tag)
1908{
1909 PPOOL_DESCRIPTOR PoolDesc;
1910 PLIST_ENTRY ListHead;
1911 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1912 KIRQL OldIrql;
1913 USHORT BlockSize, i;
1914 ULONG OriginalType;
1915 PKPRCB Prcb = KeGetCurrentPrcb();
1917
1918 //
1919 // Some sanity checks
1920 //
1921 ASSERT(Tag != 0);
1922 ASSERT(Tag != ' GIB');
1923 ASSERT(NumberOfBytes != 0);
1925
1926 //
1927 // Not supported in ReactOS
1928 //
1930
1931 //
1932 // Check if verifier or special pool is enabled
1933 //
1935 {
1936 //
1937 // For verifier, we should call the verification routine
1938 //
1940 {
1941 DPRINT1("Driver Verifier is not yet supported\n");
1942 }
1943
1944 //
1945 // For special pool, we check if this is a suitable allocation and do
1946 // the special allocation if needed
1947 //
1949 {
1950 //
1951 // Check if this is a special pool allocation
1952 //
1954 {
1955 //
1956 // Try to allocate using special pool
1957 //
1959 if (Entry) return Entry;
1960 }
1961 }
1962 }
1963
1964 //
1965 // Get the pool type and its corresponding vector for this request
1966 //
1967 OriginalType = PoolType;
1969 PoolDesc = PoolVector[PoolType];
1970 ASSERT(PoolDesc != NULL);
1971
1972 //
1973 // Check if this is a big page allocation
1974 //
1976 {
1977 //
1978 // Allocate pages for it
1979 //
1980 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1981 if (!Entry)
1982 {
1983#if DBG
1984 //
1985 // Out of memory, display current consumption
1986 // Let's consider that if the caller wanted more
1987 // than a hundred pages, that's a bogus caller
1988 // and we are not out of memory. Dump at most
1989 // once a second to avoid spamming the log.
1990 //
1991 if (NumberOfBytes < 100 * PAGE_SIZE &&
1993 {
1994 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1996 }
1997#endif
1998
1999 //
2000 // Must succeed pool is deprecated, but still supported. These allocation
2001 // failures must cause an immediate bugcheck
2002 //
2003 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2004 {
2005 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2009 0);
2010 }
2011
2012 //
2013 // Internal debugging
2014 //
2016
2017 //
2018 // This flag requests printing failures, and can also further specify
2019 // breaking on failures
2020 //
2022 {
2023 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2025 OriginalType);
2027 }
2028
2029 //
2030 // Finally, this flag requests an exception, which we are more than
2031 // happy to raise!
2032 //
2033 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2034 {
2036 }
2037
2038 return NULL;
2039 }
2040
2041 //
2042 // Increment required counters
2043 //
2048
2049 //
2050 // Add a tag for the big page allocation and switch to the generic "BIG"
2051 // tag if we failed to do so, then insert a tracker for this allocation.
2052 //
2054 Tag,
2056 OriginalType))
2057 {
2058 Tag = ' GIB';
2059 }
2061 return Entry;
2062 }
2063
2064 //
2065 // Should never request 0 bytes from the pool, but since so many drivers do
2066 // it, we'll just assume they want 1 byte, based on NT's similar behavior
2067 //
2068 if (!NumberOfBytes) NumberOfBytes = 1;
2069
2070 //
2071 // A pool allocation is defined by its data, a linked list to connect it to
2072 // the free list (if necessary), and a pool header to store accounting info.
2073 // Calculate this size, then convert it into a block size (units of pool
2074 // headers)
2075 //
2076 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2077 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2078 // the direct allocation of pages.
2079 //
2080 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2081 / POOL_BLOCK_SIZE);
2083
2084 //
2085 // Handle lookaside list optimization for both paged and nonpaged pool
2086 //
2088 {
2089 //
2090 // Try popping it from the per-CPU lookaside list
2091 //
2093 Prcb->PPPagedLookasideList[i - 1].P :
2094 Prcb->PPNPagedLookasideList[i - 1].P;
2095 LookasideList->TotalAllocates++;
2097 if (!Entry)
2098 {
2099 //
2100 // We failed, try popping it from the global list
2101 //
2103 Prcb->PPPagedLookasideList[i - 1].L :
2104 Prcb->PPNPagedLookasideList[i - 1].L;
2105 LookasideList->TotalAllocates++;
2107 }
2108
2109 //
2110 // If we were able to pop it, update the accounting and return the block
2111 //
2112 if (Entry)
2113 {
2114 LookasideList->AllocateHits++;
2115
2116 //
2117 // Get the real entry, write down its pool type, and track it
2118 //
2119 Entry--;
2120 Entry->PoolType = OriginalType + 1;
2122 Entry->BlockSize * POOL_BLOCK_SIZE,
2123 OriginalType);
2124
2125 //
2126 // Return the pool allocation
2127 //
2128 Entry->PoolTag = Tag;
2129 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2130 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2131 return POOL_FREE_BLOCK(Entry);
2132 }
2133 }
2134
2135 //
2136 // Loop in the free lists looking for a block of this size. Start with the
2137 // list optimized for this kind of size lookup
2138 //
2139 ListHead = &PoolDesc->ListHeads[i];
2140 do
2141 {
2142 //
2143 // Are there any free entries available on this list?
2144 //
2145 if (!ExpIsPoolListEmpty(ListHead))
2146 {
2147 //
2148 // Acquire the pool lock now
2149 //
2150 OldIrql = ExLockPool(PoolDesc);
2151
2152 //
2153 // And make sure the list still has entries
2154 //
2155 if (ExpIsPoolListEmpty(ListHead))
2156 {
2157 //
2158 // Someone raced us (and won) before we had a chance to acquire
2159 // the lock.
2160 //
2161 // Try again!
2162 //
2163 ExUnlockPool(PoolDesc, OldIrql);
2164 continue;
2165 }
2166
2167 //
2168 // Remove a free entry from the list
2169 // Note that due to the way we insert free blocks into multiple lists
2170 // there is a guarantee that any block on this list will either be
2171 // of the correct size, or perhaps larger.
2172 //
2173 ExpCheckPoolLinks(ListHead);
2175 ExpCheckPoolLinks(ListHead);
2177 ASSERT(Entry->BlockSize >= i);
2178 ASSERT(Entry->PoolType == 0);
2179
2180 //
2181 // Check if this block is larger than what we need. The block could
2182 // not possibly be smaller, due to the reason explained above (and
2183 // we would've asserted on a checked build if this was the case).
2184 //
2185 if (Entry->BlockSize != i)
2186 {
2187 //
2188 // Is there an entry before this one?
2189 //
2190 if (Entry->PreviousSize == 0)
2191 {
2192 //
2193 // There isn't anyone before us, so take the next block and
2194 // turn it into a fragment that contains the leftover data
2195 // that we don't need to satisfy the caller's request
2196 //
2197 FragmentEntry = POOL_BLOCK(Entry, i);
2198 FragmentEntry->BlockSize = Entry->BlockSize - i;
2199
2200 //
2201 // And make it point back to us
2202 //
2203 FragmentEntry->PreviousSize = i;
2204
2205 //
2206 // Now get the block that follows the new fragment and check
2207 // if it's still on the same page as us (and not at the end)
2208 //
2209 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2210 if (PAGE_ALIGN(NextEntry) != NextEntry)
2211 {
2212 //
2213 // Adjust this next block to point to our newly created
2214 // fragment block
2215 //
2216 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2217 }
2218 }
2219 else
2220 {
2221 //
2222 // There is a free entry before us, which we know is smaller
2223 // so we'll make this entry the fragment instead
2224 //
2225 FragmentEntry = Entry;
2226
2227 //
2228 // And then we'll remove from it the actual size required.
2229 // Now the entry is a leftover free fragment
2230 //
2231 Entry->BlockSize -= i;
2232
2233 //
2234 // Now let's go to the next entry after the fragment (which
2235 // used to point to our original free entry) and make it
2236 // reference the new fragment entry instead.
2237 //
2238 // This is the entry that will actually end up holding the
2239 // allocation!
2240 //
2242 Entry->PreviousSize = FragmentEntry->BlockSize;
2243
2244 //
2245 // And now let's go to the entry after that one and check if
2246 // it's still on the same page, and not at the end
2247 //
2248 NextEntry = POOL_BLOCK(Entry, i);
2249 if (PAGE_ALIGN(NextEntry) != NextEntry)
2250 {
2251 //
2252 // Make it reference the allocation entry
2253 //
2254 NextEntry->PreviousSize = i;
2255 }
2256 }
2257
2258 //
2259 // Now our (allocation) entry is the right size
2260 //
2261 Entry->BlockSize = i;
2262
2263 //
2264 // And the next entry is now the free fragment which contains
2265 // the remaining difference between how big the original entry
2266 // was, and the actual size the caller needs/requested.
2267 //
2268 FragmentEntry->PoolType = 0;
2269 BlockSize = FragmentEntry->BlockSize;
2270
2271 //
2272 // Now check if enough free bytes remained for us to have a
2273 // "full" entry, which contains enough bytes for a linked list
2274 // and thus can be used for allocations (up to 8 bytes...)
2275 //
2276 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2277 if (BlockSize != 1)
2278 {
2279 //
2280 // Insert the free entry into the free list for this size
2281 //
2282 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2283 POOL_FREE_BLOCK(FragmentEntry));
2284 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2285 }
2286 }
2287
2288 //
2289 // We have found an entry for this allocation, so set the pool type
2290 // and release the lock since we're done
2291 //
2292 Entry->PoolType = OriginalType + 1;
2294 ExUnlockPool(PoolDesc, OldIrql);
2295
2296 //
2297 // Increment required counters
2298 //
2301
2302 //
2303 // Track this allocation
2304 //
2306 Entry->BlockSize * POOL_BLOCK_SIZE,
2307 OriginalType);
2308
2309 //
2310 // Return the pool allocation
2311 //
2312 Entry->PoolTag = Tag;
2313 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2314 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2315 return POOL_FREE_BLOCK(Entry);
2316 }
2317 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2318
2319 //
2320 // There were no free entries left, so we have to allocate a new fresh page
2321 //
2322 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2323 if (!Entry)
2324 {
2325#if DBG
2326 //
2327 // Out of memory, display current consumption
2328 // Let's consider that if the caller wanted more
2329 // than a hundred pages, that's a bogus caller
2330 // and we are not out of memory. Dump at most
2331 // once a second to avoid spamming the log.
2332 //
2333 if (NumberOfBytes < 100 * PAGE_SIZE &&
2335 {
2336 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2338 }
2339#endif
2340
2341 //
2342 // Must succeed pool is deprecated, but still supported. These allocation
2343 // failures must cause an immediate bugcheck
2344 //
2345 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2346 {
2347 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2348 PAGE_SIZE,
2351 0);
2352 }
2353
2354 //
2355 // Internal debugging
2356 //
2358
2359 //
2360 // This flag requests printing failures, and can also further specify
2361 // breaking on failures
2362 //
2364 {
2365 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2367 OriginalType);
2369 }
2370
2371 //
2372 // Finally, this flag requests an exception, which we are more than
2373 // happy to raise!
2374 //
2375 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2376 {
2378 }
2379
2380 //
2381 // Return NULL to the caller in all other cases
2382 //
2383 return NULL;
2384 }
2385
2386 //
2387 // Setup the entry data
2388 //
2389 Entry->Ulong1 = 0;
2390 Entry->BlockSize = i;
2391 Entry->PoolType = OriginalType + 1;
2392
2393 //
2394 // This page will have two entries -- one for the allocation (which we just
2395 // created above), and one for the remaining free bytes, which we're about
2396 // to create now. The free bytes are the whole page minus what was allocated
2397 // and then converted into units of block headers.
2398 //
2399 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2400 FragmentEntry = POOL_BLOCK(Entry, i);
2401 FragmentEntry->Ulong1 = 0;
2402 FragmentEntry->BlockSize = BlockSize;
2403 FragmentEntry->PreviousSize = i;
2404
2405 //
2406 // Increment required counters
2407 //
2410
2411 //
2412 // Now check if enough free bytes remained for us to have a "full" entry,
2413 // which contains enough bytes for a linked list and thus can be used for
2414 // allocations (up to 8 bytes...)
2415 //
2416 if (FragmentEntry->BlockSize != 1)
2417 {
2418 //
2419 // Excellent -- acquire the pool lock
2420 //
2421 OldIrql = ExLockPool(PoolDesc);
2422
2423 //
2424 // And insert the free entry into the free list for this block size
2425 //
2426 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2427 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2428 POOL_FREE_BLOCK(FragmentEntry));
2429 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2430
2431 //
2432 // Release the pool lock
2433 //
2435 ExUnlockPool(PoolDesc, OldIrql);
2436 }
2437 else
2438 {
2439 //
2440 // Simply do a sanity check
2441 //
2443 }
2444
2445 //
2446 // Increment performance counters and track this allocation
2447 //
2450 Entry->BlockSize * POOL_BLOCK_SIZE,
2451 OriginalType);
2452
2453 //
2454 // And return the pool allocation
2455 //
2457 Entry->PoolTag = Tag;
2458 return POOL_FREE_BLOCK(Entry);
2459}
2460
2461/*
2462 * @implemented
2463 */
//
// ExAllocatePool - tag-less allocation wrapper. The disabled (#if 0 && DBG)
// block derives a 4-character tag from the calling driver's BaseDllName;
// otherwise TAG_NONE is used.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided lines 2466-2467 carried the
// function name (presumably ExAllocatePool) and its
// PoolType/NumberOfBytes parameters, line 2475 the LdrEntry lookup
// expression, and line 2487 the final return (presumably delegating to
// ExAllocatePoolWithTag with the computed Tag).
//
2464PVOID
2465NTAPI
2468{
2469 ULONG Tag = TAG_NONE;
2470#if 0 && DBG
2471 PLDR_DATA_TABLE_ENTRY LdrEntry;
2472
2473 /* Use the first four letters of the driver name, or "None" if unavailable */
2474 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2476 : NULL;
2477 if (LdrEntry)
2478 {
2479 ULONG i;
2480 Tag = 0;
2481 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2482 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2483 for (; i < 4; i++)
2484 Tag = Tag >> 8 | ' ' << 24;
2485 }
2486#endif
2488}
2489
2490/*
2491 * @implemented
2492 */
//
// ExFreePoolWithTag - frees a pool allocation, optionally verifying that its
// tag matches TagToFree. Visible paths:
//  1. special-pool / verifier checks,
//  2. big-page path (page-aligned P): look up and remove the big-page tracker
//     entry, validate the tag, update counters, MiFreePoolPages,
//  3. small-block path: validate tag, remove tracker entry, return quota,
//     try pushing onto per-CPU then global lookaside lists, otherwise
//     coalesce with free neighbors (next and/or previous block), freeing the
//     whole page if coalescing reclaimed it, else reinsert into the
//     appropriate free list.
//
// NOTE(review): doxygen-extracted listing with embedded original line
// numbers; gaps are elided lines. The elided line 2495 carried the function
// name (ExFreePoolWithTag per the header comments elsewhere in this file)
// and the 'IN PVOID P' parameter. Other gaps include local declarations
// (2501, 2507-2508), many guard conditions (2513-2518, 2523, 2528, 2534,
// 2624-2627, 2633), the special-pool free call (2542), the tracker removal
// (2619), counter updates (2647-2648, 2656, 2771, 2879), the quota return
// (2719, 2722, 2724-2725), lookaside pushes (2737, 2744, 2751, 2758),
// list-entry removals (2805-2809, 2845-2849), and the page-free condition
// at 2871. Do not treat this listing as compilable source.
//
2493VOID
2494NTAPI
2496 IN ULONG TagToFree)
2497{
2498 PPOOL_HEADER Entry, NextEntry;
2499 USHORT BlockSize;
2500 KIRQL OldIrql;
2502 PPOOL_DESCRIPTOR PoolDesc;
2503 ULONG Tag;
2504 BOOLEAN Combined = FALSE;
2505 PFN_NUMBER PageCount, RealPageCount;
2506 PKPRCB Prcb = KeGetCurrentPrcb();
2509
2510 //
2511 // Check if any of the debug flags are enabled
2512 //
2519 {
2520 //
2521 // Check if special pool is enabled
2522 //
2524 {
2525 //
2526 // Check if it was allocated from a special pool
2527 //
2529 {
2530 //
2531 // Was deadlock verification also enabled? We can do some extra
2532 // checks at this point
2533 //
2535 {
2536 DPRINT1("Verifier not yet supported\n");
2537 }
2538
2539 //
2540 // It is, so handle it via special pool free routine
2541 //
2543 return;
2544 }
2545 }
2546
2547 //
2548 // For non-big page allocations, we'll do a bunch of checks in here
2549 //
2550 if (PAGE_ALIGN(P) != P)
2551 {
2552 //
2553 // Get the entry for this pool allocation
2554 // The pointer math here may look wrong or confusing, but it is quite right
2555 //
2556 Entry = P;
2557 Entry--;
2558
2559 //
2560 // Get the pool type
2561 //
2562 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2563
2564 //
2565 // FIXME: Many other debugging checks go here
2566 //
2568 }
2569 }
2570
2571 //
2572 // Check if this is a big page allocation
2573 //
2574 if (PAGE_ALIGN(P) == P)
2575 {
2576 //
2577 // We need to find the tag for it, so first we need to find out what
2578 // kind of allocation this was (paged or nonpaged), then we can go
2579 // ahead and try finding the tag for it. Remember to get rid of the
2580 // PROTECTED_POOL tag if it's found.
2581 //
2582 // Note that if at insertion time, we failed to add the tag for a big
2583 // pool allocation, we used a special tag called 'BIG' to identify the
2584 // allocation, and we may get this tag back. In this scenario, we must
2585 // manually get the size of the allocation by actually counting through
2586 // the PFN database.
2587 //
2590 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2591 if (!Tag)
2592 {
2593 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2594 ASSERT(Tag == ' GIB');
2595 PageCount = 1; // We are going to lie! This might screw up accounting?
2596 }
2597 else if (Tag & PROTECTED_POOL)
2598 {
2599 Tag &= ~PROTECTED_POOL;
2600 }
2601
2602 //
2603 // Check block tag
2604 //
2605 if (TagToFree && TagToFree != Tag)
2606 {
2607 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2608#if DBG
2609 /* Do not bugcheck in case this is a big allocation for which we didn't manage to insert the tag */
2610 if (Tag != ' GIB')
2611 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2612#endif
2613 }
2614
2615 //
2616 // We have our tag and our page count, so we can go ahead and remove this
2617 // tracker now
2618 //
2620
2621 //
2622 // Check if any of the debug flags are enabled
2623 //
2628 {
2629 //
2630 // Was deadlock verification also enabled? We can do some extra
2631 // checks at this point
2632 //
2634 {
2635 DPRINT1("Verifier not yet supported\n");
2636 }
2637
2638 //
2639 // FIXME: Many debugging checks go here
2640 //
2641 }
2642
2643 //
2644 // Update counters
2645 //
2646 PoolDesc = PoolVector[PoolType];
2649 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2650
2651 //
2652 // Do the real free now and update the last counter with the big page count
2653 //
2654 RealPageCount = MiFreePoolPages(P);
2655 ASSERT(RealPageCount == PageCount);
2657 -(LONG)RealPageCount);
2658 return;
2659 }
2660
2661 //
2662 // Get the entry for this pool allocation
2663 // The pointer math here may look wrong or confusing, but it is quite right
2664 //
2665 Entry = P;
2666 Entry--;
2668
2669 //
2670 // Get the size of the entry, and its pool type, then load the descriptor
2671 // for this pool type
2672 //
2673 BlockSize = Entry->BlockSize;
2674 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2675 PoolDesc = PoolVector[PoolType];
2676
2677 //
2678 // Make sure that the IRQL makes sense
2679 //
2681
2682 //
2683 // Get the pool tag and get rid of the PROTECTED_POOL flag
2684 //
2685 Tag = Entry->PoolTag;
2686 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2687
2688 //
2689 // Check block tag
2690 //
2691 if (TagToFree && TagToFree != Tag)
2692 {
2693 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2694#if DBG
2695 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2696#endif
2697 }
2698
2699 //
2700 // Track the removal of this allocation
2701 //
2703 BlockSize * POOL_BLOCK_SIZE,
2704 Entry->PoolType - 1);
2705
2706 //
2707 // Release pool quota, if any
2708 //
2709 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2710 {
2711 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2712 if (Process)
2713 {
2714 if (Process->Pcb.Header.Type != ProcessObject)
2715 {
2716 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2717 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2718 KeBugCheckEx(BAD_POOL_CALLER,
2720 (ULONG_PTR)P,
2721 Tag,
2723 }
2726 }
2727 }
2728
2729 //
2730 // Is this allocation small enough to have come from a lookaside list?
2731 //
2732 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2733 {
2734 //
2735 // Try pushing it into the per-CPU lookaside list
2736 //
2738 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2739 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2740 LookasideList->TotalFrees++;
2741 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2742 {
2743 LookasideList->FreeHits++;
2745 return;
2746 }
2747
2748 //
2749 // We failed, try to push it into the global lookaside list
2750 //
2752 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2753 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2754 LookasideList->TotalFrees++;
2755 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2756 {
2757 LookasideList->FreeHits++;
2759 return;
2760 }
2761 }
2762
2763 //
2764 // Get the pointer to the next entry
2765 //
2766 NextEntry = POOL_BLOCK(Entry, BlockSize);
2767
2768 //
2769 // Update performance counters
2770 //
2772 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2773
2774 //
2775 // Acquire the pool lock
2776 //
2777 OldIrql = ExLockPool(PoolDesc);
2778
2779 //
2780 // Check if the next allocation is at the end of the page
2781 //
2783 if (PAGE_ALIGN(NextEntry) != NextEntry)
2784 {
2785 //
2786 // We may be able to combine the block if it's free
2787 //
2788 if (NextEntry->PoolType == 0)
2789 {
2790 //
2791 // The next block is free, so we'll do a combine
2792 //
2793 Combined = TRUE;
2794
2795 //
2796 // Make sure there's actual data in the block -- anything smaller
2797 // than this means we only have the header, so there's no linked list
2798 // for us to remove
2799 //
2800 if ((NextEntry->BlockSize != 1))
2801 {
2802 //
2803 // The block is at least big enough to have a linked list, so go
2804 // ahead and remove it
2805 //
2810 }
2811
2812 //
2813 // Our entry is now combined with the next entry
2814 //
2815 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2816 }
2817 }
2818
2819 //
2820 // Now check if there was a previous entry on the same page as us
2821 //
2822 if (Entry->PreviousSize)
2823 {
2824 //
2825 // Great, grab that entry and check if it's free
2826 //
2827 NextEntry = POOL_PREV_BLOCK(Entry);
2828 if (NextEntry->PoolType == 0)
2829 {
2830 //
2831 // It is, so we can do a combine
2832 //
2833 Combined = TRUE;
2834
2835 //
2836 // Make sure there's actual data in the block -- anything smaller
2837 // than this means we only have the header so there's no linked list
2838 // for us to remove
2839 //
2840 if ((NextEntry->BlockSize != 1))
2841 {
2842 //
2843 // The block is at least big enough to have a linked list, so go
2844 // ahead and remove it
2845 //
2850 }
2851
2852 //
2853 // Combine our original block (which might've already been combined
2854 // with the next block), into the previous block
2855 //
2856 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2857
2858 //
2859 // And now we'll work with the previous block instead
2860 //
2861 Entry = NextEntry;
2862 }
2863 }
2864
2865 //
2866 // By now, it may have been possible for our combined blocks to actually
2867 // have made up a full page (if there were only 2-3 allocations on the
2868 // page, they could've all been combined).
2869 //
2870 if ((PAGE_ALIGN(Entry) == Entry) &&
2872 {
2873 //
2874 // In this case, release the pool lock, update the performance counter,
2875 // and free the page
2876 //
2877 ExUnlockPool(PoolDesc, OldIrql);
2878 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2880 return;
2881 }
2882
2883 //
2884 // Otherwise, we now have a free block (or a combination of 2 or 3)
2885 //
2886 Entry->PoolType = 0;
2887 BlockSize = Entry->BlockSize;
2888 ASSERT(BlockSize != 1);
2889
2890 //
2891 // Check if we actually did combine it with anyone
2892 //
2893 if (Combined)
2894 {
2895 //
2896 // Get the first combined block (either our original to begin with, or
2897 // the one after the original, depending if we combined with the previous)
2898 //
2899 NextEntry = POOL_NEXT_BLOCK(Entry);
2900
2901 //
2902 // As long as the next block isn't on a page boundary, have it point
2903 // back to us
2904 //
2905 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2906 }
2907
2908 //
2909 // Insert this new free block, and release the pool lock
2910 //
2911 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2913 ExUnlockPool(PoolDesc, OldIrql);
2914}
2915
2916/*
2917 * @implemented
2918 */
2919VOID
2920NTAPI
2922{
2923 //
2924 // Just free without checking for the tag
2925 //
2926 ExFreePoolWithTag(P, 0);
2927}
2928
2929/*
2930 * @unimplemented
2931 */
2932SIZE_T
2933NTAPI
2936{
2937 //
2938 // Not implemented
2939 //
2941 return FALSE;
2942}
2943
2944/*
2945 * @implemented
2946 */
2947
2948PVOID
2949NTAPI
2952{
2953 //
2954 // Allocate the pool
2955 //
2957}
2958
2959/*
2960 * @implemented
2961 */
2962PVOID
2963NTAPI
2966 IN ULONG Tag,
2968{
2969 PVOID Buffer;
2970
2971 //
2972 // Allocate the pool
2973 //
2975 if (Buffer == NULL)
2976 {
2978 }
2979
2980 return Buffer;
2981}
2982
2983/*
2984 * @implemented
2985 */
2986PVOID
2987NTAPI
2990 IN ULONG Tag)
2991{
2992 BOOLEAN Raise = TRUE;
2993 PVOID Buffer;
2997
2998 //
2999 // Check if we should fail instead of raising an exception
3000 //
3002 {
3003 Raise = FALSE;
3004 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
3005 }
3006
3007 //
3008 // Inject the pool quota mask
3009 //
3011
3012 //
3013 // Check if we have enough space to add the quota owner process, as long as
3014 // this isn't the system process, which never gets charged quota
3015 //
3016 ASSERT(NumberOfBytes != 0);
3017 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
3019 {
3020 //
3021 // Add space for our EPROCESS pointer
3022 //
3023 NumberOfBytes += sizeof(PEPROCESS);
3024 }
3025 else
3026 {
3027 //
3028 // We won't be able to store the pointer, so don't use quota for this
3029 //
3031 }
3032
3033 //
3034 // Allocate the pool buffer now
3035 //
3037
3038 //
3039 // If the buffer is page-aligned, this is a large page allocation and we
3040 // won't touch it
3041 //
3042 if (PAGE_ALIGN(Buffer) != Buffer)
3043 {
3044 //
3045 // Also if special pool is enabled, and this was allocated from there,
3046 // we won't touch it either
3047 //
3050 {
3051 return Buffer;
3052 }
3053
3054 //
3055 // If it wasn't actually allocated with quota charges, ignore it too
3056 //
3057 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3058
3059 //
3060 // If this is the system process, we don't charge quota, so ignore
3061 //
3062 if (Process == PsInitialSystemProcess) return Buffer;
3063
3064 //
3065 // Actually go and charge quota for the process now
3066 //
3070 Entry->BlockSize * POOL_BLOCK_SIZE);
3071 if (!NT_SUCCESS(Status))
3072 {
3073 //
3074 // Quota failed, back out the allocation, clear the owner, and fail
3075 //
3076 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3078 if (Raise) RtlRaiseStatus(Status);
3079 return NULL;
3080 }
3081
3082 //
3083 // Quota worked, write the owner and then reference it before returning
3084 //
3085 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3087 }
3088 else if (!(Buffer) && (Raise))
3089 {
3090 //
3091 // The allocation failed, raise an error if we are in raise mode
3092 //
3094 }
3095
3096 //
3097 // Return the allocated buffer
3098 //
3099 return Buffer;
3100}
3101
3102/* EOF */
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define ALIGN_DOWN_BY(size, align)
#define ALIGN_UP_BY(size, align)
unsigned char BOOLEAN
#define InterlockedIncrement
Definition: armddk.h:53
LONG NTSTATUS
Definition: precomp.h:26
#define DPRINT1
Definition: precomp.h:8
BOOL Verbose
Definition: chkdsk.c:72
LONG_PTR SSIZE_T
Definition: basetsd.h:183
#define MAXULONG_PTR
Definition: basetsd.h:103
#define UNIMPLEMENTED
Definition: debug.h:115
Definition: bufpool.h:45
_In_ PSCSI_REQUEST_BLOCK _Out_ NTSTATUS _Inout_ BOOLEAN * Retry
Definition: classpnp.h:312
#define NULL
Definition: types.h:112
#define TRUE
Definition: types.h:120
#define FALSE
Definition: types.h:117
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:32
#define P(row, col)
static int Hash(const char *)
Definition: reader.c:2257
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:984
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1026
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:1013
#define ULONG_PTR
Definition: config.h:101
#define _IRQL_restores_
Definition: driverspecs.h:233
#define _IRQL_requires_(irql)
Definition: driverspecs.h:229
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:43
#define ExAllocatePoolWithTag(hernya, size, tag)
Definition: env_spec_w32.h:350
#define IsListEmpty(ListHead)
Definition: env_spec_w32.h:954
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
UCHAR KIRQL
Definition: env_spec_w32.h:591
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
#define APC_LEVEL
Definition: env_spec_w32.h:695
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define NonPagedPool
Definition: env_spec_w32.h:307
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
#define KeInitializeSpinLock(sl)
Definition: env_spec_w32.h:604
#define PagedPool
Definition: env_spec_w32.h:308
ULONG ExPoolFailures
Definition: expool.c:57
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:55
#define POOL_BLOCK(x, i)
Definition: expool.c:63
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:114
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:296
ULONG ExpBigTableExpansionFailed
Definition: expool.c:48
#define POOL_FREE_BLOCK(x)
Definition: expool.c:62
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:376
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:441
BOOLEAN ExStopBadTags
Definition: expool.c:53
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1769
#define POOL_BIG_TABLE_USE_RATE
Definition: expool.c:31
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:760
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1853
VOID NTAPI ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:970
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:44
#define POOL_PREV_BLOCK(x)
Definition: expool.c:65
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1357
SIZE_T PoolTrackTableMask
Definition: expool.c:46
VOID NTAPI InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:1021
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:163
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:193
SIZE_T PoolBigPageTableSize
Definition: expool.c:47
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:457
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1322
ULONG ExpNumberOfPagedPools
Definition: expool.c:41
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:99
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2934
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:50
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:45
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:416
#define POOL_ENTRY(x)
Definition: expool.c:61
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:139
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2964
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:85
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:51
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:128
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:178
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1298
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:64
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:42
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:49
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:43
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1582
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
ULONG PoolHitTag
Definition: expool.c:52
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1274
VOID NTAPI ExpSeedHotTags(VOID)
Definition: expool.c:640
SIZE_T PoolTrackTableSize
Definition: expool.c:46
ULONG ExpPoolFlags
Definition: expool.c:56
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:54
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2950
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:151
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:851
ULONGLONG MiLastPoolDumpTime
Definition: expool.c:58
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1683
SIZE_T PoolBigPageTableHash
Definition: expool.c:47
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:92
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:121
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
#define ExAllocatePool(type, size)
Definition: fbtusb.h:44
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2554
_Must_inspect_result_ _In_ USHORT NewSize
Definition: fltkernel.h:975
unsigned int Mask
Definition: fpcontrol.c:82
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:223
Status
Definition: gdiplustypes.h:25
ASMGENDATA Table[]
Definition: genincdata.c:61
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
NTSYSAPI void WINAPI DbgBreakPoint(void)
#define InterlockedExchangeAdd
Definition: interlocked.h:181
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
#define _ReturnAddress()
Definition: intrin_arm.h:35
static CODE_SEG("PAGE")
Definition: isapnp.c:1482
VOID KdbpPrint(_In_ PSTR Format, _In_ ...)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:3082
struct _POOL_HEADER * PPOOL_HEADER
if(dx< 0)
Definition: linetemp.h:194
LIST_ENTRY ExPoolLookasideListHead
Definition: lookas.c:22
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:305
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:271
#define POOL_BILLED_PROCESS_INVALID
Definition: miarm.h:306
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:279
#define POOL_BLOCK_SIZE
Definition: miarm.h:269
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:284
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:285
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:280
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:278
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:283
#define POOL_MAX_ALLOC
Definition: miarm.h:273
#define POOL_FLAG_VERIFIER
Definition: miarm.h:281
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:282
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:304
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3514
#define ASSERT(a)
Definition: mode.c:44
#define ExFreePoolWithTag(_P, _T)
Definition: module.h:1109
#define min(a, b)
Definition: monoChain.cc:55
#define _In_
Definition: ms_sal.h:308
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:295
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1084
@ ProcessObject
Definition: ketypes.h:442
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:319
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:317
@ FirstTry
Definition: copy.c:25
UCHAR KeNumberNodes
Definition: krnlinit.c:40
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:408
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:422
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:119
#define SESSION_POOL_MASK
Definition: mm.h:122
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:918
#define ExRaiseStatus
Definition: ntoskrnl.h:114
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
VOID MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
long LONG
Definition: pedump.c:60
unsigned short USHORT
Definition: pedump.c:61
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
NTSTATUS NTAPI PsChargeProcessPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Charges the process' quota pool. The type of quota to be charged depends upon the PoolType parameter.
Definition: quota.c:872
VOID NTAPI PsReturnPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Returns the pool quota that the process was taking up.
Definition: quota.c:907
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:108
#define KeQueryInterruptTime()
Definition: ke.h:37
FORCEINLINE ULONG KeGetCurrentProcessorNumber(VOID)
Definition: ke.h:341
ULONG PFN_NUMBER
Definition: ke.h:9
#define STATUS_SUCCESS
Definition: shellext.h:65
#define DPRINT
Definition: sndvol32.h:71
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
base of all file and directory entries
Definition: entries.h:83
Definition: ketypes.h:687
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:634
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:635
Definition: btrfs_drv.h:1876
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:145
Definition: typedefs.h:120
struct _LIST_ENTRY * Blink
Definition: typedefs.h:122
struct _LIST_ENTRY * Flink
Definition: typedefs.h:121
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:323
ULONG RunningAllocs
Definition: miarm.h:313
ULONG TotalPages
Definition: miarm.h:315
SIZE_T TotalBytes
Definition: miarm.h:321
ULONG RunningDeAllocs
Definition: miarm.h:314
ULONG TotalBigPages
Definition: miarm.h:316
SIZE_T PoolTrackTableSize
Definition: expool.c:36
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:37
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:35
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:38
USHORT PreviousSize
LONG NonPagedAllocs
Definition: miarm.h:370
SIZE_T NonPagedBytes
Definition: miarm.h:372
LONG NonPagedFrees
Definition: miarm.h:371
SIZE_T PagedBytes
Definition: miarm.h:375
SIZE_T PagedUsed
Definition: extypes.h:1143
ULONG TagUlong
Definition: extypes.h:1139
ULONG PagedFrees
Definition: extypes.h:1142
ULONG PagedAllocs
Definition: extypes.h:1141
ULONG NonPagedAllocs
Definition: extypes.h:1144
SIZE_T NonPagedUsed
Definition: extypes.h:1146
ULONG NonPagedFrees
Definition: extypes.h:1145
#define max(a, b)
Definition: svc.c:63
#define TAG_NONE
Definition: tag.h:110
uint32_t * PULONG_PTR
Definition: typedefs.h:65
uint32_t * PULONG
Definition: typedefs.h:59
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:255
unsigned char * PBOOLEAN
Definition: typedefs.h:53
INT POOL_TYPE
Definition: typedefs.h:78
#define NTAPI
Definition: typedefs.h:36
void * PVOID
Definition: typedefs.h:50
ULONG_PTR SIZE_T
Definition: typedefs.h:80
#define RtlCopyMemory(Destination, Source, Length)
Definition: typedefs.h:263
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
uint32_t ULONG_PTR
Definition: typedefs.h:65
#define IN
Definition: typedefs.h:39
int32_t * PLONG
Definition: typedefs.h:58
#define CONTAINING_RECORD(address, type, field)
Definition: typedefs.h:260
uint32_t ULONG
Definition: typedefs.h:59
uint64_t ULONGLONG
Definition: typedefs.h:67
#define OUT
Definition: typedefs.h:40
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
static int Link(const char **args)
Definition: vfdcmd.c:2414
_In_ WDFCOLLECTION _In_ ULONG Index
_Must_inspect_result_ _In_ WDFDEVICE _In_ BOOLEAN _In_opt_ PVOID Tag
Definition: wdfdevice.h:4065
_Must_inspect_result_ _In_ WDFDEVICE _In_ DEVICE_REGISTRY_PROPERTY _In_ _Strict_type_match_ POOL_TYPE PoolType
Definition: wdfdevice.h:3815
_Must_inspect_result_ _In_ WDFDEVICE _In_ PWDF_DEVICE_PROPERTY_DATA _In_ DEVPROPTYPE _In_ ULONG Size
Definition: wdfdevice.h:4533
_Must_inspect_result_ _In_ PWDF_DPC_CONFIG _In_ PWDF_OBJECT_ATTRIBUTES _Out_ WDFDPC * Dpc
Definition: wdfdpc.h:112
_In_ WDFINTERRUPT _In_ WDF_INTERRUPT_POLICY _In_ WDF_INTERRUPT_PRIORITY Priority
Definition: wdfinterrupt.h:655
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _In_ _Strict_type_match_ POOL_TYPE _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_opt_ ULONG _Out_ WDFLOOKASIDE * Lookaside
Definition: wdfmemory.h:414
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _Strict_type_match_ POOL_TYPE _In_opt_ ULONG PoolTag
Definition: wdfmemory.h:164
_Must_inspect_result_ _In_ WDFIORESLIST _In_ PIO_RESOURCE_DESCRIPTOR Descriptor
Definition: wdfresource.h:342
#define FORCEINLINE
Definition: wdftypes.h:67
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:409
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
#define ExReleaseSpinLock(Lock, OldIrql)
#define ExAllocatePoolWithQuotaTag(a, b, c)
Definition: exfuncs.h:530
#define ExAcquireSpinLock(Lock, OldIrql)
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:1153
#define PROTECTED_POOL
Definition: extypes.h:340
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE GENERAL_LOOKASIDE
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:4327
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:4303
#define POOL_RAISE_IF_ALLOCATION_FAILURE
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _Inout_ PLARGE_INTEGER NumberOfBytes
Definition: iotypes.h:1036
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:792
@ PagedPoolSession
Definition: ketypes.h:881
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:676
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:675
@ LockQueueNonPagedPoolLock
Definition: ketypes.h:652
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:677
#define ROUND_TO_PAGES(Size)
#define BYTES_TO_PAGES(Size)
#define PAGE_ALIGN(Va)
#define ObDereferenceObject
Definition: obfuncs.h:203
#define ObReferenceObject
Definition: obfuncs.h:204
#define PsGetCurrentProcess
Definition: psfuncs.h:17
#define NT_VERIFY(exp)
Definition: rtlfuncs.h:3287
#define InterlockedPushEntrySList(SListHead, SListEntry)
Definition: rtlfuncs.h:3389
#define InterlockedPopEntrySList(SListHead)
Definition: rtlfuncs.h:3392
__wchar_t WCHAR
Definition: xmlstorage.h:180
char CHAR
Definition: xmlstorage.h:175