ReactOS 0.4.15-dev-5865-g640e228
expool.c
Go to the documentation of this file.
1/*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/expool.c
5 * PURPOSE: ARM Memory Manager Executive Pool Manager
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9/* INCLUDES *******************************************************************/
10
11#include <ntoskrnl.h>
12#define NDEBUG
13#include <debug.h>
14
15#define MODULE_INVOLVED_IN_ARM3
16#include <mm/ARM3/miarm.h>
17
18#undef ExAllocatePoolWithQuota
19#undef ExAllocatePoolWithQuotaTag
20
21/* GLOBALS ********************************************************************/
22
23#define POOL_BIG_TABLE_ENTRY_FREE 0x1
24
25/*
26 * This defines when we shrink or expand the table.
27 * 3 --> keep the number of used entries in the 33%-66% of the table capacity.
28 * 4 --> 25% - 75%
29 * etc.
30 */
31#define POOL_BIG_TABLE_USE_RATE 4
32
/*
 * Context handed to the "Generic DPC" that snapshots the pool tracker
 * tables (consumed by ExpGetPoolTagInfoTarget below).
 * NOTE(review): the member list (original lines 35-58) is missing from this
 * capture; members referenced later include PoolTrackTable,
 * PoolTrackTableSize and PoolTrackTableSizeExpansion — confirm against the
 * real source.
 */
33typedef struct _POOL_DPC_CONTEXT
34{
40
59
60/* Pool block/header/list access macros */
61#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
62#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
63#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
64#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
65#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
66
67/*
68 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
69 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
70 * pool code, but only for checked builds.
71 *
72 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
73 * that these checks are done even on retail builds, due to the increasing
74 * number of kernel-mode attacks which depend on dangling list pointers and other
75 * kinds of list-based attacks.
76 *
77 * For now, I will leave these checks on all the time, but later they are likely
78 * to be DBG-only, at least until there are enough kernel-mode security attacks
79 * against ReactOS to warrant the performance hit.
80 *
81 * For now, these are not made inline, so we can get good stack traces.
82 */
/*
 * Decode a pool list link: list pointers stored in free pool blocks have
 * their low bit set, so clear bit 0 to recover the real PLIST_ENTRY.
 * NOTE(review): the signature lines (original 83-85) are missing from this
 * capture; call sites show this is ExpDecodePoolLink(PLIST_ENTRY Link).
 */
86{
87 return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
88}
89
/*
 * Encode a pool list link by setting bit 0; paired with ExpDecodePoolLink.
 * An attacker-overwritten (unencoded) pointer will then fail the list
 * sanity checks in ExpCheckPoolLinks.
 * NOTE(review): signature lines (original 90-92) missing from this capture;
 * call sites show this is ExpEncodePoolLink(PLIST_ENTRY Link).
 */
93{
94 return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
95}
96
VOID
/* NOTE(review): NTAPI/name/parameter lines (original 98-99) missing from
 * this capture; call sites show ExpCheckPoolLinks(PLIST_ENTRY ListHead). */
100{
 /* Following Flink->Blink and Blink->Flink must both lead back to the head */
101 if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
102 (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
103 {
 /* Corrupted free list: bugcheck with BAD_POOL_HEADER, subcode 3 */
104 KeBugCheckEx(BAD_POOL_HEADER,
105 3,
106 (ULONG_PTR)ListHead,
 /* NOTE(review): the last two KeBugCheckEx arguments (original lines
 107-108) were lost in this capture */
109 }
110}
111
112VOID
113NTAPI
/* NOTE(review): name/parameter line (original 114) missing; this is
 * ExpInitializePoolListHead(PLIST_ENTRY ListHead). */
115{
 /* An empty list points at itself, through the encoded (bit-0 set) form */
116 ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
117}
118
/* NOTE(review): return-type and name lines (original 119, 121) missing from
 * this capture; this is BOOLEAN ExpIsPoolListEmpty(PLIST_ENTRY ListHead). */
120NTAPI
122{
 /* Empty when the decoded forward link points back at the head itself */
123 return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
124}
125
126VOID
127NTAPI
/* NOTE(review): name/parameter line (original 128) missing; this is
 * ExpRemovePoolEntryList(PLIST_ENTRY Entry) — unlink Entry from its list,
 * re-encoding the neighbors' links. */
129{
130 PLIST_ENTRY Blink, Flink;
131 Flink = ExpDecodePoolLink(Entry->Flink);
132 Blink = ExpDecodePoolLink(Entry->Blink);
133 Flink->Blink = ExpEncodePoolLink(Blink);
134 Blink->Flink = ExpEncodePoolLink(Flink);
135}
136
/* NOTE(review): return-type and name lines (original 137, 139) missing;
 * this is PLIST_ENTRY ExpRemovePoolHeadList(PLIST_ENTRY ListHead) —
 * pop and return the first entry of the encoded list. */
138NTAPI
140{
141 PLIST_ENTRY Entry, Flink;
142 Entry = ExpDecodePoolLink(ListHead->Flink);
143 Flink = ExpDecodePoolLink(Entry->Flink);
144 ListHead->Flink = ExpEncodePoolLink(Flink);
145 Flink->Blink = ExpEncodePoolLink(ListHead);
146 return Entry;
147}
148
/* NOTE(review): return-type and name lines (original 149, 151) missing;
 * this is PLIST_ENTRY ExpRemovePoolTailList(PLIST_ENTRY ListHead) —
 * pop and return the last entry of the encoded list. */
150NTAPI
152{
153 PLIST_ENTRY Entry, Blink;
154 Entry = ExpDecodePoolLink(ListHead->Blink);
155 Blink = ExpDecodePoolLink(Entry->Blink);
156 ListHead->Blink = ExpEncodePoolLink(Blink);
157 Blink->Flink = ExpEncodePoolLink(ListHead);
158 return Entry;
159}
160
161VOID
162NTAPI
/* NOTE(review): name/parameter lines (original 163-164) missing; this is
 * ExpInsertPoolTailList(PLIST_ENTRY ListHead, PLIST_ENTRY Entry) —
 * append Entry at the tail, validating the list before and after. */
165{
166 PLIST_ENTRY Blink;
167 ExpCheckPoolLinks(ListHead);
168 Blink = ExpDecodePoolLink(ListHead->Blink);
169 Entry->Flink = ExpEncodePoolLink(ListHead);
170 Entry->Blink = ExpEncodePoolLink(Blink);
171 Blink->Flink = ExpEncodePoolLink(Entry);
172 ListHead->Blink = ExpEncodePoolLink(Entry);
173 ExpCheckPoolLinks(ListHead);
174}
175
176VOID
177NTAPI
/* NOTE(review): name/parameter lines (original 178-179) missing; this is
 * ExpInsertPoolHeadList(PLIST_ENTRY ListHead, PLIST_ENTRY Entry) —
 * insert Entry right after the head, validating the list before and after. */
180{
181 PLIST_ENTRY Flink;
182 ExpCheckPoolLinks(ListHead);
183 Flink = ExpDecodePoolLink(ListHead->Flink);
184 Entry->Flink = ExpEncodePoolLink(Flink);
185 Entry->Blink = ExpEncodePoolLink(ListHead);
186 Flink->Blink = ExpEncodePoolLink(Entry);
187 ListHead->Flink = ExpEncodePoolLink(Entry);
188 ExpCheckPoolLinks(ListHead);
189}
190
191VOID
192NTAPI
/*
 * Validate a small-pool block header against its neighbors on the same page:
 * the previous block's BlockSize must equal our PreviousSize, the next
 * block's PreviousSize must equal our BlockSize, adjacent blocks must not
 * straddle a page boundary, and the first block on a page must be
 * page-aligned. Any violation bugchecks with BAD_POOL_HEADER.
 * NOTE(review): the name/parameter line (original 193) is missing; this is
 * ExpCheckPoolHeader(PPOOL_HEADER Entry). Each KeBugCheckEx call below is
 * also missing its final argument line in this capture.
 */
194{
195 PPOOL_HEADER PreviousEntry, NextEntry;
196
197 /* Is there a block before this one? */
198 if (Entry->PreviousSize)
199 {
200 /* Get it */
201 PreviousEntry = POOL_PREV_BLOCK(Entry);
202
203 /* The two blocks must be on the same page! */
204 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
205 {
206 /* Something is awry */
207 KeBugCheckEx(BAD_POOL_HEADER,
208 6,
209 (ULONG_PTR)PreviousEntry,
210 __LINE__,
212 }
213
214 /* This block should also indicate that it's as large as we think it is */
215 if (PreviousEntry->BlockSize != Entry->PreviousSize)
216 {
217 /* Otherwise, someone corrupted one of the sizes */
218 DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
219 PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
220 Entry->PreviousSize, (char *)&Entry->PoolTag);
221 KeBugCheckEx(BAD_POOL_HEADER,
222 5,
223 (ULONG_PTR)PreviousEntry,
224 __LINE__,
226 }
227 }
228 else if (PAGE_ALIGN(Entry) != Entry)
229 {
230 /* If there's no block before us, we are the first block, so we should be on a page boundary */
231 KeBugCheckEx(BAD_POOL_HEADER,
232 7,
233 0,
234 __LINE__,
236 }
237
238 /* This block must have a size */
239 if (!Entry->BlockSize)
240 {
241 /* Someone must've corrupted this field */
242 if (Entry->PreviousSize)
243 {
244 PreviousEntry = POOL_PREV_BLOCK(Entry);
245 DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
246 (char *)&PreviousEntry->PoolTag,
247 (char *)&Entry->PoolTag);
248 }
249 else
250 {
251 DPRINT1("Entry tag %.4s\n",
252 (char *)&Entry->PoolTag);
253 }
254 KeBugCheckEx(BAD_POOL_HEADER,
255 8,
256 0,
257 __LINE__,
259 }
260
261 /* Okay, now get the next block */
262 NextEntry = POOL_NEXT_BLOCK(Entry);
263
264 /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
265 if (PAGE_ALIGN(NextEntry) != NextEntry)
266 {
267 /* The two blocks must be on the same page! */
268 if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
269 {
270 /* Something is messed up */
271 KeBugCheckEx(BAD_POOL_HEADER,
272 9,
273 (ULONG_PTR)NextEntry,
274 __LINE__,
276 }
277
278 /* And this block should think we are as large as we truly are */
279 if (NextEntry->PreviousSize != Entry->BlockSize)
280 {
281 /* Otherwise, someone corrupted the field */
282 DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
283 Entry->BlockSize, (char *)&Entry->PoolTag,
284 NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
285 KeBugCheckEx(BAD_POOL_HEADER,
286 5,
287 (ULONG_PTR)NextEntry,
288 __LINE__,
290 }
291 }
292}
293
294VOID
295NTAPI
/*
 * Verify that the allocation at P carries the expected pool tag and (when
 * PoolType != -1) the expected pool type; mismatches bugcheck with
 * BAD_POOL_CALLER. Page-aligned P is treated as a big-page allocation and
 * looked up in PoolBigPageTable; otherwise the in-band POOL_HEADER is used.
 * NOTE(review): several lines are missing from this capture — the name/
 * parameter lines (original 296, 298: ExpCheckPoolAllocation with a
 * POOL_TYPE PoolType parameter), local declarations (301, 303), the big
 * pool table lock acquire/release (313, 332) and a header check call (354).
 */
300{
302 ULONG i;
304 POOL_TYPE RealPoolType;
305
306 /* Get the pool header */
307 Entry = ((PPOOL_HEADER)P) - 1;
308
309 /* Check if this is a large allocation */
310 if (PAGE_ALIGN(P) == P)
311 {
312 /* Lock the pool table */
314
315 /* Find the pool tag */
316 for (i = 0; i < PoolBigPageTableSize; i++)
317 {
318 /* Check if this is our allocation */
319 if (PoolBigPageTable[i].Va == P)
320 {
321 /* Make sure the tag is ok */
322 if (PoolBigPageTable[i].Key != Tag)
323 {
324 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
325 }
326
327 break;
328 }
329 }
330
331 /* Release the lock */
333
334 if (i == PoolBigPageTableSize)
335 {
336 /* Did not find the allocation */
337 //ASSERT(FALSE);
338 }
339
340 /* Get Pool type by address */
341 RealPoolType = MmDeterminePoolType(P);
342 }
343 else
344 {
345 /* Verify the tag */
346 if (Entry->PoolTag != Tag)
347 {
348 DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
349 &Tag, &Entry->PoolTag, Entry->PoolTag);
350 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
351 }
352
353 /* Check the rest of the header */
355
356 /* Get Pool type from entry */
357 RealPoolType = (Entry->PoolType - 1);
358 }
359
360 /* Should we check the pool type? */
361 if (PoolType != -1)
362 {
363 /* Verify the pool type */
364 if (RealPoolType != PoolType)
365 {
366 DPRINT1("Wrong pool type! Expected %s, got %s\n",
367 PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
368 (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
369 KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
370 }
371 }
372}
373
374VOID
375NTAPI
/*
 * Walk every pool block on the page containing Block, accumulating sizes,
 * and bugcheck (BAD_POOL_HEADER, subcode 10) unless Block was encountered
 * during the walk and the walk terminated exactly at a page boundary.
 * NOTE(review): missing lines in this capture — signature (original 376:
 * ExpCheckPoolBlocks taking the block pointer), the Entry declaration
 * (380), the per-block header check call (393-394), and the advance to the
 * next block (397, presumably Entry = POOL_NEXT_BLOCK(Entry) — confirm).
 */
377{
378 BOOLEAN FoundBlock = FALSE;
379 SIZE_T Size = 0;
381
382 /* Get the first entry for this page, make sure it really is the first */
383 Entry = PAGE_ALIGN(Block);
384 ASSERT(Entry->PreviousSize == 0);
385
386 /* Now scan each entry */
387 while (TRUE)
388 {
389 /* When we actually found our block, remember this */
390 if (Entry == Block) FoundBlock = TRUE;
391
392 /* Now validate this block header */
395 /* And go to the next one, keeping track of our size */
396 Size += Entry->BlockSize;
398
399 /* If we hit the last block, stop */
400 if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
401
402 /* If we hit the end of the page, stop */
403 if (PAGE_ALIGN(Entry) == Entry) break;
404 }
405
406 /* We must've found our block, and we must have hit the end of the page */
407 if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
408 {
409 /* Otherwise, the blocks are messed up */
410 KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
411 }
412}
413
415VOID
/*
 * Verify the caller's IRQL is legal for the requested pool type, and take
 * the system down with BAD_POOL_CALLER if it isn't.
 * NOTE(review): heavily truncated in this capture — the name/parameter
 * lines (original 416-417, presumably ExpCheckPoolIrqlLevel(POOL_TYPE
 * PoolType, SIZE_T NumberOfBytes, PVOID Entry)), the IRQL comparison
 * condition (424-426) and two KeBugCheckEx arguments (432-433, 435) are
 * missing — confirm against the real source before relying on details.
 */
418 IN PVOID Entry)
419{
420 //
421 // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
422 // be DISPATCH_LEVEL or lower for Non Paged Pool
423 //
427 {
428 //
429 // Take the system down
430 //
431 KeBugCheckEx(BAD_POOL_CALLER,
434 PoolType,
436 }
437}
438
440ULONG
/* NOTE(review): FORCEINLINE/name lines (original 439, 441) missing; this is
 * ExpComputeHashForTag(ULONG Tag, SIZE_T BucketMask) — hashes a pool tag
 * into a tracker-table bucket index. */
442 IN SIZE_T BucketMask)
443{
444 //
445 // Compute the hash by multiplying with a large prime number and then XORing
446 // with the HIDWORD of the result.
447 //
448 // Finally, AND with the bucket mask to generate a valid index/bucket into
449 // the table
450 //
451 ULONGLONG Result = (ULONGLONG)40543 * Tag;
452 return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
453}
454
456ULONG
/* NOTE(review): name/parameter line (original 457) and the lines declaring
 * and computing Result from the address (459, 469) are missing; this is
 * ExpComputePartialHashForAddress(PVOID BaseAddress) — presumably Result is
 * the address converted to a page number, per the comment below (confirm). */
458{
460 //
461 // Compute the hash by converting the address into a page number, and then
462 // XORing each nibble with the next one.
463 //
464 // We do *NOT* AND with the bucket mask at this point because big table expansion
465 // might happen. Therefore, the final step of the hash must be performed
466 // while holding the expansion pushlock, and this is why we call this a
467 // "partial" hash only.
468 //
470 return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
471}
472
473#if DBG
474/*
475 * FORCEINLINE
476 * BOOLEAN
477 * ExpTagAllowPrint(CHAR Tag);
478 */
479#define ExpTagAllowPrint(Tag) \
480 ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)
481
482#ifdef KDBG
483#define MiDumperPrint(dbg, fmt, ...) \
484 if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
485 else DPRINT1(fmt, ##__VA_ARGS__)
486#else
487#define MiDumperPrint(dbg, fmt, ...) \
488 DPRINT1(fmt, ##__VA_ARGS__)
489#endif
490
491VOID
/*
 * Dump per-tag pool usage from PoolTrackTable, either to the kernel
 * debugger (!poolused, via KdbpPrint when built with KDBG) or to DPRINT1
 * when invoked from an out-of-memory situation. Tags are filtered by the
 * caller-supplied Tag/Mask pair; printable tags are shown as characters,
 * others as hex; the Verbose flag adds Frees/Diff columns.
 * NOTE(review): missing lines in this capture — the name/parameter line
 * (original 492: MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag,
 * ULONG Mask, ULONG Flags) — confirm), the TableEntry declaration (495),
 * the Verbose flag extraction (516), and the per-iteration TableEntry
 * assignment and skip logic (537-539).
 */
493{
494 SIZE_T i;
496
497 //
498 // Only print header if called from OOM situation
499 //
500 if (!CalledFromDbg)
501 {
502 DPRINT1("---------------------\n");
503 DPRINT1("Out of memory dumper!\n");
504 }
505#ifdef KDBG
506 else
507 {
508 KdbpPrint("Pool Used:\n");
509 }
510#endif
511
512 //
513 // Remember whether we'll have to be verbose
514 // This is the only supported flag!
515 //
517
518 //
519 // Print table header
520 //
521 if (Verbose)
522 {
523 MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
524 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
525 }
526 else
527 {
528 MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
529 MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
530 }
531
532 //
533 // We'll extract allocations for all the tracked pools
534 //
535 for (i = 0; i < PoolTrackTableSize; ++i)
536 {
538
540
541 //
542 // We only care about tags which have allocated memory
543 //
544 if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
545 {
546 //
547 // If there's a tag, attempt to do a pretty print
548 // only if it matches the caller's tag, or if
549 // any tag is allowed
550 // For checking whether it matches caller's tag,
551 // use the mask to make sure not to mess with the wildcards
552 //
553 if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
554 (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
555 {
556 CHAR Tag[4];
557
558 //
559 // Extract each 'component' and check whether they are printable
560 //
561 Tag[0] = TableEntry->Key & 0xFF;
562 Tag[1] = TableEntry->Key >> 8 & 0xFF;
563 Tag[2] = TableEntry->Key >> 16 & 0xFF;
564 Tag[3] = TableEntry->Key >> 24 & 0xFF;
565
566 if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
567 {
568 //
569 // Print in direct order to make !poolused TAG usage easier
570 //
571 if (Verbose)
572 {
573 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
574 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
575 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
576 TableEntry->PagedAllocs, TableEntry->PagedFrees,
577 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
578 }
579 else
580 {
581 MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
582 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
583 TableEntry->PagedAllocs, TableEntry->PagedBytes);
584 }
585 }
586 else
587 {
588 if (Verbose)
589 {
590 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
591 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
592 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
593 TableEntry->PagedAllocs, TableEntry->PagedFrees,
594 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
595 }
596 else
597 {
598 MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
599 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
600 TableEntry->PagedAllocs, TableEntry->PagedBytes);
601 }
602 }
603 }
604 else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
605 {
606 if (Verbose)
607 {
608 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
609 TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
610 (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
611 TableEntry->PagedAllocs, TableEntry->PagedFrees,
612 (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
613 }
614 else
615 {
616 MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
617 TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
618 TableEntry->PagedAllocs, TableEntry->PagedBytes);
619 }
620 }
621 }
622 }
623
624 if (!CalledFromDbg)
625 {
626 DPRINT1("---------------------\n");
627 }
628}
629#endif
630
631/* PRIVATE FUNCTIONS **********************************************************/
632
633CODE_SEG("INIT")
634VOID
635NTAPI
/*
 * Pre-seed the pool tracker hash table with the 64 most frequently used
 * ("hot") pool tags so later allocations with these tags skip the
 * insert-race path in ExpInsertPoolTracker. Init-time only (CODE_SEG INIT).
 * NOTE(review): missing lines in this capture — the name line (original
 * 636: ExpSeedHotTags(VOID)), the TrackTable = PoolTrackTable assignment
 * (639), and the Hash = ExpComputeHashForTag(...) computation (718).
 */
637{
638 ULONG i, Key, Hash, Index;
640 ULONG TagList[] =
641 {
642 ' oI',
643 ' laH',
644 'PldM',
645 'LooP',
646 'tSbO',
647 ' prI',
648 'bdDN',
649 'LprI',
650 'pOoI',
651 ' ldM',
652 'eliF',
653 'aVMC',
654 'dSeS',
655 'CFtN',
656 'looP',
657 'rPCT',
658 'bNMC',
659 'dTeS',
660 'sFtN',
661 'TPCT',
662 'CPCT',
663 ' yeK',
664 'qSbO',
665 'mNoI',
666 'aEoI',
667 'cPCT',
668 'aFtN',
669 '0ftN',
670 'tceS',
671 'SprI',
672 'ekoT',
673 ' eS',
674 'lCbO',
675 'cScC',
676 'lFtN',
677 'cAeS',
678 'mfSF',
679 'kWcC',
680 'miSF',
681 'CdfA',
682 'EdfA',
683 'orSF',
684 'nftN',
685 'PRIU',
686 'rFpN',
687 'RFpN',
688 'aPeS',
689 'sUeS',
690 'FpcA',
691 'MpcA',
692 'cSeS',
693 'mNbO',
694 'sFpN',
695 'uLeS',
696 'DPcS',
697 'nevE',
698 'vrqR',
699 'ldaV',
700 ' pP',
701 'SdaV',
702 ' daV',
703 'LdaV',
704 'FdaV',
705 ' GIB',
706 };
707
708 //
709 // Loop all 64 hot tags
710 //
711 ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
712 for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
713 {
714 //
715 // Get the current tag, and compute its hash in the tracker table
716 //
717 Key = TagList[i];
719
720 //
721 // Loop all the hashes in this index/bucket
722 //
723 Index = Hash;
724 while (TRUE)
725 {
726 //
727 // Find an empty entry, and make sure this isn't the last hash that
728 // can fit.
729 //
730 // On checked builds, also make sure this is the first time we are
731 // seeding this tag.
732 //
733 ASSERT(TrackTable[Hash].Key != Key);
734 if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
735 {
736 //
737 // It has been seeded, move on to the next tag
738 //
739 TrackTable[Hash].Key = Key;
740 break;
741 }
742
743 //
744 // This entry was already taken, compute the next possible hash while
745 // making sure we're not back at our initial index.
746 //
747 ASSERT(TrackTable[Hash].Key != Key);
748 Hash = (Hash + 1) & PoolTrackTableMask;
749 if (Hash == Index) break;
750 }
751 }
752}
753
754VOID
755NTAPI
/*
 * Account a pool free against the tracker table: find the bucket for Key
 * via open addressing (linear probing) and interlocked-increment the
 * paged/nonpaged free counters. If the table is full the free is ignored
 * (expansion is not implemented in ReactOS).
 * NOTE(review): missing lines in this capture — the name/parameter lines
 * (original 756-758: ExpRemovePoolTracker(ULONG Key, SIZE_T NumberOfBytes,
 * POOL_TYPE PoolType) — confirm), local declarations (760-761), the
 * Table/TableSize assignments (783, 785-786), the TableEntry assignment in
 * the loop (798), the pool-type test (805), and the interlocked byte-count
 * updates (808-809, 813-814).
 */
759{
762 SIZE_T TableMask, TableSize;
763
764 //
765 // Remove the PROTECTED_POOL flag which is not part of the tag
766 //
767 Key &= ~PROTECTED_POOL;
768
769 //
770 // With WinDBG you can set a tag you want to break on when an allocation is
771 // attempted
772 //
773 if (Key == PoolHitTag) DbgBreakPoint();
774
775 //
776 // Why the double indirection? Because normally this function is also used
777 // when doing session pool allocations, which has another set of tables,
778 // sizes, and masks that live in session pool. Now we don't support session
779 // pool so we only ever use the regular tables, but I'm keeping the code this
780 // way so that the day we DO support session pool, it won't require that
781 // many changes
782 //
784 TableMask = PoolTrackTableMask;
787
788 //
789 // Compute the hash for this key, and loop all the possible buckets
790 //
791 Hash = ExpComputeHashForTag(Key, TableMask);
792 Index = Hash;
793 while (TRUE)
794 {
795 //
796 // Have we found the entry for this tag? */
797 //
799 if (TableEntry->Key == Key)
800 {
801 //
802 // Decrement the counters depending on if this was paged or nonpaged
803 // pool
804 //
806 {
807 InterlockedIncrement(&TableEntry->NonPagedFrees);
810 return;
811 }
812 InterlockedIncrement(&TableEntry->PagedFrees);
815 return;
816 }
817
818 //
819 // We should have only ended up with an empty entry if we've reached
820 // the last bucket
821 //
822 if (!TableEntry->Key)
823 {
824 DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
825 Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
826 ASSERT(Hash == TableMask);
827 }
828
829 //
830 // This path is hit when we don't have an entry, and the current bucket
831 // is full, so we simply try the next one
832 //
833 Hash = (Hash + 1) & TableMask;
834 if (Hash == Index) break;
835 }
836
837 //
838 // And finally this path is hit when all the buckets are full, and we need
839 // some expansion. This path is not yet supported in ReactOS and so we'll
840 // ignore the tag
841 //
842 DPRINT1("Out of pool tag space, ignoring...\n");
843}
844
845VOID
846NTAPI
/*
 * Account a pool allocation against the tracker table: probe for Key's
 * bucket, creating the entry under a lock on first use, then
 * interlocked-increment the paged/nonpaged allocation counters. If the
 * table is full the allocation is ignored (expansion not implemented).
 * NOTE(review): missing lines in this capture — the name/parameter lines
 * (original 847-849: ExpInsertPoolTracker(ULONG Key, SIZE_T NumberOfBytes,
 * POOL_TYPE PoolType) — confirm), local declarations (851-853), the
 * session-pool ASSERTs (875-876), the Table/TableSize assignments
 * (886, 888-889), the TableEntry assignment (901), the pool-type test
 * (908), the interlocked byte-count updates (911, 915), and the tag
 * spinlock acquire/release around entry creation (928, 935, 938).
 */
850{
854 SIZE_T TableMask, TableSize;
855
856 //
857 // Remove the PROTECTED_POOL flag which is not part of the tag
858 //
859 Key &= ~PROTECTED_POOL;
860
861 //
862 // With WinDBG you can set a tag you want to break on when an allocation is
863 // attempted
864 //
865 if (Key == PoolHitTag) DbgBreakPoint();
866
867 //
868 // There is also an internal flag you can set to break on malformed tags
869 //
870 if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
871
872 //
873 // ASSERT on ReactOS features not yet supported
874 //
877
878 //
879 // Why the double indirection? Because normally this function is also used
880 // when doing session pool allocations, which has another set of tables,
881 // sizes, and masks that live in session pool. Now we don't support session
882 // pool so we only ever use the regular tables, but I'm keeping the code this
883 // way so that the day we DO support session pool, it won't require that
884 // many changes
885 //
887 TableMask = PoolTrackTableMask;
890
891 //
892 // Compute the hash for this key, and loop all the possible buckets
893 //
894 Hash = ExpComputeHashForTag(Key, TableMask);
895 Index = Hash;
896 while (TRUE)
897 {
898 //
899 // Do we already have an entry for this tag? */
900 //
902 if (TableEntry->Key == Key)
903 {
904 //
905 // Increment the counters depending on if this was paged or nonpaged
906 // pool
907 //
909 {
910 InterlockedIncrement(&TableEntry->NonPagedAllocs);
912 return;
913 }
914 InterlockedIncrement(&TableEntry->PagedAllocs);
916 return;
917 }
918
919 //
920 // We don't have an entry yet, but we've found a free bucket for it
921 //
922 if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
923 {
924 //
925 // We need to hold the lock while creating a new entry, since other
926 // processors might be in this code path as well
927 //
929 if (!PoolTrackTable[Hash].Key)
930 {
931 //
932 // We've won the race, so now create this entry in the bucket
933 //
934 ASSERT(Table[Hash].Key == 0);
936 TableEntry->Key = Key;
937 }
939
940 //
941 // Now we force the loop to run again, and we should now end up in
942 // the code path above which does the interlocked increments...
943 //
944 continue;
945 }
946
947 //
948 // This path is hit when we don't have an entry, and the current bucket
949 // is full, so we simply try the next one
950 //
951 Hash = (Hash + 1) & TableMask;
952 if (Hash == Index) break;
953 }
954
955 //
956 // And finally this path is hit when all the buckets are full, and we need
957 // some expansion. This path is not yet supported in ReactOS and so we'll
958 // ignore the tag
959 //
960 DPRINT1("Out of pool tag space, ignoring...\n");
961}
962
963CODE_SEG("INIT")
964VOID
965NTAPI
/*
 * Initialize a POOL_DESCRIPTOR: record type/index/threshold/lock, zero all
 * accounting counters, clear the pending-free (deferred free) list, and set
 * up every free-block list head. Init-time only (CODE_SEG INIT).
 * NOTE(review): the name and first parameter lines (original 966-967:
 * ExpInitializePoolDescriptor(PPOOL_DESCRIPTOR PoolDescriptor, POOL_TYPE
 * PoolType, ...)) and the final session-pool line (1011) are missing from
 * this capture.
 */
968 IN ULONG PoolIndex,
969 IN ULONG Threshold,
970 IN PVOID PoolLock)
971{
972 PLIST_ENTRY NextEntry, LastEntry;
973
974 //
975 // Setup the descriptor based on the caller's request
976 //
977 PoolDescriptor->PoolType = PoolType;
978 PoolDescriptor->PoolIndex = PoolIndex;
979 PoolDescriptor->Threshold = Threshold;
980 PoolDescriptor->LockAddress = PoolLock;
981
982 //
983 // Initialize accounting data
984 //
985 PoolDescriptor->RunningAllocs = 0;
986 PoolDescriptor->RunningDeAllocs = 0;
987 PoolDescriptor->TotalPages = 0;
988 PoolDescriptor->TotalBytes = 0;
989 PoolDescriptor->TotalBigPages = 0;
990
991 //
992 // Nothing pending for now
993 //
994 PoolDescriptor->PendingFrees = NULL;
995 PoolDescriptor->PendingFreeDepth = 0;
996
997 //
998 // Loop all the descriptor's allocation lists and initialize them
999 //
1000 NextEntry = PoolDescriptor->ListHeads;
1001 LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
1002 while (NextEntry < LastEntry)
1003 {
1004 ExpInitializePoolListHead(NextEntry);
1005 NextEntry++;
1006 }
1007
1008 //
1009 // Note that ReactOS does not support Session Pool Yet
1010 //
1012}
1013
1014CODE_SEG("INIT")
1015VOID
1016NTAPI
/*
 * One-time initialization of the executive pool, called once per base pool
 * type. For NonPagedPool: size (from a registry-supplied power of two,
 * defaulting to 2048/4096 entries, minimum 64) and allocate the tag tracker
 * table and the big-page tracker table — halving the size and retrying on
 * allocation failure, bugchecking MUST_SUCCEED_POOL_EMPTY at size 1 — then
 * seed hot tags and initialize the nonpaged descriptor. For PagedPool:
 * allocate and initialize the paged descriptor vector plus its guarded
 * mutex.
 * NOTE(review): many lines are missing from this capture — the name and
 * first parameter (original 1017: InitializePool(POOL_TYPE PoolType, ...)),
 * local declarations (1020-1021), the TableSize initializations from the
 * registry (1037, 1119), the MiAllocatePoolPages calls (1082, 1149-1156),
 * the mask/zeroing statements (1105-1109, 1176-1181), the spinlock init
 * (1208), and the descriptor setup calls (1213-1215, 1230, 1249-1257,
 * 1263). Treat details below as incomplete.
 */
1018 IN ULONG Threshold)
1019{
1022 ULONG i;
1023
1024 //
1025 // Check what kind of pool this is
1026 //
1027 if (PoolType == NonPagedPool)
1028 {
1029 //
1030 // Compute the track table size and convert it from a power of two to an
1031 // actual byte size
1032 //
1033 // NOTE: On checked builds, we'll assert if the registry table size was
1034 // invalid, while on retail builds we'll just break out of the loop at
1035 // that point.
1036 //
1038 for (i = 0; i < 32; i++)
1039 {
1040 if (TableSize & 1)
1041 {
1042 ASSERT((TableSize & ~1) == 0);
1043 if (!(TableSize & ~1)) break;
1044 }
1045 TableSize >>= 1;
1046 }
1047
1048 //
1049 // If we hit bit 32, than no size was defined in the registry, so
1050 // we'll use the default size of 2048 entries.
1051 //
1052 // Otherwise, use the size from the registry, as long as it's not
1053 // smaller than 64 entries.
1054 //
1055 if (i == 32)
1056 {
1057 PoolTrackTableSize = 2048;
1058 }
1059 else
1060 {
1061 PoolTrackTableSize = max(1 << i, 64);
1062 }
1063
1064 //
1065 // Loop trying with the biggest specified size first, and cut it down
1066 // by a power of two each iteration in case not enough memory exist
1067 //
1068 while (TRUE)
1069 {
1070 //
1071 // Do not allow overflow
1072 //
1073 if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1074 {
1075 PoolTrackTableSize >>= 1;
1076 continue;
1077 }
1078
1079 //
1080 // Allocate the tracker table and exit the loop if this worked
1081 //
1083 (PoolTrackTableSize + 1) *
1084 sizeof(POOL_TRACKER_TABLE));
1085 if (PoolTrackTable) break;
1086
1087 //
1088 // Otherwise, as long as we're not down to the last bit, keep
1089 // iterating
1090 //
1091 if (PoolTrackTableSize == 1)
1092 {
1093 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1094 TableSize,
1095 0xFFFFFFFF,
1096 0xFFFFFFFF,
1097 0xFFFFFFFF);
1098 }
1099 PoolTrackTableSize >>= 1;
1100 }
1101
1102 //
1103 // Add one entry, compute the hash, and zero the table
1104 //
1107
1110
1111 //
1112 // Finally, add the most used tags to speed up those allocations
1113 //
1115
1116 //
1117 // We now do the exact same thing with the tracker table for big pages
1118 //
1120 for (i = 0; i < 32; i++)
1121 {
1122 if (TableSize & 1)
1123 {
1124 ASSERT((TableSize & ~1) == 0);
1125 if (!(TableSize & ~1)) break;
1126 }
1127 TableSize >>= 1;
1128 }
1129
1130 //
1131 // For big pages, the default tracker table is 4096 entries, while the
1132 // minimum is still 64
1133 //
1134 if (i == 32)
1135 {
1136 PoolBigPageTableSize = 4096;
1137 }
1138 else
1139 {
1140 PoolBigPageTableSize = max(1 << i, 64);
1141 }
1142
1143 //
1144 // Again, run the exact same loop we ran earlier, but this time for the
1145 // big pool tracker instead
1146 //
1147 while (TRUE)
1148 {
1150 {
1152 continue;
1153 }
1154
1157 sizeof(POOL_TRACKER_BIG_PAGES));
1158 if (PoolBigPageTable) break;
1159
1160 if (PoolBigPageTableSize == 1)
1161 {
1162 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1163 TableSize,
1164 0xFFFFFFFF,
1165 0xFFFFFFFF,
1166 0xFFFFFFFF);
1167 }
1168
1170 }
1171
1172 //
1173 // An extra entry is not needed for for the big pool tracker, so just
1174 // compute the hash and zero it
1175 //
1179 for (i = 0; i < PoolBigPageTableSize; i++)
1180 {
1182 }
1183
1184 //
1185 // During development, print this out so we can see what's happening
1186 //
1187 DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1189 DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1191
1192 //
1193 // Insert the generic tracker for all of big pool
1194 //
1195 ExpInsertPoolTracker('looP',
1197 sizeof(POOL_TRACKER_BIG_PAGES)),
1198 NonPagedPool);
1199
1200 //
1201 // No support for NUMA systems at this time
1202 //
1203 ASSERT(KeNumberNodes == 1);
1204
1205 //
1206 // Initialize the tag spinlock
1207 //
1209
1210 //
1211 // Initialize the nonpaged pool descriptor
1212 //
1216 0,
1217 Threshold,
1218 NULL);
1219 }
1220 else
1221 {
1222 //
1223 // No support for NUMA systems at this time
1224 //
1225 ASSERT(KeNumberNodes == 1);
1226
1227 //
1228 // Allocate the pool descriptor
1229 //
1231 sizeof(KGUARDED_MUTEX) +
1232 sizeof(POOL_DESCRIPTOR),
1233 'looP');
1234 if (!Descriptor)
1235 {
1236 //
1237 // This is really bad...
1238 //
1239 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1240 0,
1241 -1,
1242 -1,
1243 -1);
1244 }
1245
1246 //
1247 // Setup the vector and guarded mutex for paged pool
1248 //
1254 PagedPool,
1255 0,
1256 Threshold,
1258
1259 //
1260 // Insert the generic tracker for all of nonpaged pool
1261 //
1262 ExpInsertPoolTracker('looP',
1264 NonPagedPool);
1265 }
1266}
1267
1269KIRQL
/*
 * Acquire the lock protecting a pool descriptor and return the previous
 * IRQL: nonpaged pool uses a queued spin lock, paged pool uses the guarded
 * mutex stored in LockAddress (returning APC_LEVEL).
 * NOTE(review): the name/parameter line (original 1270, presumably
 * ExLockPool(PPOOL_DESCRIPTOR Descriptor)) and the queued-spin-lock
 * acquire/return line (1280) are missing from this capture.
 */
1271{
1272 //
1273 // Check if this is nonpaged pool
1274 //
1275 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1276 {
1277 //
1278 // Use the queued spin lock
1279 //
1281 }
1282 else
1283 {
1284 //
1285 // Use the guarded mutex
1286 //
1287 KeAcquireGuardedMutex(Descriptor->LockAddress);
1288 return APC_LEVEL;
1289 }
1290}
1291
1293VOID
/*
 * Release the lock taken by ExLockPool: queued spin lock for nonpaged pool,
 * guarded mutex for paged pool.
 * NOTE(review): the name/parameter lines (original 1294-1295, presumably
 * ExUnlockPool(PPOOL_DESCRIPTOR Descriptor, KIRQL OldIrql)) and the queued
 * spin lock release line (1305) are missing from this capture.
 */
1296{
1297 //
1298 // Check if this is nonpaged pool
1299 //
1300 if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1301 {
1302 //
1303 // Use the queued spin lock
1304 //
1306 }
1307 else
1308 {
1309 //
1310 // Use the guarded mutex
1311 //
1312 KeReleaseGuardedMutex(Descriptor->LockAddress);
1313 }
1314}
1315
1316VOID
1317NTAPI
/*
 * "Generic DPC" callback run on every processor while gathering pool tag
 * information: exactly one processor wins a race and copies the live
 * PoolTrackTable into the caller-supplied snapshot buffer; all processors
 * then synchronize and signal completion via the DPC barrier.
 * NOTE(review): missing lines in this capture — the name/parameter lines
 * (original 1318-1321, a KDEFERRED_ROUTINE taking the POOL_DPC_CONTEXT via
 * DeferredContext), the Context assignment (1323-1325), the race-winning
 * condition (1330), the copy-source argument (1333), and the barrier
 * synchronize/decrement calls (1347-1348).
 */
1322{
1326
1327 //
1328 // Make sure we win the race, and if we did, copy the data atomically
1329 //
1331 {
1332 RtlCopyMemory(Context->PoolTrackTable,
1334 Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1335
1336 //
1337 // This is here because ReactOS does not yet support expansion
1338 //
1339 ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1340 }
1341
1342 //
1343 // Regardless of whether we won or not, we must now synchronize and then
1344 // decrement the barrier since this is one more processor that has completed
1345 // the callback.
1346 //
1349}
1350
/*
 * Fill a caller-supplied SYSTEM_POOLTAG_INFORMATION buffer with per-tag
 * allocation statistics: snapshot PoolTrackTable atomically via a "Generic
 * DPC", then copy every non-empty entry into the caller's buffer. The total
 * Count and required length are always computed, even once the buffer is
 * too small, so the caller can re-query with the right size.
 * NOTE(review): missing lines in this capture — the return type and name
 * lines (original 1351, 1353: NTSTATUS ExGetPoolTagInfo(...)), parameter
 * and local declaration lines (1355, 1359, 1362-1363), the temporary buffer
 * allocation (1386-1387), the KeGenericCallDpc invocation (1396), and the
 * Status assignment on overflow (1424, presumably
 * STATUS_INFO_LENGTH_MISMATCH — confirm).
 */
1352NTAPI
1354 IN ULONG SystemInformationLength,
1356{
1357 ULONG TableSize, CurrentLength;
1358 ULONG EntryCount;
1360 PSYSTEM_POOLTAG TagEntry;
1361 PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1364
1365 //
1366 // Keep track of how much data the caller's buffer must hold
1367 //
1368 CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1369
1370 //
1371 // Initialize the caller's buffer
1372 //
1373 TagEntry = &SystemInformation->TagInfo[0];
1374 SystemInformation->Count = 0;
1375
1376 //
1377 // Capture the number of entries, and the total size needed to make a copy
1378 // of the table
1379 //
1380 EntryCount = (ULONG)PoolTrackTableSize;
1381 TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1382
1383 //
1384 // Allocate the "Generic DPC" temporary buffer
1385 //
1388
1389 //
1390 // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1391 //
1392 Context.PoolTrackTable = Buffer;
1393 Context.PoolTrackTableSize = PoolTrackTableSize;
1394 Context.PoolTrackTableExpansion = NULL;
1395 Context.PoolTrackTableSizeExpansion = 0;
1397
1398 //
1399 // Now parse the results
1400 //
1401 for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1402 {
1403 //
1404 // If the entry is empty, skip it
1405 //
1406 if (!TrackerEntry->Key) continue;
1407
1408 //
1409 // Otherwise, add one more entry to the caller's buffer, and ensure that
1410 // enough space has been allocated in it
1411 //
1412 SystemInformation->Count++;
1413 CurrentLength += sizeof(*TagEntry);
1414 if (SystemInformationLength < CurrentLength)
1415 {
1416 //
1417 // The caller's buffer is too small, so set a failure code. The
1418 // caller will know the count, as well as how much space is needed.
1419 //
1420 // We do NOT break out of the loop, because we want to keep incrementing
1421 // the Count as well as CurrentLength so that the caller can know the
1422 // final numbers
1423 //
1425 }
1426 else
1427 {
1428 //
1429 // Small sanity check that our accounting is working correctly
1430 //
1431 ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1432 ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1433
1434 //
1435 // Return the data into the caller's buffer
1436 //
1437 TagEntry->TagUlong = TrackerEntry->Key;
1438 TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1439 TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1440 TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1441 TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1442 TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1443 TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1444 TagEntry++;
1445 }
1446 }
1447
1448 //
1449 // Free the "Generic DPC" temporary buffer, return the buffer length and status
1450 //
1451 ExFreePoolWithTag(Buffer, 'ofnI');
1452 if (ReturnLength) *ReturnLength = CurrentLength;
1453 return Status;
1454}
1455
/*
 * ExpReallocateBigPageTable - resize the big-pool allocation tracker table.
 *
 * Doubles (Shrink == FALSE) or halves (Shrink == TRUE) PoolBigPageTable,
 * rehashing every in-use entry into a freshly allocated table, swapping the
 * global pointer, then freeing the old table and fixing up the 'Pool' tag
 * accounting.
 *
 * Returns TRUE when the table was replaced (or deliberately left alone on a
 * too-small shrink), FALSE when the size math overflowed or the new table
 * could not be allocated.
 *
 * NOTE(review): this listing has extraction gaps (bare line numbers where
 * statements used to be). A KIRQL parameter line is missing here -- callers
 * visibly pass OldIrql as the first argument (see the call sites below) --
 * and the lock-release statements near the end are among the lost lines.
 * Do not edit from this view alone.
 */
1457static
1458BOOLEAN
1459ExpReallocateBigPageTable(
1461 _In_ BOOLEAN Shrink)
1462{
1463 SIZE_T OldSize = PoolBigPageTableSize;
1464 SIZE_T NewSize, NewSizeInBytes;
1465 PPOOL_TRACKER_BIG_PAGES NewTable;
1466 PPOOL_TRACKER_BIG_PAGES OldTable;
1467 ULONG i;
1468 ULONG PagesFreed;
1469 ULONG Hash;
1470 ULONG HashMask;
1471
1472 /* Must be holding ExpLargePoolTableLock */
1474
1475 /* Make sure we don't overflow */
1476 if (Shrink)
1477 {
1478 NewSize = OldSize / 2;
1479
1480 /* Make sure we don't shrink too much. */
1482
1484 ASSERT(NewSize <= OldSize);
1485
1486 /* If there is only one page left, then keep it around. Not a failure either. */
1487 if (NewSize == OldSize)
1488 {
1491 return TRUE;
1492 }
1493 }
1494 else
1495 {
1496 if (!NT_SUCCESS(RtlSIZETMult(2, OldSize, &NewSize)))
1497 {
1498 DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
1500 return FALSE;
1501 }
1502
1503 /* Make sure we don't stupidly waste pages */
1505 ASSERT(NewSize > OldSize);
1506 }
1507
1508 if (!NT_SUCCESS(RtlSIZETMult(sizeof(POOL_TRACKER_BIG_PAGES), NewSize, &NewSizeInBytes)))
1509 {
1510 DPRINT1("Overflow while calculating big page table size. Size=%lu\n", OldSize);
1512 return FALSE;
1513 }
1514
1515 NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1516 if (NewTable == NULL)
1517 {
1518 DPRINT("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
1520 return FALSE;
1521 }
1522
1523 DPRINT("%s big pool tracker table to %lu entries\n", Shrink ? "Shrinking" : "Expanding", NewSize);
1524
1525 /* Initialize the new table */
1526 RtlZeroMemory(NewTable, NewSizeInBytes);
1527 for (i = 0; i < NewSize; i++)
1528 {
1529 NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1530 }
1531
1532 /* Copy over all items */
1533 OldTable = PoolBigPageTable;
1534 HashMask = NewSize - 1;
1535 for (i = 0; i < OldSize; i++)
1536 {
1537 /* Skip over empty items */
1538 if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1539 {
1540 continue;
1541 }
1542
1543 /* Recalculate the hash due to the new table size */
/* NOTE(review): '% HashMask' is modulo (NewSize - 1); for a power-of-two
 * table the conventional mask operation is '& HashMask'. Harmless for
 * correctness (linear probing below still finds a slot) but verify the
 * intent against upstream before changing. */
1544 Hash = ExpComputePartialHashForAddress(OldTable[i].Va) % HashMask;
1545
1546 /* Find the location in the new table */
1547 while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1548 {
1549 if (++Hash == NewSize)
1550 Hash = 0;
1551 }
1552
1553 /* We must have space */
1555
1556 /* Finally, copy the item */
1557 NewTable[Hash] = OldTable[i];
1558 }
1559
1560 /* Activate the new table */
1561 PoolBigPageTable = NewTable;
1564
1565 /* Release the lock, we're done changing global state */
1567
1568 /* Free the old table and update our tracker */
/* PagesFreed << PAGE_SHIFT converts the freed page count to bytes for the
 * 'Pool' ('looP' little-endian) tag accounting below. */
1569 PagesFreed = MiFreePoolPages(OldTable);
1570 ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1571 ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1572
1573 return TRUE;
1574}
1575
/*
 * Records a big-page (page-aligned, whole-page) allocation in the
 * PoolBigPageTable hash table so its tag and page count can be recovered at
 * free time. Probes linearly from the hash bucket, claiming a free slot with
 * an interlocked compare-exchange on the Va field; if no free slot is found
 * after a full circular pass, grows the table via ExpReallocateBigPageTable
 * and retries from the top.
 *
 * Returns TRUE once the entry is inserted, FALSE if the table could not be
 * expanded. On FALSE the caller falls back to the generic ' GIB' tag (see
 * the big-page path of the allocator below).
 *
 * NOTE(review): the declaration line carrying the function name and the
 * first parameter (the allocation's Va -- it is the value exchanged into
 * Entry->Va) is missing from this listing, as are the lock acquisition, the
 * hash computation, and several bookkeeping statements (bare line numbers).
 */
1576BOOLEAN
1577NTAPI
1579 IN ULONG Key,
1580 IN ULONG NumberOfPages,
1582{
1583 ULONG Hash, i = 0;
1584 PVOID OldVa;
1585 KIRQL OldIrql;
1587 PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1590
1591 //
1592 // As the table is expandable, these values must only be read after acquiring
1593 // the lock to avoid a teared access during an expansion
1594 // NOTE: Windows uses a special reader/writer SpinLock to improve
1595 // performance in the common case (add/remove a tracker entry)
1596 //
1597Retry:
1602
1603 //
1604 // We loop from the current hash bucket to the end of the table, and then
1605 // rollover to hash bucket 0 and keep going from there. If we return back
1606 // to the beginning, then we attempt expansion at the bottom of the loop
1607 //
1608 EntryStart = Entry = &PoolBigPageTable[Hash];
1609 EntryEnd = &PoolBigPageTable[TableSize];
1610 do
1611 {
1612 //
1613 // Make sure that this is a free entry and attempt to atomically make the
1614 // entry busy now
1615 // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1616 //
1617 OldVa = Entry->Va;
1618 if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1619 (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1620 {
1621 //
1622 // We now own this entry, write down the size and the pool tag
1623 //
1624 Entry->Key = Key;
1625 Entry->NumberOfPages = NumberOfPages;
1626
1627 //
1628 // Add one more entry to the count, and see if we're getting within
1629 // 75% of the table size, at which point we'll do an expansion now
1630 // to avoid blocking too hard later on.
1631 //
1632 // Note that we only do this if it's also been the 16th time that we
1633 // keep losing the race or that we are not finding a free entry anymore,
1634 // which implies a massive number of concurrent big pool allocations.
1635 //
1638 {
1639 DPRINT("Attempting expansion since we now have %lu entries\n",
// NOTE(review): ExpReallocateBigPageTable releases the tracker lock
// (OldIrql is handed over), which is why we return without an explicit
// unlock on this path.
1642 ExpReallocateBigPageTable(OldIrql, FALSE);
1643 return TRUE;
1644 }
1645
1646 //
1647 // We have our entry, return
1648 //
1650 return TRUE;
1651 }
1652
1653 //
1654 // We don't have our entry yet, so keep trying, making the entry list
1655 // circular if we reach the last entry. We'll eventually break out of
1656 // the loop once we've rolled over and returned back to our original
1657 // hash bucket
1658 //
1659 i++;
1660 if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1661 } while (Entry != EntryStart);
1662
1663 //
1664 // This means there's no free hash buckets whatsoever, so we now have
1665 // to attempt expanding the table
1666 //
1668 if (ExpReallocateBigPageTable(OldIrql, FALSE))
1669 {
1670 goto Retry;
1671 }
1673 DPRINT1("Big pool table expansion failed\n");
1674 return FALSE;
1675}
1676
/*
 * ExpFindAndRemoveTagBigPages (name grounded by the call in the free path
 * below) - looks up a page-aligned allocation in PoolBigPageTable, removes
 * its entry, and returns the pool tag recorded at insertion time. The
 * recorded page count is stored through BigPages. If the address was never
 * inserted (two full probe passes find nothing), returns the special
 * ' GIB' tag with *BigPages = 0 so the caller can ask Mm for the page
 * count instead. May shrink the table when usage drops low enough.
 *
 * NOTE(review): declaration lines (name, Va parameter, PoolType parameter)
 * and several statements -- the hash computation, the free-bit store, the
 * usage-counter decrement and the lock release -- are missing from this
 * listing (bare line numbers).
 */
1677ULONG
1678NTAPI
1680 OUT PULONG_PTR BigPages,
1682{
1685 KIRQL OldIrql;
1690
1691 //
1692 // As the table is expandable, these values must only be read after acquiring
1693 // the lock to avoid a teared access during an expansion
1694 //
1699
1700 //
1701 // Loop while trying to find this big page allocation
1702 //
1703 while (PoolBigPageTable[Hash].Va != Va)
1704 {
1705 //
1706 // Increment the size until we go past the end of the table
1707 //
1708 if (++Hash >= TableSize)
1709 {
1710 //
1711 // Is this the second time we've tried?
1712 //
1713 if (!FirstTry)
1714 {
1715 //
1716 // This means it was never inserted into the pool table and it
1717 // received the special "BIG" tag -- return that and return 0
1718 // so that the code can ask Mm for the page count instead
1719 //
1721 *BigPages = 0;
1722 return ' GIB';
1723 }
1724
1725 //
1726 // The first time this happens, reset the hash index and try again
1727 //
1728 Hash = 0;
1729 FirstTry = FALSE;
1730 }
1731 }
1732
1733 //
1734 // Now capture all the information we need from the entry, since after we
1735 // release the lock, the data can change
1736 //
1738 *BigPages = Entry->NumberOfPages;
1739 PoolTag = Entry->Key;
1740
1741 //
1742 // Set the free bit, and decrement the number of allocations. Finally, release
1743 // the lock and return the tag that was located
1744 //
1746
1748
1749 /* If reaching 12.5% of the size (or whatever integer rounding gets us to),
1750 * halve the allocation size, which will get us to 25% of space used. */
1752 {
1753 /* Shrink the table. */
/* ExpReallocateBigPageTable takes over and releases the tracker lock. */
1754 ExpReallocateBigPageTable(OldIrql, TRUE);
1755 }
1756 else
1757 {
1759 }
1760 return PoolTag;
1761}
1762
/*
 * Aggregates pool usage statistics -- page counts, running allocation and
 * free counts for the paged and non-paged pools, plus lookaside-list
 * allocate hits -- writing each total through the caller-provided pointers.
 *
 * NOTE(review): the name line and the first parameter line (upstream:
 * OUT PULONG PagedPoolPages) are missing from this listing, as are the
 * statements zeroing the non-paged and lookaside-hit counters. The
 * lookaside totals below are accumulated with +=, so their initialization
 * presumably sits on the lost lines -- confirm before relying on them.
 */
1763VOID
1764NTAPI
1766 OUT PULONG NonPagedPoolPages,
1767 OUT PULONG PagedPoolAllocs,
1768 OUT PULONG PagedPoolFrees,
1769 OUT PULONG PagedPoolLookasideHits,
1770 OUT PULONG NonPagedPoolAllocs,
1771 OUT PULONG NonPagedPoolFrees,
1772 OUT PULONG NonPagedPoolLookasideHits)
1773{
1774 ULONG i;
1775 PPOOL_DESCRIPTOR PoolDesc;
1776
1777 //
1778 // Assume all failures
1779 //
1780 *PagedPoolPages = 0;
1781 *PagedPoolAllocs = 0;
1782 *PagedPoolFrees = 0;
1783
1784 //
1785 // Tally up the totals for all the paged pool
1786 //
1787 for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1788 {
1789 PoolDesc = ExpPagedPoolDescriptor[i];
1790 *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1791 *PagedPoolAllocs += PoolDesc->RunningAllocs;
1792 *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1793 }
1794
1795 //
1796 // The first non-paged pool has a hardcoded well-known descriptor name
1797 //
1798 PoolDesc = &NonPagedPoolDescriptor;
1799 *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1800 *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1801 *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1802
1803 //
1804 // If the system has more than one non-paged pool, copy the other descriptor
1805 // totals as well
1806 //
1807#if 0
1808 if (ExpNumberOfNonPagedPools > 1)
1809 {
1810 for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1811 {
1812 PoolDesc = ExpNonPagedPoolDescriptor[i];
1813 *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1814 *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1815 *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1816 }
1817 }
1818#endif
1819
1820 //
1821 // Get the amount of hits in the system lookaside lists
1822 //
1824 {
1825 PLIST_ENTRY ListEntry;
1826
1827 for (ListEntry = ExPoolLookasideListHead.Flink;
1828 ListEntry != &ExPoolLookasideListHead;
1829 ListEntry = ListEntry->Flink)
1830 {
1832
1833 Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1834
1835 if (Lookaside->Type == NonPagedPool)
1836 {
1837 *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1838 }
1839 else
1840 {
1841 *PagedPoolLookasideHits += Lookaside->AllocateHits;
1842 }
1843 }
1844 }
1845}
1846
/*
 * Returns the pool quota that was charged against a process when a
 * quota-tagged pool block was allocated. The owning process pointer is
 * stashed in the last pointer-sized slot of the block
 * (((PVOID *)POOL_NEXT_BLOCK(Entry))[-1]); it is validated to really be a
 * process object (bugchecking with BAD_POOL_CALLER otherwise), then
 * cleared, with the quota return and process dereference on the lost
 * lines below.
 *
 * NOTE(review): the name line, parameter list and several statements
 * (the early-out guard condition, the quota-mask test, and the
 * PsReturnPoolQuota/ObDereference-style calls) are missing from this
 * listing (bare line numbers).
 */
1847VOID
1848NTAPI
1850{
1853 USHORT BlockSize;
1855
1858 {
1859 return;
1860 }
1861
// Back up from the data pointer to the POOL_HEADER that precedes it.
1862 Entry = P;
1863 Entry--;
1865
// PoolType is stored biased by +1 in the header; undo the bias here.
1866 PoolType = Entry->PoolType - 1;
1867 BlockSize = Entry->BlockSize;
1868
1870 {
1871 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1872 ASSERT(Process != NULL);
1873 if (Process)
1874 {
1875 if (Process->Pcb.Header.Type != ProcessObject)
1876 {
1877 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1878 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1879 KeBugCheckEx(BAD_POOL_CALLER,
1881 (ULONG_PTR)P,
1882 Entry->PoolTag,
1884 }
1885 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1888 BlockSize * POOL_BLOCK_SIZE);
1890 }
1891 }
1892}
1893
1894/* PUBLIC FUNCTIONS ***********************************************************/
1895
1896/*
1897 * @implemented
1898 */
/*
 * ExAllocatePoolWithTag - main executive pool allocator.
 *
 * Resolves the pool descriptor for the requested pool type, then satisfies
 * the request via one of three paths:
 *  1. Big-page path: requests too large for a list block go straight to
 *     MiAllocatePoolPages, get recorded in the big-page tracker (falling
 *     back to the generic ' GIB' tag if tracking fails), and are returned.
 *  2. Lookaside path: small blocks are popped from the per-CPU, then the
 *     global, lookaside list for the computed block size.
 *  3. Free-list path: otherwise the descriptor's size-indexed free lists
 *     are searched, splitting an oversized block into the allocation plus a
 *     re-inserted free fragment; if every list is empty, a fresh page is
 *     allocated and carved into the allocation plus a free fragment.
 *
 * On allocation failure: MUST_SUCCEED requests bugcheck
 * (MUST_SUCCEED_POOL_EMPTY), POOL_RAISE_IF_ALLOCATION_FAILURE requests
 * raise an exception, everything else returns NULL.
 *
 * NOTE(review): this listing is missing the declaration lines carrying the
 * function name and the PoolType/NumberOfBytes parameters, plus many guard
 * conditions, lock statements and counter updates (bare line numbers).
 * Do not edit from this view alone.
 */
1899PVOID
1900NTAPI
1903 IN ULONG Tag)
1904{
1905 PPOOL_DESCRIPTOR PoolDesc;
1906 PLIST_ENTRY ListHead;
1907 PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1908 KIRQL OldIrql;
1909 USHORT BlockSize, i;
1910 ULONG OriginalType;
1911 PKPRCB Prcb = KeGetCurrentPrcb();
1913
1914 //
1915 // Some sanity checks
1916 //
1917 ASSERT(Tag != 0);
1918 ASSERT(Tag != ' GIB');
1919 ASSERT(NumberOfBytes != 0);
1921
1922 //
1923 // Not supported in ReactOS
1924 //
1926
1927 //
1928 // Check if verifier or special pool is enabled
1929 //
1931 {
1932 //
1933 // For verifier, we should call the verification routine
1934 //
1936 {
1937 DPRINT1("Driver Verifier is not yet supported\n");
1938 }
1939
1940 //
1941 // For special pool, we check if this is a suitable allocation and do
1942 // the special allocation if needed
1943 //
1945 {
1946 //
1947 // Check if this is a special pool allocation
1948 //
1950 {
1951 //
1952 // Try to allocate using special pool
1953 //
1955 if (Entry) return Entry;
1956 }
1957 }
1958 }
1959
1960 //
1961 // Get the pool type and its corresponding vector for this request
1962 //
1963 OriginalType = PoolType;
1965 PoolDesc = PoolVector[PoolType];
1966 ASSERT(PoolDesc != NULL);
1967
1968 //
1969 // Check if this is a big page allocation
1970 //
1972 {
1973 //
1974 // Allocate pages for it
1975 //
1976 Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1977 if (!Entry)
1978 {
1979#if DBG
1980 //
1981 // Out of memory, display current consumption
1982 // Let's consider that if the caller wanted more
1983 // than a hundred pages, that's a bogus caller
1984 // and we are not out of memory. Dump at most
1985 // once a second to avoid spamming the log.
1986 //
1987 if (NumberOfBytes < 100 * PAGE_SIZE &&
1989 {
1990 MiDumpPoolConsumers(FALSE, 0, 0, 0);
1992 }
1993#endif
1994
1995 //
1996 // Must succeed pool is deprecated, but still supported. These allocation
1997 // failures must cause an immediate bugcheck
1998 //
1999 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2000 {
2001 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2005 0);
2006 }
2007
2008 //
2009 // Internal debugging
2010 //
2012
2013 //
2014 // This flag requests printing failures, and can also further specify
2015 // breaking on failures
2016 //
2018 {
2019 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2021 OriginalType);
2023 }
2024
2025 //
2026 // Finally, this flag requests an exception, which we are more than
2027 // happy to raise!
2028 //
2029 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2030 {
2032 }
2033
2034 return NULL;
2035 }
2036
2037 //
2038 // Increment required counters
2039 //
2044
2045 //
2046 // Add a tag for the big page allocation and switch to the generic "BIG"
2047 // tag if we failed to do so, then insert a tracker for this allocation.
2048 //
2050 Tag,
2052 OriginalType))
2053 {
2054 Tag = ' GIB';
2055 }
2057 return Entry;
2058 }
2059
2060 //
2061 // Should never request 0 bytes from the pool, but since so many drivers do
2062 // it, we'll just assume they want 1 byte, based on NT's similar behavior
2063 //
2064 if (!NumberOfBytes) NumberOfBytes = 1;
2065
2066 //
2067 // A pool allocation is defined by its data, a linked list to connect it to
2068 // the free list (if necessary), and a pool header to store accounting info.
2069 // Calculate this size, then convert it into a block size (units of pool
2070 // headers)
2071 //
2072 // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2073 // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2074 // the direct allocation of pages.
2075 //
2076 i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2077 / POOL_BLOCK_SIZE);
2079
2080 //
2081 // Handle lookaside list optimization for both paged and nonpaged pool
2082 //
2084 {
2085 //
2086 // Try popping it from the per-CPU lookaside list
2087 //
2089 Prcb->PPPagedLookasideList[i - 1].P :
2090 Prcb->PPNPagedLookasideList[i - 1].P;
2091 LookasideList->TotalAllocates++;
2093 if (!Entry)
2094 {
2095 //
2096 // We failed, try popping it from the global list
2097 //
2099 Prcb->PPPagedLookasideList[i - 1].L :
2100 Prcb->PPNPagedLookasideList[i - 1].L;
2101 LookasideList->TotalAllocates++;
2103 }
2104
2105 //
2106 // If we were able to pop it, update the accounting and return the block
2107 //
2108 if (Entry)
2109 {
2110 LookasideList->AllocateHits++;
2111
2112 //
2113 // Get the real entry, write down its pool type, and track it
2114 //
// The lookaside pop returned the data pointer; step back to the header.
// PoolType is stored biased by +1 so 0 can mean "free block".
2115 Entry--;
2116 Entry->PoolType = OriginalType + 1;
2118 Entry->BlockSize * POOL_BLOCK_SIZE,
2119 OriginalType);
2120
2121 //
2122 // Return the pool allocation
2123 //
2124 Entry->PoolTag = Tag;
2125 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2126 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2127 return POOL_FREE_BLOCK(Entry);
2128 }
2129 }
2130
2131 //
2132 // Loop in the free lists looking for a block of this size. Start with the
2133 // list optimized for this kind of size lookup
2134 //
2135 ListHead = &PoolDesc->ListHeads[i];
2136 do
2137 {
2138 //
2139 // Are there any free entries available on this list?
2140 //
2141 if (!ExpIsPoolListEmpty(ListHead))
2142 {
2143 //
2144 // Acquire the pool lock now
2145 //
2146 OldIrql = ExLockPool(PoolDesc);
2147
2148 //
2149 // And make sure the list still has entries
2150 //
2151 if (ExpIsPoolListEmpty(ListHead))
2152 {
2153 //
2154 // Someone raced us (and won) before we had a chance to acquire
2155 // the lock.
2156 //
2157 // Try again!
2158 //
2159 ExUnlockPool(PoolDesc, OldIrql);
2160 continue;
2161 }
2162
2163 //
2164 // Remove a free entry from the list
2165 // Note that due to the way we insert free blocks into multiple lists
2166 // there is a guarantee that any block on this list will either be
2167 // of the correct size, or perhaps larger.
2168 //
2169 ExpCheckPoolLinks(ListHead);
2171 ExpCheckPoolLinks(ListHead);
2173 ASSERT(Entry->BlockSize >= i);
2174 ASSERT(Entry->PoolType == 0);
2175
2176 //
2177 // Check if this block is larger that what we need. The block could
2178 // not possibly be smaller, due to the reason explained above (and
2179 // we would've asserted on a checked build if this was the case).
2180 //
2181 if (Entry->BlockSize != i)
2182 {
2183 //
2184 // Is there an entry before this one?
2185 //
2186 if (Entry->PreviousSize == 0)
2187 {
2188 //
2189 // There isn't anyone before us, so take the next block and
2190 // turn it into a fragment that contains the leftover data
2191 // that we don't need to satisfy the caller's request
2192 //
2193 FragmentEntry = POOL_BLOCK(Entry, i);
2194 FragmentEntry->BlockSize = Entry->BlockSize - i;
2195
2196 //
2197 // And make it point back to us
2198 //
2199 FragmentEntry->PreviousSize = i;
2200
2201 //
2202 // Now get the block that follows the new fragment and check
2203 // if it's still on the same page as us (and not at the end)
2204 //
2205 NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2206 if (PAGE_ALIGN(NextEntry) != NextEntry)
2207 {
2208 //
2209 // Adjust this next block to point to our newly created
2210 // fragment block
2211 //
2212 NextEntry->PreviousSize = FragmentEntry->BlockSize;
2213 }
2214 }
2215 else
2216 {
2217 //
2218 // There is a free entry before us, which we know is smaller
2219 // so we'll make this entry the fragment instead
2220 //
2221 FragmentEntry = Entry;
2222
2223 //
2224 // And then we'll remove from it the actual size required.
2225 // Now the entry is a leftover free fragment
2226 //
2227 Entry->BlockSize -= i;
2228
2229 //
2230 // Now let's go to the next entry after the fragment (which
2231 // used to point to our original free entry) and make it
2232 // reference the new fragment entry instead.
2233 //
2234 // This is the entry that will actually end up holding the
2235 // allocation!
2236 //
2238 Entry->PreviousSize = FragmentEntry->BlockSize;
2239
2240 //
2241 // And now let's go to the entry after that one and check if
2242 // it's still on the same page, and not at the end
2243 //
2244 NextEntry = POOL_BLOCK(Entry, i);
2245 if (PAGE_ALIGN(NextEntry) != NextEntry)
2246 {
2247 //
2248 // Make it reference the allocation entry
2249 //
2250 NextEntry->PreviousSize = i;
2251 }
2252 }
2253
2254 //
2255 // Now our (allocation) entry is the right size
2256 //
2257 Entry->BlockSize = i;
2258
2259 //
2260 // And the next entry is now the free fragment which contains
2261 // the remaining difference between how big the original entry
2262 // was, and the actual size the caller needs/requested.
2263 //
2264 FragmentEntry->PoolType = 0;
2265 BlockSize = FragmentEntry->BlockSize;
2266
2267 //
2268 // Now check if enough free bytes remained for us to have a
2269 // "full" entry, which contains enough bytes for a linked list
2270 // and thus can be used for allocations (up to 8 bytes...)
2271 //
2272 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2273 if (BlockSize != 1)
2274 {
2275 //
2276 // Insert the free entry into the free list for this size
2277 //
2278 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2279 POOL_FREE_BLOCK(FragmentEntry));
2280 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2281 }
2282 }
2283
2284 //
2285 // We have found an entry for this allocation, so set the pool type
2286 // and release the lock since we're done
2287 //
2288 Entry->PoolType = OriginalType + 1;
2290 ExUnlockPool(PoolDesc, OldIrql);
2291
2292 //
2293 // Increment required counters
2294 //
2297
2298 //
2299 // Track this allocation
2300 //
2302 Entry->BlockSize * POOL_BLOCK_SIZE,
2303 OriginalType);
2304
2305 //
2306 // Return the pool allocation
2307 //
2308 Entry->PoolTag = Tag;
2309 (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2310 (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2311 return POOL_FREE_BLOCK(Entry);
2312 }
2313 } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2314
2315 //
2316 // There were no free entries left, so we have to allocate a new fresh page
2317 //
2318 Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2319 if (!Entry)
2320 {
2321#if DBG
2322 //
2323 // Out of memory, display current consumption
2324 // Let's consider that if the caller wanted more
2325 // than a hundred pages, that's a bogus caller
2326 // and we are not out of memory. Dump at most
2327 // once a second to avoid spamming the log.
2328 //
2329 if (NumberOfBytes < 100 * PAGE_SIZE &&
2331 {
2332 MiDumpPoolConsumers(FALSE, 0, 0, 0);
2334 }
2335#endif
2336
2337 //
2338 // Must succeed pool is deprecated, but still supported. These allocation
2339 // failures must cause an immediate bugcheck
2340 //
2341 if (OriginalType & MUST_SUCCEED_POOL_MASK)
2342 {
2343 KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2344 PAGE_SIZE,
2347 0);
2348 }
2349
2350 //
2351 // Internal debugging
2352 //
2354
2355 //
2356 // This flag requests printing failures, and can also further specify
2357 // breaking on failures
2358 //
2360 {
2361 DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2363 OriginalType);
2365 }
2366
2367 //
2368 // Finally, this flag requests an exception, which we are more than
2369 // happy to raise!
2370 //
2371 if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2372 {
2374 }
2375
2376 //
2377 // Return NULL to the caller in all other cases
2378 //
2379 return NULL;
2380 }
2381
2382 //
2383 // Setup the entry data
2384 //
2385 Entry->Ulong1 = 0;
2386 Entry->BlockSize = i;
2387 Entry->PoolType = OriginalType + 1;
2388
2389 //
2390 // This page will have two entries -- one for the allocation (which we just
2391 // created above), and one for the remaining free bytes, which we're about
2392 // to create now. The free bytes are the whole page minus what was allocated
2393 // and then converted into units of block headers.
2394 //
2395 BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2396 FragmentEntry = POOL_BLOCK(Entry, i);
2397 FragmentEntry->Ulong1 = 0;
2398 FragmentEntry->BlockSize = BlockSize;
2399 FragmentEntry->PreviousSize = i;
2400
2401 //
2402 // Increment required counters
2403 //
2406
2407 //
2408 // Now check if enough free bytes remained for us to have a "full" entry,
2409 // which contains enough bytes for a linked list and thus can be used for
2410 // allocations (up to 8 bytes...)
2411 //
2412 if (FragmentEntry->BlockSize != 1)
2413 {
2414 //
2415 // Excellent -- acquire the pool lock
2416 //
2417 OldIrql = ExLockPool(PoolDesc);
2418
2419 //
2420 // And insert the free entry into the free list for this block size
2421 //
2422 ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2423 ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2424 POOL_FREE_BLOCK(FragmentEntry));
2425 ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2426
2427 //
2428 // Release the pool lock
2429 //
2431 ExUnlockPool(PoolDesc, OldIrql);
2432 }
2433 else
2434 {
2435 //
2436 // Simply do a sanity check
2437 //
2439 }
2440
2441 //
2442 // Increment performance counters and track this allocation
2443 //
2446 Entry->BlockSize * POOL_BLOCK_SIZE,
2447 OriginalType);
2448
2449 //
2450 // And return the pool allocation
2451 //
2453 Entry->PoolTag = Tag;
2454 return POOL_FREE_BLOCK(Entry);
2455}
2456
2457/*
2458 * @implemented
2459 */
/*
 * ExAllocatePool - tag-less allocation wrapper.
 *
 * Forwards to the tagged allocator with the default TAG_NONE tag; the
 * disabled (#if 0 && DBG) section would instead derive a tag from the first
 * four characters of the calling driver's BaseDllName, padding with spaces.
 *
 * NOTE(review): the name/parameter lines and the final forwarding return
 * statement (presumably return ExAllocatePoolWithTag(PoolType,
 * NumberOfBytes, Tag)) are missing from this listing.
 */
2460PVOID
2461NTAPI
2464{
2465 ULONG Tag = TAG_NONE;
2466#if 0 && DBG
2467 PLDR_DATA_TABLE_ENTRY LdrEntry;
2468
2469 /* Use the first four letters of the driver name, or "None" if unavailable */
2470 LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2472 : NULL;
2473 if (LdrEntry)
2474 {
2475 ULONG i;
2476 Tag = 0;
// Build the tag byte-by-byte: shift previous bytes down, put the new
// character in the top byte, so the first name character ends up lowest.
2477 for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2478 Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2479 for (; i < 4; i++)
2480 Tag = Tag >> 8 | ' ' << 24;
2481 }
2482#endif
2484}
2485
2486/*
2487 * @implemented
2488 */
/*
 * ExFreePoolWithTag - main executive pool free routine.
 *
 * Page-aligned pointers take the big-page path: the tag and page count are
 * recovered via ExpFindAndRemoveTagBigPages (with a ' GIB' fallback when
 * the allocation was never tracked), accounting is unwound, and the pages
 * are returned with MiFreePoolPages. List-block pointers have their header
 * validated and quota released, then are preferentially pushed onto the
 * per-CPU/global lookaside lists; failing that, the block is coalesced
 * with free neighbors (next and previous on the same page) and either the
 * whole page is released back to Mm or the merged free block is
 * re-inserted into the descriptor's size-indexed free list.
 *
 * If TagToFree is nonzero and does not match the stored tag, the mismatch
 * is reported (and bugchecks BAD_POOL_CALLER, code 0x0A, on DBG builds).
 *
 * NOTE(review): many guard conditions, lock/unlock and counter statements,
 * plus the name line and the P parameter line, are missing from this
 * listing (bare line numbers). Do not edit from this view alone.
 */
2489VOID
2490NTAPI
2492 IN ULONG TagToFree)
2493{
2494 PPOOL_HEADER Entry, NextEntry;
2495 USHORT BlockSize;
2496 KIRQL OldIrql;
2498 PPOOL_DESCRIPTOR PoolDesc;
2499 ULONG Tag;
2500 BOOLEAN Combined = FALSE;
2501 PFN_NUMBER PageCount, RealPageCount;
2502 PKPRCB Prcb = KeGetCurrentPrcb();
2505
2506 //
2507 // Check if any of the debug flags are enabled
2508 //
2515 {
2516 //
2517 // Check if special pool is enabled
2518 //
2520 {
2521 //
2522 // Check if it was allocated from a special pool
2523 //
2525 {
2526 //
2527 // Was deadlock verification also enabled? We can do some extra
2528 // checks at this point
2529 //
2531 {
2532 DPRINT1("Verifier not yet supported\n");
2533 }
2534
2535 //
2536 // It is, so handle it via special pool free routine
2537 //
2539 return;
2540 }
2541 }
2542
2543 //
2544 // For non-big page allocations, we'll do a bunch of checks in here
2545 //
2546 if (PAGE_ALIGN(P) != P)
2547 {
2548 //
2549 // Get the entry for this pool allocation
2550 // The pointer math here may look wrong or confusing, but it is quite right
2551 //
2552 Entry = P;
2553 Entry--;
2554
2555 //
2556 // Get the pool type
2557 //
2558 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2559
2560 //
2561 // FIXME: Many other debugging checks go here
2562 //
2564 }
2565 }
2566
2567 //
2568 // Check if this is a big page allocation
2569 //
2570 if (PAGE_ALIGN(P) == P)
2571 {
2572 //
2573 // We need to find the tag for it, so first we need to find out what
2574 // kind of allocation this was (paged or nonpaged), then we can go
2575 // ahead and try finding the tag for it. Remember to get rid of the
2576 // PROTECTED_POOL tag if it's found.
2577 //
2578 // Note that if at insertion time, we failed to add the tag for a big
2579 // pool allocation, we used a special tag called 'BIG' to identify the
2580 // allocation, and we may get this tag back. In this scenario, we must
2581 // manually get the size of the allocation by actually counting through
2582 // the PFN database.
2583 //
2586 Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2587 if (!Tag)
2588 {
2589 DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2590 ASSERT(Tag == ' GIB');
2591 PageCount = 1; // We are going to lie! This might screw up accounting?
2592 }
2593 else if (Tag & PROTECTED_POOL)
2594 {
2595 Tag &= ~PROTECTED_POOL;
2596 }
2597
2598 //
2599 // Check block tag
2600 //
2601 if (TagToFree && TagToFree != Tag)
2602 {
2603 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2604#if DBG
2605 /* Do not bugcheck in case this is a big allocation for which we didn't manage to insert the tag */
2606 if (Tag != ' GIB')
2607 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2608#endif
2609 }
2610
2611 //
2612 // We have our tag and our page count, so we can go ahead and remove this
2613 // tracker now
2614 //
2616
2617 //
2618 // Check if any of the debug flags are enabled
2619 //
2624 {
2625 //
2626 // Was deadlock verification also enabled? We can do some extra
2627 // checks at this point
2628 //
2630 {
2631 DPRINT1("Verifier not yet supported\n");
2632 }
2633
2634 //
2635 // FIXME: Many debugging checks go here
2636 //
2637 }
2638
2639 //
2640 // Update counters
2641 //
2642 PoolDesc = PoolVector[PoolType];
2645 -(LONG_PTR)(PageCount << PAGE_SHIFT));
2646
2647 //
2648 // Do the real free now and update the last counter with the big page count
2649 //
2650 RealPageCount = MiFreePoolPages(P);
2651 ASSERT(RealPageCount == PageCount);
2653 -(LONG)RealPageCount);
2654 return;
2655 }
2656
2657 //
2658 // Get the entry for this pool allocation
2659 // The pointer math here may look wrong or confusing, but it is quite right
2660 //
2661 Entry = P;
2662 Entry--;
2664
2665 //
2666 // Get the size of the entry, and its pool type, then load the descriptor
2667 // for this pool type
2668 //
2669 BlockSize = Entry->BlockSize;
2670 PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2671 PoolDesc = PoolVector[PoolType];
2672
2673 //
2674 // Make sure that the IRQL makes sense
2675 //
2677
2678 //
2679 // Get the pool tag and get rid of the PROTECTED_POOL flag
2680 //
2681 Tag = Entry->PoolTag;
2682 if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2683
2684 //
2685 // Check block tag
2686 //
2687 if (TagToFree && TagToFree != Tag)
2688 {
2689 DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2690#if DBG
2691 KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2692#endif
2693 }
2694
2695 //
2696 // Track the removal of this allocation
2697 //
2699 BlockSize * POOL_BLOCK_SIZE,
2700 Entry->PoolType - 1);
2701
2702 //
2703 // Release pool quota, if any
2704 //
2705 if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2706 {
// The owning process pointer lives in the last pointer-sized slot of
// the block, mirroring the quota-return helper above.
2707 Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2708 if (Process)
2709 {
2710 if (Process->Pcb.Header.Type != ProcessObject)
2711 {
2712 DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2713 Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2714 KeBugCheckEx(BAD_POOL_CALLER,
2716 (ULONG_PTR)P,
2717 Tag,
2719 }
2722 }
2723 }
2724
2725 //
2726 // Is this allocation small enough to have come from a lookaside list?
2727 //
2728 if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2729 {
2730 //
2731 // Try pushing it into the per-CPU lookaside list
2732 //
2734 Prcb->PPPagedLookasideList[BlockSize - 1].P :
2735 Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2736 LookasideList->TotalFrees++;
2737 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2738 {
2739 LookasideList->FreeHits++;
2741 return;
2742 }
2743
2744 //
2745 // We failed, try to push it into the global lookaside list
2746 //
2748 Prcb->PPPagedLookasideList[BlockSize - 1].L :
2749 Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2750 LookasideList->TotalFrees++;
2751 if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2752 {
2753 LookasideList->FreeHits++;
2755 return;
2756 }
2757 }
2758
2759 //
2760 // Get the pointer to the next entry
2761 //
2762 NextEntry = POOL_BLOCK(Entry, BlockSize);
2763
2764 //
2765 // Update performance counters
2766 //
2768 InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2769
2770 //
2771 // Acquire the pool lock
2772 //
2773 OldIrql = ExLockPool(PoolDesc);
2774
2775 //
2776 // Check if the next allocation is at the end of the page
2777 //
2779 if (PAGE_ALIGN(NextEntry) != NextEntry)
2780 {
2781 //
2782 // We may be able to combine the block if it's free
2783 //
2784 if (NextEntry->PoolType == 0)
2785 {
2786 //
2787 // The next block is free, so we'll do a combine
2788 //
2789 Combined = TRUE;
2790
2791 //
2792 // Make sure there's actual data in the block -- anything smaller
2793 // than this means we only have the header, so there's no linked list
2794 // for us to remove
2795 //
2796 if ((NextEntry->BlockSize != 1))
2797 {
2798 //
2799 // The block is at least big enough to have a linked list, so go
2800 // ahead and remove it
2801 //
2806 }
2807
2808 //
2809 // Our entry is now combined with the next entry
2810 //
2811 Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2812 }
2813 }
2814
2815 //
2816 // Now check if there was a previous entry on the same page as us
2817 //
2818 if (Entry->PreviousSize)
2819 {
2820 //
2821 // Great, grab that entry and check if it's free
2822 //
2823 NextEntry = POOL_PREV_BLOCK(Entry);
2824 if (NextEntry->PoolType == 0)
2825 {
2826 //
2827 // It is, so we can do a combine
2828 //
2829 Combined = TRUE;
2830
2831 //
2832 // Make sure there's actual data in the block -- anything smaller
2833 // than this means we only have the header so there's no linked list
2834 // for us to remove
2835 //
2836 if ((NextEntry->BlockSize != 1))
2837 {
2838 //
2839 // The block is at least big enough to have a linked list, so go
2840 // ahead and remove it
2841 //
2846 }
2847
2848 //
2849 // Combine our original block (which might've already been combined
2850 // with the next block), into the previous block
2851 //
2852 NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2853
2854 //
2855 // And now we'll work with the previous block instead
2856 //
2857 Entry = NextEntry;
2858 }
2859 }
2860
2861 //
2862 // By now, it may have been possible for our combined blocks to actually
2863 // have made up a full page (if there were only 2-3 allocations on the
2864 // page, they could've all been combined).
2865 //
2866 if ((PAGE_ALIGN(Entry) == Entry) &&
2868 {
2869 //
2870 // In this case, release the pool lock, update the performance counter,
2871 // and free the page
2872 //
2873 ExUnlockPool(PoolDesc, OldIrql);
2874 InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2876 return;
2877 }
2878
2879 //
2880 // Otherwise, we now have a free block (or a combination of 2 or 3)
2881 //
2882 Entry->PoolType = 0;
2883 BlockSize = Entry->BlockSize;
2884 ASSERT(BlockSize != 1);
2885
2886 //
2887 // Check if we actually did combine it with anyone
2888 //
2889 if (Combined)
2890 {
2891 //
2892 // Get the first combined block (either our original to begin with, or
2893 // the one after the original, depending if we combined with the previous)
2894 //
2895 NextEntry = POOL_NEXT_BLOCK(Entry);
2896
2897 //
2898 // As long as the next block isn't on a page boundary, have it point
2899 // back to us
2900 //
2901 if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2902 }
2903
2904 //
2905 // Insert this new free block, and release the pool lock
2906 //
2907 ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2909 ExUnlockPool(PoolDesc, OldIrql);
2910}
2911
2912/*
2913 * @implemented
2914 */
2915VOID
2916NTAPI
2918{
2919 //
2920 // Just free without checking for the tag
2921 //
2922 ExFreePoolWithTag(P, 0);
2923}
2924
2925/*
2926 * @unimplemented
2927 */
2928SIZE_T
2929NTAPI
2932{
2933 //
2934 // Not implemented
2935 //
2937 return FALSE;
2938}
2939
2940/*
2941 * @implemented
2942 */
2943
2944PVOID
2945NTAPI
2948{
2949 //
2950 // Allocate the pool
2951 //
2953}
2954
2955/*
2956 * @implemented
2957 */
2958PVOID
2959NTAPI
2962 IN ULONG Tag,
2964{
2965 PVOID Buffer;
2966
2967 //
2968 // Allocate the pool
2969 //
2971 if (Buffer == NULL)
2972 {
2974 }
2975
2976 return Buffer;
2977}
2978
2979/*
2980 * @implemented
2981 */
2982PVOID
2983NTAPI
2986 IN ULONG Tag)
2987{
2988 BOOLEAN Raise = TRUE;
2989 PVOID Buffer;
2993
2994 //
2995 // Check if we should fail instead of raising an exception
2996 //
2998 {
2999 Raise = FALSE;
3000 PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
3001 }
3002
3003 //
3004 // Inject the pool quota mask
3005 //
3007
3008 //
3009 // Check if we have enough space to add the quota owner process, as long as
3010 // this isn't the system process, which never gets charged quota
3011 //
3012 ASSERT(NumberOfBytes != 0);
3013 if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
3015 {
3016 //
3017 // Add space for our EPROCESS pointer
3018 //
3019 NumberOfBytes += sizeof(PEPROCESS);
3020 }
3021 else
3022 {
3023 //
3024 // We won't be able to store the pointer, so don't use quota for this
3025 //
3027 }
3028
3029 //
3030 // Allocate the pool buffer now
3031 //
3033
3034 //
3035 // If the buffer is page-aligned, this is a large page allocation and we
3036 // won't touch it
3037 //
3038 if (PAGE_ALIGN(Buffer) != Buffer)
3039 {
3040 //
3041 // Also if special pool is enabled, and this was allocated from there,
3042 // we won't touch it either
3043 //
3046 {
3047 return Buffer;
3048 }
3049
3050 //
3051 // If it wasn't actually allocated with quota charges, ignore it too
3052 //
3053 if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3054
3055 //
3056 // If this is the system process, we don't charge quota, so ignore
3057 //
3058 if (Process == PsInitialSystemProcess) return Buffer;
3059
3060 //
3061 // Actually go and charge quota for the process now
3062 //
3066 Entry->BlockSize * POOL_BLOCK_SIZE);
3067 if (!NT_SUCCESS(Status))
3068 {
3069 //
3070 // Quota failed, back out the allocation, clear the owner, and fail
3071 //
3072 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3074 if (Raise) RtlRaiseStatus(Status);
3075 return NULL;
3076 }
3077
3078 //
3079 // Quota worked, write the owner and then reference it before returning
3080 //
3081 ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3083 }
3084 else if (!(Buffer) && (Raise))
3085 {
3086 //
3087 // The allocation failed, raise an error if we are in raise mode
3088 //
3090 }
3091
3092 //
3093 // Return the allocated buffer
3094 //
3095 return Buffer;
3096}
3097
3098/* EOF */
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define ALIGN_DOWN_BY(size, align)
#define ALIGN_UP_BY(size, align)
unsigned char BOOLEAN
#define InterlockedIncrement
Definition: armddk.h:53
LONG NTSTATUS
Definition: precomp.h:26
#define DPRINT1
Definition: precomp.h:8
BOOL Verbose
Definition: chkdsk.c:72
LONG_PTR SSIZE_T
Definition: basetsd.h:183
#define MAXULONG_PTR
Definition: basetsd.h:103
#define UNIMPLEMENTED
Definition: debug.h:115
Definition: bufpool.h:45
_In_ PSCSI_REQUEST_BLOCK _Out_ NTSTATUS _Inout_ BOOLEAN * Retry
Definition: classpnp.h:312
#define NULL
Definition: types.h:112
#define TRUE
Definition: types.h:120
#define FALSE
Definition: types.h:117
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:32
#define P(row, col)
static int Hash(const char *)
Definition: reader.c:2257
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:984
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1026
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:1013
#define ULONG_PTR
Definition: config.h:101
#define _IRQL_restores_
Definition: driverspecs.h:233
#define _IRQL_requires_(irql)
Definition: driverspecs.h:229
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:43
#define ExAllocatePoolWithTag(hernya, size, tag)
Definition: env_spec_w32.h:350
#define IsListEmpty(ListHead)
Definition: env_spec_w32.h:954
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
UCHAR KIRQL
Definition: env_spec_w32.h:591
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
#define APC_LEVEL
Definition: env_spec_w32.h:695
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define NonPagedPool
Definition: env_spec_w32.h:307
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
#define KeInitializeSpinLock(sl)
Definition: env_spec_w32.h:604
#define PagedPool
Definition: env_spec_w32.h:308
ULONG ExPoolFailures
Definition: expool.c:57
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:55
#define POOL_BLOCK(x, i)
Definition: expool.c:63
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:114
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:296
ULONG ExpBigTableExpansionFailed
Definition: expool.c:48
#define POOL_FREE_BLOCK(x)
Definition: expool.c:62
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:376
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:441
BOOLEAN ExStopBadTags
Definition: expool.c:53
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1765
#define POOL_BIG_TABLE_USE_RATE
Definition: expool.c:31
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:756
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1849
VOID NTAPI ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:966
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:44
#define POOL_PREV_BLOCK(x)
Definition: expool.c:65
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1353
SIZE_T PoolTrackTableMask
Definition: expool.c:46
VOID NTAPI InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:1017
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:163
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:193
SIZE_T PoolBigPageTableSize
Definition: expool.c:47
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:457
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1318
ULONG ExpNumberOfPagedPools
Definition: expool.c:41
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:99
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2930
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:50
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:45
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:416
#define POOL_ENTRY(x)
Definition: expool.c:61
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:139
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2960
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:85
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:51
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:128
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:178
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1294
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:64
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:42
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:49
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:43
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1578
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
ULONG PoolHitTag
Definition: expool.c:52
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1270
VOID NTAPI ExpSeedHotTags(VOID)
Definition: expool.c:636
SIZE_T PoolTrackTableSize
Definition: expool.c:46
ULONG ExpPoolFlags
Definition: expool.c:56
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:54
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2946
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:151
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:847
ULONGLONG MiLastPoolDumpTime
Definition: expool.c:58
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1679
SIZE_T PoolBigPageTableHash
Definition: expool.c:47
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:92
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:121
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
#define ExAllocatePool(type, size)
Definition: fbtusb.h:44
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2554
_Must_inspect_result_ _In_ USHORT NewSize
Definition: fltkernel.h:975
unsigned int Mask
Definition: fpcontrol.c:82
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:223
Status
Definition: gdiplustypes.h:25
ASMGENDATA Table[]
Definition: genincdata.c:61
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
NTSYSAPI void WINAPI DbgBreakPoint(void)
#define InterlockedExchangeAdd
Definition: interlocked.h:181
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
#define _ReturnAddress()
Definition: intrin_arm.h:35
static CODE_SEG("PAGE")
Definition: isapnp.c:1482
VOID KdbpPrint(_In_ PSTR Format, _In_ ...)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:3224
struct _POOL_HEADER * PPOOL_HEADER
if(dx< 0)
Definition: linetemp.h:194
LIST_ENTRY ExPoolLookasideListHead
Definition: lookas.c:22
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:305
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:271
#define POOL_BILLED_PROCESS_INVALID
Definition: miarm.h:306
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:279
#define POOL_BLOCK_SIZE
Definition: miarm.h:269
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:284
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:285
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:280
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:278
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:283
#define POOL_MAX_ALLOC
Definition: miarm.h:273
#define POOL_FLAG_VERIFIER
Definition: miarm.h:281
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:282
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:304
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3464
#define ASSERT(a)
Definition: mode.c:44
#define ExFreePoolWithTag(_P, _T)
Definition: module.h:1099
#define min(a, b)
Definition: monoChain.cc:55
#define _In_
Definition: ms_sal.h:308
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:291
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1080
@ ProcessObject
Definition: ketypes.h:442
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:319
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:317
@ FirstTry
Definition: copy.c:25
UCHAR KeNumberNodes
Definition: krnlinit.c:40
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:408
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:422
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:119
#define SESSION_POOL_MASK
Definition: mm.h:122
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:918
#define ExRaiseStatus
Definition: ntoskrnl.h:108
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
VOID MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
long LONG
Definition: pedump.c:60
unsigned short USHORT
Definition: pedump.c:61
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
NTSTATUS NTAPI PsChargeProcessPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Charges the process' quota pool. The type of quota to be charged depends upon the PoolType parameter.
Definition: quota.c:872
VOID NTAPI PsReturnPoolQuota(_In_ PEPROCESS Process, _In_ POOL_TYPE PoolType, _In_ SIZE_T Amount)
Returns the pool quota that the process was taking up.
Definition: quota.c:907
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:108
#define KeQueryInterruptTime()
Definition: ke.h:37
FORCEINLINE ULONG KeGetCurrentProcessorNumber(VOID)
Definition: ke.h:337
ULONG PFN_NUMBER
Definition: ke.h:9
#define STATUS_SUCCESS
Definition: shellext.h:65
#define DPRINT
Definition: sndvol32.h:71
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
base of all file and directory entries
Definition: entries.h:83
Definition: ketypes.h:687
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:630
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:631
Definition: btrfs_drv.h:1876
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:145
Definition: typedefs.h:120
struct _LIST_ENTRY * Blink
Definition: typedefs.h:122
struct _LIST_ENTRY * Flink
Definition: typedefs.h:121
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:323
ULONG RunningAllocs
Definition: miarm.h:313
ULONG TotalPages
Definition: miarm.h:315
SIZE_T TotalBytes
Definition: miarm.h:321
ULONG RunningDeAllocs
Definition: miarm.h:314
ULONG TotalBigPages
Definition: miarm.h:316
SIZE_T PoolTrackTableSize
Definition: expool.c:36
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:37
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:35
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:38
USHORT PreviousSize
LONG NonPagedAllocs
Definition: miarm.h:370
SIZE_T NonPagedBytes
Definition: miarm.h:372
LONG NonPagedFrees
Definition: miarm.h:371
SIZE_T PagedBytes
Definition: miarm.h:375
SIZE_T PagedUsed
Definition: extypes.h:1135
ULONG TagUlong
Definition: extypes.h:1131
ULONG PagedFrees
Definition: extypes.h:1134
ULONG PagedAllocs
Definition: extypes.h:1133
ULONG NonPagedAllocs
Definition: extypes.h:1136
SIZE_T NonPagedUsed
Definition: extypes.h:1138
ULONG NonPagedFrees
Definition: extypes.h:1137
#define max(a, b)
Definition: svc.c:63
#define TAG_NONE
Definition: tag.h:110
uint32_t * PULONG_PTR
Definition: typedefs.h:65
uint32_t * PULONG
Definition: typedefs.h:59
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:255
unsigned char * PBOOLEAN
Definition: typedefs.h:53
INT POOL_TYPE
Definition: typedefs.h:78
#define NTAPI
Definition: typedefs.h:36
void * PVOID
Definition: typedefs.h:50
ULONG_PTR SIZE_T
Definition: typedefs.h:80
#define RtlCopyMemory(Destination, Source, Length)
Definition: typedefs.h:263
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
uint32_t ULONG_PTR
Definition: typedefs.h:65
#define IN
Definition: typedefs.h:39
int32_t * PLONG
Definition: typedefs.h:58
#define CONTAINING_RECORD(address, type, field)
Definition: typedefs.h:260
uint32_t ULONG
Definition: typedefs.h:59
uint64_t ULONGLONG
Definition: typedefs.h:67
#define OUT
Definition: typedefs.h:40
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
static int Link(const char **args)
Definition: vfdcmd.c:2414
_In_ WDFCOLLECTION _In_ ULONG Index
_Must_inspect_result_ _In_ WDFDEVICE _In_ BOOLEAN _In_opt_ PVOID Tag
Definition: wdfdevice.h:4065
_Must_inspect_result_ _In_ WDFDEVICE _In_ DEVICE_REGISTRY_PROPERTY _In_ _Strict_type_match_ POOL_TYPE PoolType
Definition: wdfdevice.h:3815
_Must_inspect_result_ _In_ WDFDEVICE _In_ PWDF_DEVICE_PROPERTY_DATA _In_ DEVPROPTYPE _In_ ULONG Size
Definition: wdfdevice.h:4533
_Must_inspect_result_ _In_ PWDF_DPC_CONFIG _In_ PWDF_OBJECT_ATTRIBUTES _Out_ WDFDPC * Dpc
Definition: wdfdpc.h:112
_In_ WDFINTERRUPT _In_ WDF_INTERRUPT_POLICY _In_ WDF_INTERRUPT_PRIORITY Priority
Definition: wdfinterrupt.h:655
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _In_ _Strict_type_match_ POOL_TYPE _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_opt_ ULONG _Out_ WDFLOOKASIDE * Lookaside
Definition: wdfmemory.h:414
_Must_inspect_result_ _In_opt_ PWDF_OBJECT_ATTRIBUTES _In_ _Strict_type_match_ POOL_TYPE _In_opt_ ULONG PoolTag
Definition: wdfmemory.h:164
_Must_inspect_result_ _In_ WDFIORESLIST _In_ PIO_RESOURCE_DESCRIPTOR Descriptor
Definition: wdfresource.h:342
#define FORCEINLINE
Definition: wdftypes.h:67
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:426
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
#define ExReleaseSpinLock(Lock, OldIrql)
#define ExAllocatePoolWithQuotaTag(a, b, c)
Definition: exfuncs.h:530
#define ExAcquireSpinLock(Lock, OldIrql)
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:1153
#define PROTECTED_POOL
Definition: extypes.h:340
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE GENERAL_LOOKASIDE
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:4327
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:4303
#define POOL_RAISE_IF_ALLOCATION_FAILURE
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _Inout_ PLARGE_INTEGER NumberOfBytes
Definition: iotypes.h:1036
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:792
@ PagedPoolSession
Definition: ketypes.h:881
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:676
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:675
@ LockQueueNonPagedPoolLock
Definition: ketypes.h:652
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:677
#define ROUND_TO_PAGES(Size)
#define BYTES_TO_PAGES(Size)
#define PAGE_ALIGN(Va)
#define ObDereferenceObject
Definition: obfuncs.h:203
#define ObReferenceObject
Definition: obfuncs.h:204
#define PsGetCurrentProcess
Definition: psfuncs.h:17
#define NT_VERIFY(exp)
Definition: rtlfuncs.h:3287
#define InterlockedPushEntrySList(SListHead, SListEntry)
Definition: rtlfuncs.h:3389
#define InterlockedPopEntrySList(SListHead)
Definition: rtlfuncs.h:3392
__wchar_t WCHAR
Definition: xmlstorage.h:180
char CHAR
Definition: xmlstorage.h:175