ReactOS  0.4.14-dev-552-g2fad488
expool.c
Go to the documentation of this file.
1 /*
2  * PROJECT: ReactOS Kernel
3  * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE: ntoskrnl/mm/ARM3/expool.c
5  * PURPOSE: ARM Memory Manager Executive Pool Manager
6  * PROGRAMMERS: ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20 
21 /* GLOBALS ********************************************************************/
22 
/* Flag stored in the low bit of a big-pool tracker entry's Va field to mark the slot as free */
#define POOL_BIG_TABLE_ENTRY_FREE 0x1
24 
25 typedef struct _POOL_DPC_CONTEXT
26 {
32 
51 
/* Pool block/header/list access macros */
/* POOL_ENTRY: caller-visible data pointer -> its POOL_HEADER (header precedes the data) */
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* POOL_FREE_BLOCK: header -> the LIST_ENTRY kept in a free block's data area */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* POOL_BLOCK: advance a header by i pool blocks (i may be negative) */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* POOL_NEXT_BLOCK / POOL_PREV_BLOCK: neighbor headers via this header's size fields */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
58 
59 /*
60  * Pool list access debug macros, similar to Arthur's pfnlist.c work.
61  * Microsoft actually implements similar checks in the Windows Server 2003 SP1
62  * pool code, but only for checked builds.
63  *
64  * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
65  * that these checks are done even on retail builds, due to the increasing
66  * number of kernel-mode attacks which depend on dangling list pointers and other
67  * kinds of list-based attacks.
68  *
69  * For now, I will leave these checks on all the time, but later they are likely
70  * to be DBG-only, at least until there are enough kernel-mode security attacks
71  * against ReactOS to warrant the performance hit.
72  *
73  * For now, these are not made inline, so we can get good stack traces.
74  */
76 NTAPI
78 {
79  return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
80 }
81 
83 NTAPI
85 {
86  return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
87 }
88 
89 VOID
90 NTAPI
92 {
93  if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
94  (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
95  {
96  KeBugCheckEx(BAD_POOL_HEADER,
97  3,
98  (ULONG_PTR)ListHead,
100  (ULONG_PTR)ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink));
101  }
102 }
103 
104 VOID
105 NTAPI
107 {
108  ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
109 }
110 
111 BOOLEAN
112 NTAPI
114 {
115  return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
116 }
117 
118 VOID
119 NTAPI
121 {
122  PLIST_ENTRY Blink, Flink;
123  Flink = ExpDecodePoolLink(Entry->Flink);
124  Blink = ExpDecodePoolLink(Entry->Blink);
125  Flink->Blink = ExpEncodePoolLink(Blink);
126  Blink->Flink = ExpEncodePoolLink(Flink);
127 }
128 
130 NTAPI
132 {
133  PLIST_ENTRY Entry, Flink;
134  Entry = ExpDecodePoolLink(ListHead->Flink);
135  Flink = ExpDecodePoolLink(Entry->Flink);
136  ListHead->Flink = ExpEncodePoolLink(Flink);
137  Flink->Blink = ExpEncodePoolLink(ListHead);
138  return Entry;
139 }
140 
142 NTAPI
144 {
145  PLIST_ENTRY Entry, Blink;
146  Entry = ExpDecodePoolLink(ListHead->Blink);
147  Blink = ExpDecodePoolLink(Entry->Blink);
148  ListHead->Blink = ExpEncodePoolLink(Blink);
149  Blink->Flink = ExpEncodePoolLink(ListHead);
150  return Entry;
151 }
152 
153 VOID
154 NTAPI
157 {
158  PLIST_ENTRY Blink;
159  ExpCheckPoolLinks(ListHead);
160  Blink = ExpDecodePoolLink(ListHead->Blink);
161  Entry->Flink = ExpEncodePoolLink(ListHead);
162  Entry->Blink = ExpEncodePoolLink(Blink);
163  Blink->Flink = ExpEncodePoolLink(Entry);
164  ListHead->Blink = ExpEncodePoolLink(Entry);
165  ExpCheckPoolLinks(ListHead);
166 }
167 
168 VOID
169 NTAPI
172 {
173  PLIST_ENTRY Flink;
174  ExpCheckPoolLinks(ListHead);
175  Flink = ExpDecodePoolLink(ListHead->Flink);
176  Entry->Flink = ExpEncodePoolLink(Flink);
177  Entry->Blink = ExpEncodePoolLink(ListHead);
178  Flink->Blink = ExpEncodePoolLink(Entry);
179  ListHead->Flink = ExpEncodePoolLink(Entry);
180  ExpCheckPoolLinks(ListHead);
181 }
182 
183 VOID
184 NTAPI
186 {
187  PPOOL_HEADER PreviousEntry, NextEntry;
188 
189  /* Is there a block before this one? */
190  if (Entry->PreviousSize)
191  {
192  /* Get it */
193  PreviousEntry = POOL_PREV_BLOCK(Entry);
194 
195  /* The two blocks must be on the same page! */
196  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
197  {
198  /* Something is awry */
199  KeBugCheckEx(BAD_POOL_HEADER,
200  6,
201  (ULONG_PTR)PreviousEntry,
202  __LINE__,
203  (ULONG_PTR)Entry);
204  }
205 
206  /* This block should also indicate that it's as large as we think it is */
207  if (PreviousEntry->BlockSize != Entry->PreviousSize)
208  {
209  /* Otherwise, someone corrupted one of the sizes */
210  DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
211  PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
212  Entry->PreviousSize, (char *)&Entry->PoolTag);
213  KeBugCheckEx(BAD_POOL_HEADER,
214  5,
215  (ULONG_PTR)PreviousEntry,
216  __LINE__,
217  (ULONG_PTR)Entry);
218  }
219  }
220  else if (PAGE_ALIGN(Entry) != Entry)
221  {
222  /* If there's no block before us, we are the first block, so we should be on a page boundary */
223  KeBugCheckEx(BAD_POOL_HEADER,
224  7,
225  0,
226  __LINE__,
227  (ULONG_PTR)Entry);
228  }
229 
230  /* This block must have a size */
231  if (!Entry->BlockSize)
232  {
233  /* Someone must've corrupted this field */
234  if (Entry->PreviousSize)
235  {
236  PreviousEntry = POOL_PREV_BLOCK(Entry);
237  DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
238  (char *)&PreviousEntry->PoolTag,
239  (char *)&Entry->PoolTag);
240  }
241  else
242  {
243  DPRINT1("Entry tag %.4s\n",
244  (char *)&Entry->PoolTag);
245  }
246  KeBugCheckEx(BAD_POOL_HEADER,
247  8,
248  0,
249  __LINE__,
250  (ULONG_PTR)Entry);
251  }
252 
253  /* Okay, now get the next block */
254  NextEntry = POOL_NEXT_BLOCK(Entry);
255 
256  /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
257  if (PAGE_ALIGN(NextEntry) != NextEntry)
258  {
259  /* The two blocks must be on the same page! */
260  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
261  {
262  /* Something is messed up */
263  KeBugCheckEx(BAD_POOL_HEADER,
264  9,
265  (ULONG_PTR)NextEntry,
266  __LINE__,
267  (ULONG_PTR)Entry);
268  }
269 
270  /* And this block should think we are as large as we truly are */
271  if (NextEntry->PreviousSize != Entry->BlockSize)
272  {
273  /* Otherwise, someone corrupted the field */
274  DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
275  Entry->BlockSize, (char *)&Entry->PoolTag,
276  NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
277  KeBugCheckEx(BAD_POOL_HEADER,
278  5,
279  (ULONG_PTR)NextEntry,
280  __LINE__,
281  (ULONG_PTR)Entry);
282  }
283  }
284 }
285 
286 VOID
287 NTAPI
289  PVOID P,
291  ULONG Tag)
292 {
294  ULONG i;
295  KIRQL OldIrql;
296  POOL_TYPE RealPoolType;
297 
298  /* Get the pool header */
299  Entry = ((PPOOL_HEADER)P) - 1;
300 
301  /* Check if this is a large allocation */
302  if (PAGE_ALIGN(P) == P)
303  {
304  /* Lock the pool table */
306 
307  /* Find the pool tag */
308  for (i = 0; i < PoolBigPageTableSize; i++)
309  {
310  /* Check if this is our allocation */
311  if (PoolBigPageTable[i].Va == P)
312  {
313  /* Make sure the tag is ok */
314  if (PoolBigPageTable[i].Key != Tag)
315  {
316  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
317  }
318 
319  break;
320  }
321  }
322 
323  /* Release the lock */
325 
326  if (i == PoolBigPageTableSize)
327  {
328  /* Did not find the allocation */
329  //ASSERT(FALSE);
330  }
331 
332  /* Get Pool type by address */
333  RealPoolType = MmDeterminePoolType(P);
334  }
335  else
336  {
337  /* Verify the tag */
338  if (Entry->PoolTag != Tag)
339  {
340  DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
341  &Tag, &Entry->PoolTag, Entry->PoolTag);
342  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
343  }
344 
345  /* Check the rest of the header */
347 
348  /* Get Pool type from entry */
349  RealPoolType = (Entry->PoolType - 1);
350  }
351 
352  /* Should we check the pool type? */
353  if (PoolType != -1)
354  {
355  /* Verify the pool type */
356  if (RealPoolType != PoolType)
357  {
358  DPRINT1("Wrong pool type! Expected %s, got %s\n",
359  PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
360  (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
361  KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
362  }
363  }
364 }
365 
366 VOID
367 NTAPI
369 {
370  BOOLEAN FoundBlock = FALSE;
371  SIZE_T Size = 0;
373 
374  /* Get the first entry for this page, make sure it really is the first */
375  Entry = PAGE_ALIGN(Block);
376  ASSERT(Entry->PreviousSize == 0);
377 
378  /* Now scan each entry */
379  while (TRUE)
380  {
381  /* When we actually found our block, remember this */
382  if (Entry == Block) FoundBlock = TRUE;
383 
384  /* Now validate this block header */
386 
387  /* And go to the next one, keeping track of our size */
388  Size += Entry->BlockSize;
390 
391  /* If we hit the last block, stop */
392  if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
393 
394  /* If we hit the end of the page, stop */
395  if (PAGE_ALIGN(Entry) == Entry) break;
396  }
397 
398  /* We must've found our block, and we must have hit the end of the page */
399  if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
400  {
401  /* Otherwise, the blocks are messed up */
402  KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
403  }
404 }
405 
407 VOID
410  IN PVOID Entry)
411 {
412  //
413  // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
414  // be DISPATCH_LEVEL or lower for Non Paged Pool
415  //
416  if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
419  {
420  //
421  // Take the system down
422  //
423  KeBugCheckEx(BAD_POOL_CALLER,
426  PoolType,
428  }
429 }
430 
432 ULONG
434  IN SIZE_T BucketMask)
435 {
436  //
437  // Compute the hash by multiplying with a large prime number and then XORing
438  // with the HIDWORD of the result.
439  //
440  // Finally, AND with the bucket mask to generate a valid index/bucket into
441  // the table
442  //
443  ULONGLONG Result = (ULONGLONG)40543 * Tag;
444  return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
445 }
446 
448 ULONG
450 {
451  ULONG Result;
452  //
453  // Compute the hash by converting the address into a page number, and then
454  // XORing each nibble with the next one.
455  //
456  // We do *NOT* AND with the bucket mask at this point because big table expansion
457  // might happen. Therefore, the final step of the hash must be performed
458  // while holding the expansion pushlock, and this is why we call this a
459  // "partial" hash only.
460  //
462  return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
463 }
464 
#if DBG
/*
 * FORCEINLINE
 * BOOLEAN
 * ExpTagAllowPrint(CHAR Tag);
 */
/* TRUE when a tag byte is a printable ASCII character and can be shown as text */
#define ExpTagAllowPrint(Tag) \
    ((Tag) >= 0x20 /* Space */ && (Tag) <= 0x7E /* Tilde */)

/* MiDumperPrint: route dumper output to the kernel debugger prompt when
 * invoked from KDBG (dbg != 0), otherwise to DPRINT1 (e.g. the OOM path). */
#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...) \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...) \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif
482 
483 VOID
484 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
485 {
486  SIZE_T i;
488 
489  //
490  // Only print header if called from OOM situation
491  //
492  if (!CalledFromDbg)
493  {
494  DPRINT1("---------------------\n");
495  DPRINT1("Out of memory dumper!\n");
496  }
497 #ifdef KDBG
498  else
499  {
500  KdbpPrint("Pool Used:\n");
501  }
502 #endif
503 
504  //
505  // Remember whether we'll have to be verbose
506  // This is the only supported flag!
507  //
509 
510  //
511  // Print table header
512  //
513  if (Verbose)
514  {
515  MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
516  MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
517  }
518  else
519  {
520  MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
521  MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
522  }
523 
524  //
525  // We'll extract allocations for all the tracked pools
526  //
527  for (i = 0; i < PoolTrackTableSize; ++i)
528  {
530 
532 
533  //
534  // We only care about tags which have allocated memory
535  //
536  if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
537  {
538  //
539  // If there's a tag, attempt to do a pretty print
540  // only if it matches the caller's tag, or if
541  // any tag is allowed
542  // For checking whether it matches caller's tag,
543  // use the mask to make sure not to mess with the wildcards
544  //
545  if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
546  (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
547  {
548  CHAR Tag[4];
549 
550  //
551  // Extract each 'component' and check whether they are printable
552  //
553  Tag[0] = TableEntry->Key & 0xFF;
554  Tag[1] = TableEntry->Key >> 8 & 0xFF;
555  Tag[2] = TableEntry->Key >> 16 & 0xFF;
556  Tag[3] = TableEntry->Key >> 24 & 0xFF;
557 
558  if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
559  {
560  //
561  // Print in direct order to make !poolused TAG usage easier
562  //
563  if (Verbose)
564  {
565  MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
566  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
567  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
568  TableEntry->PagedAllocs, TableEntry->PagedFrees,
569  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
570  }
571  else
572  {
573  MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
574  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
575  TableEntry->PagedAllocs, TableEntry->PagedBytes);
576  }
577  }
578  else
579  {
580  if (Verbose)
581  {
582  MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
583  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
584  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
585  TableEntry->PagedAllocs, TableEntry->PagedFrees,
586  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
587  }
588  else
589  {
590  MiDumperPrint(CalledFromDbg, "0x%08x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
591  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
592  TableEntry->PagedAllocs, TableEntry->PagedBytes);
593  }
594  }
595  }
596  else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
597  {
598  if (Verbose)
599  {
600  MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
601  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
602  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
603  TableEntry->PagedAllocs, TableEntry->PagedFrees,
604  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
605  }
606  else
607  {
608  MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
609  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
610  TableEntry->PagedAllocs, TableEntry->PagedBytes);
611  }
612  }
613  }
614  }
615 
616  if (!CalledFromDbg)
617  {
618  DPRINT1("---------------------\n");
619  }
620 }
621 #endif
622 
623 /* PRIVATE FUNCTIONS **********************************************************/
624 
625 INIT_FUNCTION
626 VOID
627 NTAPI
629 {
630  ULONG i, Key, Hash, Index;
631  PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
632  ULONG TagList[] =
633  {
634  ' oI',
635  ' laH',
636  'PldM',
637  'LooP',
638  'tSbO',
639  ' prI',
640  'bdDN',
641  'LprI',
642  'pOoI',
643  ' ldM',
644  'eliF',
645  'aVMC',
646  'dSeS',
647  'CFtN',
648  'looP',
649  'rPCT',
650  'bNMC',
651  'dTeS',
652  'sFtN',
653  'TPCT',
654  'CPCT',
655  ' yeK',
656  'qSbO',
657  'mNoI',
658  'aEoI',
659  'cPCT',
660  'aFtN',
661  '0ftN',
662  'tceS',
663  'SprI',
664  'ekoT',
665  ' eS',
666  'lCbO',
667  'cScC',
668  'lFtN',
669  'cAeS',
670  'mfSF',
671  'kWcC',
672  'miSF',
673  'CdfA',
674  'EdfA',
675  'orSF',
676  'nftN',
677  'PRIU',
678  'rFpN',
679  'RFpN',
680  'aPeS',
681  'sUeS',
682  'FpcA',
683  'MpcA',
684  'cSeS',
685  'mNbO',
686  'sFpN',
687  'uLeS',
688  'DPcS',
689  'nevE',
690  'vrqR',
691  'ldaV',
692  ' pP',
693  'SdaV',
694  ' daV',
695  'LdaV',
696  'FdaV',
697  ' GIB',
698  };
699 
700  //
701  // Loop all 64 hot tags
702  //
703  ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
704  for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
705  {
706  //
707  // Get the current tag, and compute its hash in the tracker table
708  //
709  Key = TagList[i];
711 
712  //
713  // Loop all the hashes in this index/bucket
714  //
715  Index = Hash;
716  while (TRUE)
717  {
718  //
719  // Find an empty entry, and make sure this isn't the last hash that
720  // can fit.
721  //
722  // On checked builds, also make sure this is the first time we are
723  // seeding this tag.
724  //
725  ASSERT(TrackTable[Hash].Key != Key);
726  if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
727  {
728  //
729  // It has been seeded, move on to the next tag
730  //
731  TrackTable[Hash].Key = Key;
732  break;
733  }
734 
735  //
736  // This entry was already taken, compute the next possible hash while
737  // making sure we're not back at our initial index.
738  //
739  ASSERT(TrackTable[Hash].Key != Key);
740  Hash = (Hash + 1) & PoolTrackTableMask;
741  if (Hash == Index) break;
742  }
743  }
744 }
745 
746 VOID
747 NTAPI
751 {
752  ULONG Hash, Index;
754  SIZE_T TableMask, TableSize;
755 
756  //
757  // Remove the PROTECTED_POOL flag which is not part of the tag
758  //
759  Key &= ~PROTECTED_POOL;
760 
761  //
762  // With WinDBG you can set a tag you want to break on when an allocation is
763  // attempted
764  //
765  if (Key == PoolHitTag) DbgBreakPoint();
766 
767  //
768  // Why the double indirection? Because normally this function is also used
769  // when doing session pool allocations, which has another set of tables,
770  // sizes, and masks that live in session pool. Now we don't support session
771  // pool so we only ever use the regular tables, but I'm keeping the code this
772  // way so that the day we DO support session pool, it won't require that
773  // many changes
774  //
776  TableMask = PoolTrackTableMask;
779 
780  //
781  // Compute the hash for this key, and loop all the possible buckets
782  //
783  Hash = ExpComputeHashForTag(Key, TableMask);
784  Index = Hash;
785  while (TRUE)
786  {
787  //
788  // Have we found the entry for this tag? */
789  //
790  TableEntry = &Table[Hash];
791  if (TableEntry->Key == Key)
792  {
793  //
794  // Decrement the counters depending on if this was paged or nonpaged
795  // pool
796  //
798  {
799  InterlockedIncrement(&TableEntry->NonPagedFrees);
800  InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
802  return;
803  }
804  InterlockedIncrement(&TableEntry->PagedFrees);
807  return;
808  }
809 
810  //
811  // We should have only ended up with an empty entry if we've reached
812  // the last bucket
813  //
814  if (!TableEntry->Key)
815  {
816  DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
817  Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
818  ASSERT(Hash == TableMask);
819  }
820 
821  //
822  // This path is hit when we don't have an entry, and the current bucket
823  // is full, so we simply try the next one
824  //
825  Hash = (Hash + 1) & TableMask;
826  if (Hash == Index) break;
827  }
828 
829  //
830  // And finally this path is hit when all the buckets are full, and we need
831  // some expansion. This path is not yet supported in ReactOS and so we'll
832  // ignore the tag
833  //
834  DPRINT1("Out of pool tag space, ignoring...\n");
835 }
836 
837 VOID
838 NTAPI
842 {
843  ULONG Hash, Index;
844  KIRQL OldIrql;
846  SIZE_T TableMask, TableSize;
847 
848  //
849  // Remove the PROTECTED_POOL flag which is not part of the tag
850  //
851  Key &= ~PROTECTED_POOL;
852 
853  //
854  // With WinDBG you can set a tag you want to break on when an allocation is
855  // attempted
856  //
857  if (Key == PoolHitTag) DbgBreakPoint();
858 
859  //
860  // There is also an internal flag you can set to break on malformed tags
861  //
862  if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
863 
864  //
865  // ASSERT on ReactOS features not yet supported
866  //
869 
870  //
871  // Why the double indirection? Because normally this function is also used
872  // when doing session pool allocations, which has another set of tables,
873  // sizes, and masks that live in session pool. Now we don't support session
874  // pool so we only ever use the regular tables, but I'm keeping the code this
875  // way so that the day we DO support session pool, it won't require that
876  // many changes
877  //
879  TableMask = PoolTrackTableMask;
882 
883  //
884  // Compute the hash for this key, and loop all the possible buckets
885  //
886  Hash = ExpComputeHashForTag(Key, TableMask);
887  Index = Hash;
888  while (TRUE)
889  {
890  //
891  // Do we already have an entry for this tag? */
892  //
893  TableEntry = &Table[Hash];
894  if (TableEntry->Key == Key)
895  {
896  //
897  // Increment the counters depending on if this was paged or nonpaged
898  // pool
899  //
901  {
902  InterlockedIncrement(&TableEntry->NonPagedAllocs);
904  return;
905  }
906  InterlockedIncrement(&TableEntry->PagedAllocs);
908  return;
909  }
910 
911  //
912  // We don't have an entry yet, but we've found a free bucket for it
913  //
914  if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
915  {
916  //
917  // We need to hold the lock while creating a new entry, since other
918  // processors might be in this code path as well
919  //
921  if (!PoolTrackTable[Hash].Key)
922  {
923  //
924  // We've won the race, so now create this entry in the bucket
925  //
926  ASSERT(Table[Hash].Key == 0);
928  TableEntry->Key = Key;
929  }
931 
932  //
933  // Now we force the loop to run again, and we should now end up in
934  // the code path above which does the interlocked increments...
935  //
936  continue;
937  }
938 
939  //
940  // This path is hit when we don't have an entry, and the current bucket
941  // is full, so we simply try the next one
942  //
943  Hash = (Hash + 1) & TableMask;
944  if (Hash == Index) break;
945  }
946 
947  //
948  // And finally this path is hit when all the buckets are full, and we need
949  // some expansion. This path is not yet supported in ReactOS and so we'll
950  // ignore the tag
951  //
952  DPRINT1("Out of pool tag space, ignoring...\n");
953 }
954 
955 INIT_FUNCTION
956 VOID
957 NTAPI
960  IN ULONG PoolIndex,
961  IN ULONG Threshold,
962  IN PVOID PoolLock)
963 {
964  PLIST_ENTRY NextEntry, LastEntry;
965 
966  //
967  // Setup the descriptor based on the caller's request
968  //
969  PoolDescriptor->PoolType = PoolType;
970  PoolDescriptor->PoolIndex = PoolIndex;
971  PoolDescriptor->Threshold = Threshold;
972  PoolDescriptor->LockAddress = PoolLock;
973 
974  //
975  // Initialize accounting data
976  //
977  PoolDescriptor->RunningAllocs = 0;
978  PoolDescriptor->RunningDeAllocs = 0;
979  PoolDescriptor->TotalPages = 0;
980  PoolDescriptor->TotalBytes = 0;
981  PoolDescriptor->TotalBigPages = 0;
982 
983  //
984  // Nothing pending for now
985  //
986  PoolDescriptor->PendingFrees = NULL;
987  PoolDescriptor->PendingFreeDepth = 0;
988 
989  //
990  // Loop all the descriptor's allocation lists and initialize them
991  //
992  NextEntry = PoolDescriptor->ListHeads;
993  LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
994  while (NextEntry < LastEntry)
995  {
996  ExpInitializePoolListHead(NextEntry);
997  NextEntry++;
998  }
999 
1000  //
1001  // Note that ReactOS does not support Session Pool Yet
1002  //
1004 }
1005 
1006 INIT_FUNCTION
1007 VOID
1008 NTAPI
1010  IN ULONG Threshold)
1011 {
1013  SIZE_T TableSize;
1014  ULONG i;
1015 
1016  //
1017  // Check what kind of pool this is
1018  //
1019  if (PoolType == NonPagedPool)
1020  {
1021  //
1022  // Compute the track table size and convert it from a power of two to an
1023  // actual byte size
1024  //
1025  // NOTE: On checked builds, we'll assert if the registry table size was
1026  // invalid, while on retail builds we'll just break out of the loop at
1027  // that point.
1028  //
1030  for (i = 0; i < 32; i++)
1031  {
1032  if (TableSize & 1)
1033  {
1034  ASSERT((TableSize & ~1) == 0);
1035  if (!(TableSize & ~1)) break;
1036  }
1037  TableSize >>= 1;
1038  }
1039 
1040  //
1041  // If we hit bit 32, than no size was defined in the registry, so
1042  // we'll use the default size of 2048 entries.
1043  //
1044  // Otherwise, use the size from the registry, as long as it's not
1045  // smaller than 64 entries.
1046  //
1047  if (i == 32)
1048  {
1049  PoolTrackTableSize = 2048;
1050  }
1051  else
1052  {
1053  PoolTrackTableSize = max(1 << i, 64);
1054  }
1055 
1056  //
1057  // Loop trying with the biggest specified size first, and cut it down
1058  // by a power of two each iteration in case not enough memory exist
1059  //
1060  while (TRUE)
1061  {
1062  //
1063  // Do not allow overflow
1064  //
1065  if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1066  {
1067  PoolTrackTableSize >>= 1;
1068  continue;
1069  }
1070 
1071  //
1072  // Allocate the tracker table and exit the loop if this worked
1073  //
1075  (PoolTrackTableSize + 1) *
1076  sizeof(POOL_TRACKER_TABLE));
1077  if (PoolTrackTable) break;
1078 
1079  //
1080  // Otherwise, as long as we're not down to the last bit, keep
1081  // iterating
1082  //
1083  if (PoolTrackTableSize == 1)
1084  {
1085  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1086  TableSize,
1087  0xFFFFFFFF,
1088  0xFFFFFFFF,
1089  0xFFFFFFFF);
1090  }
1091  PoolTrackTableSize >>= 1;
1092  }
1093 
1094  //
1095  // Add one entry, compute the hash, and zero the table
1096  //
1099 
1102 
1103  //
1104  // Finally, add the most used tags to speed up those allocations
1105  //
1106  ExpSeedHotTags();
1107 
1108  //
1109  // We now do the exact same thing with the tracker table for big pages
1110  //
1112  for (i = 0; i < 32; i++)
1113  {
1114  if (TableSize & 1)
1115  {
1116  ASSERT((TableSize & ~1) == 0);
1117  if (!(TableSize & ~1)) break;
1118  }
1119  TableSize >>= 1;
1120  }
1121 
1122  //
1123  // For big pages, the default tracker table is 4096 entries, while the
1124  // minimum is still 64
1125  //
1126  if (i == 32)
1127  {
1128  PoolBigPageTableSize = 4096;
1129  }
1130  else
1131  {
1132  PoolBigPageTableSize = max(1 << i, 64);
1133  }
1134 
1135  //
1136  // Again, run the exact same loop we ran earlier, but this time for the
1137  // big pool tracker instead
1138  //
1139  while (TRUE)
1140  {
1142  {
1143  PoolBigPageTableSize >>= 1;
1144  continue;
1145  }
1146 
1149  sizeof(POOL_TRACKER_BIG_PAGES));
1150  if (PoolBigPageTable) break;
1151 
1152  if (PoolBigPageTableSize == 1)
1153  {
1154  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1155  TableSize,
1156  0xFFFFFFFF,
1157  0xFFFFFFFF,
1158  0xFFFFFFFF);
1159  }
1160 
1161  PoolBigPageTableSize >>= 1;
1162  }
1163 
1164  //
1165  // An extra entry is not needed for for the big pool tracker, so just
1166  // compute the hash and zero it
1167  //
1171  for (i = 0; i < PoolBigPageTableSize; i++)
1172  {
1174  }
1175 
1176  //
1177  // During development, print this out so we can see what's happening
1178  //
1179  DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1181  DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1183 
1184  //
1185  // Insert the generic tracker for all of big pool
1186  //
1187  ExpInsertPoolTracker('looP',
1189  sizeof(POOL_TRACKER_BIG_PAGES)),
1190  NonPagedPool);
1191 
1192  //
1193  // No support for NUMA systems at this time
1194  //
1195  ASSERT(KeNumberNodes == 1);
1196 
1197  //
1198  // Initialize the tag spinlock
1199  //
1201 
1202  //
1203  // Initialize the nonpaged pool descriptor
1204  //
1207  NonPagedPool,
1208  0,
1209  Threshold,
1210  NULL);
1211  }
1212  else
1213  {
1214  //
1215  // No support for NUMA systems at this time
1216  //
1217  ASSERT(KeNumberNodes == 1);
1218 
1219  //
1220  // Allocate the pool descriptor
1221  //
1223  sizeof(KGUARDED_MUTEX) +
1224  sizeof(POOL_DESCRIPTOR),
1225  'looP');
1226  if (!Descriptor)
1227  {
1228  //
1229  // This is really bad...
1230  //
1231  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1232  0,
1233  -1,
1234  -1,
1235  -1);
1236  }
1237 
1238  //
1239  // Setup the vector and guarded mutex for paged pool
1240  //
1246  PagedPool,
1247  0,
1248  Threshold,
1250 
1251  //
1252  // Insert the generic tracker for all of nonpaged pool
1253  //
1254  ExpInsertPoolTracker('looP',
1256  NonPagedPool);
1257  }
1258 }
1259 
1261 KIRQL
1263 {
1264  //
1265  // Check if this is nonpaged pool
1266  //
1267  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1268  {
1269  //
1270  // Use the queued spin lock
1271  //
1273  }
1274  else
1275  {
1276  //
1277  // Use the guarded mutex
1278  //
1279  KeAcquireGuardedMutex(Descriptor->LockAddress);
1280  return APC_LEVEL;
1281  }
1282 }
1283 
1285 VOID
1287  IN KIRQL OldIrql)
1288 {
1289  //
1290  // Check if this is nonpaged pool
1291  //
1292  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1293  {
1294  //
1295  // Use the queued spin lock
1296  //
1298  }
1299  else
1300  {
1301  //
1302  // Use the guarded mutex
1303  //
1304  KeReleaseGuardedMutex(Descriptor->LockAddress);
1305  }
1306 }
1307 
1308 VOID
1309 NTAPI
1314 {
1318 
1319  //
1320  // Make sure we win the race, and if we did, copy the data atomically
1321  //
1323  {
1324  RtlCopyMemory(Context->PoolTrackTable,
1326  Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1327 
1328  //
1329  // This is here because ReactOS does not yet support expansion
1330  //
1331  ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1332  }
1333 
1334  //
1335  // Regardless of whether we won or not, we must now synchronize and then
1336  // decrement the barrier since this is one more processor that has completed
1337  // the callback.
1338  //
1341 }
1342 
/*
 * ExGetPoolTagInfo - services SystemPoolTagInformation queries: snapshots
 * the pool tracker table via a "Generic DPC" broadcast, then converts each
 * non-empty tracker entry into a SYSTEM_POOLTAG record in the caller's
 * buffer. Count/CurrentLength keep accumulating even after the buffer
 * overflows so the caller learns the required size.
 *
 * NOTE(review): this extracted listing is missing several original lines —
 * the function name/parameter lines, the Status initialization, the 'ofnI'
 * temporary-buffer allocation, the KeGenericCallDpc invocation, and the
 * STATUS_INFO_LENGTH_MISMATCH assignment in the too-small branch. Do not
 * treat the visible text as the complete function.
 */
1343 NTSTATUS
1344 NTAPI
1346  IN ULONG SystemInformationLength,
1348 {
1349  ULONG TableSize, CurrentLength;
1350  ULONG EntryCount;
1352  PSYSTEM_POOLTAG TagEntry;
1353  PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1356 
1357  //
1358  // Keep track of how much data the caller's buffer must hold
1359  //
1360  CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1361 
1362  //
1363  // Initialize the caller's buffer
1364  //
1365  TagEntry = &SystemInformation->TagInfo[0];
1366  SystemInformation->Count = 0;
1367 
1368  //
1369  // Capture the number of entries, and the total size needed to make a copy
1370  // of the table
1371  //
1372  EntryCount = (ULONG)PoolTrackTableSize;
1373  TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1374 
1375  //
1376  // Allocate the "Generic DPC" temporary buffer
1377  //
/* NOTE(review): allocation of Buffer (tag 'ofnI') dropped by extraction */
1380 
1381  //
1382  // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1383  //
1384  Context.PoolTrackTable = Buffer;
1385  Context.PoolTrackTableSize = PoolTrackTableSize;
1386  Context.PoolTrackTableExpansion = NULL;
1387  Context.PoolTrackTableSizeExpansion = 0;
/* NOTE(review): the KeGenericCallDpc(ExpGetPoolTagInfoTarget, &Context)
   broadcast was dropped by extraction */
1389 
1390  //
1391  // Now parse the results
1392  //
1393  for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1394  {
1395  //
1396  // If the entry is empty, skip it
1397  //
1398  if (!TrackerEntry->Key) continue;
1399 
1400  //
1401  // Otherwise, add one more entry to the caller's buffer, and ensure that
1402  // enough space has been allocated in it
1403  //
1404  SystemInformation->Count++;
1405  CurrentLength += sizeof(*TagEntry);
1406  if (SystemInformationLength < CurrentLength)
1407  {
1408  //
1409  // The caller's buffer is too small, so set a failure code. The
1410  // caller will know the count, as well as how much space is needed.
1411  //
1412  // We do NOT break out of the loop, because we want to keep incrementing
1413  // the Count as well as CurrentLength so that the caller can know the
1414  // final numbers
1415  //
1417  }
1418  else
1419  {
1420  //
1421  // Small sanity check that our accounting is working correctly
1422  //
1423  ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1424  ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1425 
1426  //
1427  // Return the data into the caller's buffer
1428  //
1429  TagEntry->TagUlong = TrackerEntry->Key;
1430  TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1431  TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1432  TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1433  TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1434  TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1435  TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1436  TagEntry++;
1437  }
1438  }
1439 
1440  //
1441  // Free the "Generic DPC" temporary buffer, return the buffer length and status
1442  //
1443  ExFreePoolWithTag(Buffer, 'ofnI');
1444  if (ReturnLength) *ReturnLength = CurrentLength;
1445  return Status;
1446 }
1447 
1449 BOOLEAN
1450 NTAPI
1451 ExpExpandBigPageTable(
1453 {
1454  ULONG OldSize = PoolBigPageTableSize;
1455  ULONG NewSize = 2 * OldSize;
1456  ULONG NewSizeInBytes;
1457  PPOOL_TRACKER_BIG_PAGES NewTable;
1458  PPOOL_TRACKER_BIG_PAGES OldTable;
1459  ULONG i;
1460  ULONG PagesFreed;
1461  ULONG Hash;
1462  ULONG HashMask;
1463 
1464  /* Must be holding ExpLargePoolTableLock */
1466 
1467  /* Make sure we don't overflow */
1468  if (!NT_SUCCESS(RtlULongMult(2,
1469  OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
1470  &NewSizeInBytes)))
1471  {
1472  DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
1474  return FALSE;
1475  }
1476 
1477  NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1478  if (NewTable == NULL)
1479  {
1480  DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
1482  return FALSE;
1483  }
1484 
1485  DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);
1486 
1487  /* Initialize the new table */
1488  RtlZeroMemory(NewTable, NewSizeInBytes);
1489  for (i = 0; i < NewSize; i++)
1490  {
1491  NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1492  }
1493 
1494  /* Copy over all items */
1495  OldTable = PoolBigPageTable;
1496  HashMask = NewSize - 1;
1497  for (i = 0; i < OldSize; i++)
1498  {
1499  /* Skip over empty items */
1500  if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1501  {
1502  continue;
1503  }
1504 
1505  /* Recalculate the hash due to the new table size */
1506  Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;
1507 
1508  /* Find the location in the new table */
1509  while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1510  {
1511  Hash = (Hash + 1) & HashMask;
1512  }
1513 
1514  /* We just enlarged the table, so we must have space */
1516 
1517  /* Finally, copy the item */
1518  NewTable[Hash] = OldTable[i];
1519  }
1520 
1521  /* Activate the new table */
1522  PoolBigPageTable = NewTable;
1525 
1526  /* Release the lock, we're done changing global state */
1528 
1529  /* Free the old table and update our tracker */
1530  PagesFreed = MiFreePoolPages(OldTable);
1531  ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1532  ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1533 
1534  return TRUE;
1535 }
1536 
/*
 * ExpAddTagForBigPages - records a big-page (whole-page) allocation in the
 * expandable PoolBigPageTable hash table so its tag/size can be recovered
 * at free time. Probes linearly from the hash bucket, claims a free slot
 * with an interlocked compare-exchange on Va, and triggers table expansion
 * when the table is full or crowding past 25% under contention.
 *
 * Returns TRUE if the entry was recorded, FALSE if expansion failed.
 *
 * NOTE(review): this extracted listing is missing several original lines —
 * the function name and Va parameter, the spinlock acquire plus the
 * TableSize/Hash reads at the Retry label, the in-use counter increment,
 * and the lock releases before the returns. Do not treat the visible text
 * as the complete function.
 */
1537 BOOLEAN
1538 NTAPI
1540  IN ULONG Key,
1541  IN ULONG NumberOfPages,
1543 {
1544  ULONG Hash, i = 0;
1545  PVOID OldVa;
1546  KIRQL OldIrql;
1547  SIZE_T TableSize;
1548  PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1551 
1552  //
1553  // As the table is expandable, these values must only be read after acquiring
1554  // the lock to avoid a teared access during an expansion
1555  // NOTE: Windows uses a special reader/writer SpinLock to improve
1556  // performance in the common case (add/remove a tracker entry)
1557  //
1558 Retry:
1563 
1564  //
1565  // We loop from the current hash bucket to the end of the table, and then
1566  // rollover to hash bucket 0 and keep going from there. If we return back
1567  // to the beginning, then we attempt expansion at the bottom of the loop
1568  //
1569  EntryStart = Entry = &PoolBigPageTable[Hash];
1570  EntryEnd = &PoolBigPageTable[TableSize];
1571  do
1572  {
1573  //
1574  // Make sure that this is a free entry and attempt to atomically make the
1575  // entry busy now
1576  // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1577  //
1578  OldVa = Entry->Va;
1579  if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1580  (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1581  {
1582  //
1583  // We now own this entry, write down the size and the pool tag
1584  //
1585  Entry->Key = Key;
1586  Entry->NumberOfPages = NumberOfPages;
1587 
1588  //
1589  // Add one more entry to the count, and see if we're getting within
1590  // 25% of the table size, at which point we'll do an expansion now
1591  // to avoid blocking too hard later on.
1592  //
1593  // Note that we only do this if it's also been the 16th time that we
1594  // keep losing the race or that we are not finding a free entry anymore,
1595  // which implies a massive number of concurrent big pool allocations.
1596  //
1598  if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1599  {
1600  DPRINT("Attempting expansion since we now have %lu entries\n",
/* NOTE(review): ExpExpandBigPageTable releases the table lock itself */
1603  ExpExpandBigPageTable(OldIrql);
1604  return TRUE;
1605  }
1606 
1607  //
1608  // We have our entry, return
1609  //
1611  return TRUE;
1612  }
1613 
1614  //
1615  // We don't have our entry yet, so keep trying, making the entry list
1616  // circular if we reach the last entry. We'll eventually break out of
1617  // the loop once we've rolled over and returned back to our original
1618  // hash bucket
1619  //
1620  i++;
1621  if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1622  } while (Entry != EntryStart);
1623 
1624  //
1625  // This means there's no free hash buckets whatsoever, so we now have
1626  // to attempt expanding the table
1627  //
1629  if (ExpExpandBigPageTable(OldIrql))
1630  {
1631  goto Retry;
1632  }
1634  DPRINT1("Big pool table expansion failed\n");
1635  return FALSE;
1636 }
1637 
/*
 * ExpFindAndRemoveTagBigPages - looks up a big-page allocation (by its
 * page-aligned Va) in PoolBigPageTable, returns its pool tag, stores the
 * page count through BigPages, and frees the slot. If the Va is not found
 * after two full passes, returns the sentinel tag ' GIB' with *BigPages = 0
 * so the caller can recover the size from Mm instead.
 *
 * NOTE(review): this extracted listing is missing several original lines —
 * the function name and Va parameter, the spinlock acquire plus the
 * TableSize/Hash computation, the Entry pointer capture, and the
 * free-bit set / counter decrement / lock release at the end. Do not treat
 * the visible text as the complete function.
 */
1638 ULONG
1639 NTAPI
1641  OUT PULONG_PTR BigPages,
1643 {
1644  BOOLEAN FirstTry = TRUE;
1645  SIZE_T TableSize;
1646  KIRQL OldIrql;
1647  ULONG PoolTag, Hash;
1651 
1652  //
1653  // As the table is expandable, these values must only be read after acquiring
1654  // the lock to avoid a teared access during an expansion
1655  //
1660 
1661  //
1662  // Loop while trying to find this big page allocation
1663  //
1664  while (PoolBigPageTable[Hash].Va != Va)
1665  {
1666  //
1667  // Increment the size until we go past the end of the table
1668  //
1669  if (++Hash >= TableSize)
1670  {
1671  //
1672  // Is this the second time we've tried?
1673  //
1674  if (!FirstTry)
1675  {
1676  //
1677  // This means it was never inserted into the pool table and it
1678  // received the special "BIG" tag -- return that and return 0
1679  // so that the code can ask Mm for the page count instead
1680  //
1682  *BigPages = 0;
1683  return ' GIB';
1684  }
1685 
1686  //
1687  // The first time this happens, reset the hash index and try again
1688  //
1689  Hash = 0;
1690  FirstTry = FALSE;
1691  }
1692  }
1693 
1694  //
1695  // Now capture all the information we need from the entry, since after we
1696  // release the lock, the data can change
1697  //
1699  *BigPages = Entry->NumberOfPages;
1700  PoolTag = Entry->Key;
1701 
1702  //
1703  // Set the free bit, and decrement the number of allocations. Finally, release
1704  // the lock and return the tag that was located
1705  //
1709  return PoolTag;
1710 }
1711 
/*
 * ExQueryPoolUsage - aggregates page, allocation, and free counters across
 * all paged pool descriptors and the (single) nonpaged pool descriptor, and
 * sums lookaside allocate-hits from the system lookaside list chain into the
 * paged/nonpaged hit totals, classified by each list's pool Type.
 *
 * NOTE(review): a few original lines are absent from this extracted listing
 * (around lines 1772 and 1780: presumably the loop-entry condition/zeroing
 * for the lookaside hit counters and the PGENERAL_LOOKASIDE declaration) —
 * confirm against upstream before assuming the hit counters are initialized
 * here.
 */
1712 VOID
1713 NTAPI
1714 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1715  OUT PULONG NonPagedPoolPages,
1716  OUT PULONG PagedPoolAllocs,
1717  OUT PULONG PagedPoolFrees,
1718  OUT PULONG PagedPoolLookasideHits,
1719  OUT PULONG NonPagedPoolAllocs,
1720  OUT PULONG NonPagedPoolFrees,
1721  OUT PULONG NonPagedPoolLookasideHits)
1722 {
1723  ULONG i;
1724  PPOOL_DESCRIPTOR PoolDesc;
1725 
1726  //
1727  // Assume all failures
1728  //
1729  *PagedPoolPages = 0;
1730  *PagedPoolAllocs = 0;
1731  *PagedPoolFrees = 0;
1732 
1733  //
1734  // Tally up the totals for all the paged pool
1735  //
1736  for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1737  {
1738  PoolDesc = ExpPagedPoolDescriptor[i];
1739  *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1740  *PagedPoolAllocs += PoolDesc->RunningAllocs;
1741  *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1742  }
1743 
1744  //
1745  // The first non-paged pool has a hardcoded well-known descriptor name
1746  //
1747  PoolDesc = &NonPagedPoolDescriptor;
1748  *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1749  *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1750  *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1751 
1752  //
1753  // If the system has more than one non-paged pool, copy the other descriptor
1754  // totals as well
1755  //
1756 #if 0
1757  if (ExpNumberOfNonPagedPools > 1)
1758  {
1759  for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1760  {
1761  PoolDesc = ExpNonPagedPoolDescriptor[i];
1762  *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1763  *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1764  *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1765  }
1766  }
1767 #endif
1768 
1769  //
1770  // Get the amount of hits in the system lookaside lists
1771  //
1773  {
1774  PLIST_ENTRY ListEntry;
1775 
1776  for (ListEntry = ExPoolLookasideListHead.Flink;
1777  ListEntry != &ExPoolLookasideListHead;
1778  ListEntry = ListEntry->Flink)
1779  {
1781 
1782  Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1783 
1784  if (Lookaside->Type == NonPagedPool)
1785  {
1786  *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1787  }
1788  else
1789  {
1790  *PagedPoolLookasideHits += Lookaside->AllocateHits;
1791  }
1792  }
1793  }
1794 }
1795 
/*
 * ExReturnPoolQuota - releases the process quota charged against a pool
 * allocation. For quota-charged blocks, the owning EPROCESS pointer is
 * stashed in the last pointer-sized slot of the block (just before the next
 * block header); it is validated to really be a process object (bugcheck
 * BAD_POOL_CALLER 0x0D otherwise), cleared, and the quota returned.
 *
 * NOTE(review): this extracted listing is missing several original lines —
 * the P parameter line, the local declarations (Entry, PoolType, Process),
 * the early-exit condition, and the PsReturnPoolQuota / object-dereference
 * calls near the end. Do not treat the visible text as the complete
 * function.
 */
1796 VOID
1797 NTAPI
1799 {
1802  USHORT BlockSize;
1804 
1807  {
1808  return;
1809  }
1810 
1811  Entry = P;
1812  Entry--;
1814 
1815  PoolType = Entry->PoolType - 1;
1816  BlockSize = Entry->BlockSize;
1817 
1818  if (PoolType & QUOTA_POOL_MASK)
1819  {
1820  Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1821  ASSERT(Process != NULL);
1822  if (Process)
1823  {
1824  if (Process->Pcb.Header.Type != ProcessObject)
1825  {
1826  DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1827  Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1828  KeBugCheckEx(BAD_POOL_CALLER,
1829  0x0D,
1830  (ULONG_PTR)P,
1831  Entry->PoolTag,
1832  (ULONG_PTR)Process);
1833  }
1834  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1837  BlockSize * POOL_BLOCK_SIZE);
1839  }
1840  }
1841 }
1842 
1843 /* PUBLIC FUNCTIONS ***********************************************************/
1844 
1845 /*
1846  * @implemented
1847  */
/*
 * ExAllocatePoolWithTag - the main executive pool allocator. Strategy, in
 * order: (1) special pool / verifier hooks; (2) requests too large for a
 * pool page go straight to MiAllocatePoolPages and are tracked in the big
 * page table (falling back to the ' GIB' tag if tracking fails); (3) small
 * requests try the per-CPU then global lookaside lists; (4) otherwise the
 * descriptor's free lists are scanned, splitting a larger free block into
 * an allocation plus a leftover fragment; (5) as a last resort a fresh page
 * is allocated and split the same way. Allocation failures honor
 * MUST_SUCCEED (bugcheck) and POOL_RAISE_IF_ALLOCATION_FAILURE flags.
 *
 * NOTE(review): this extracted listing is missing many original lines —
 * the function name/parameter lines, IRQL/flag sanity checks, the special
 * pool and verifier condition lines, the lookaside pop/counter statements,
 * several InterlockedIncrement/tracker calls, and the raise-status call.
 * Do not treat the visible text as the complete function.
 */
1848 PVOID
1849 NTAPI
1852  IN ULONG Tag)
1853 {
1854  PPOOL_DESCRIPTOR PoolDesc;
1855  PLIST_ENTRY ListHead;
1856  PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1857  KIRQL OldIrql;
1858  USHORT BlockSize, i;
1859  ULONG OriginalType;
1860  PKPRCB Prcb = KeGetCurrentPrcb();
1862 
1863  //
1864  // Some sanity checks
1865  //
1866  ASSERT(Tag != 0);
1867  ASSERT(Tag != ' GIB');
1868  ASSERT(NumberOfBytes != 0);
1870 
1871  //
1872  // Not supported in ReactOS
1873  //
1875 
1876  //
1877  // Check if verifier or special pool is enabled
1878  //
1880  {
1881  //
1882  // For verifier, we should call the verification routine
1883  //
1885  {
1886  DPRINT1("Driver Verifier is not yet supported\n");
1887  }
1888 
1889  //
1890  // For special pool, we check if this is a suitable allocation and do
1891  // the special allocation if needed
1892  //
1894  {
1895  //
1896  // Check if this is a special pool allocation
1897  //
1899  {
1900  //
1901  // Try to allocate using special pool
1902  //
1904  if (Entry) return Entry;
1905  }
1906  }
1907  }
1908 
1909  //
1910  // Get the pool type and its corresponding vector for this request
1911  //
1912  OriginalType = PoolType;
1914  PoolDesc = PoolVector[PoolType];
1915  ASSERT(PoolDesc != NULL);
1916 
1917  //
1918  // Check if this is a big page allocation
1919  //
1921  {
1922  //
1923  // Allocate pages for it
1924  //
1925  Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1926  if (!Entry)
1927  {
1928 #if DBG
1929  //
1930  // Out of memory, display current consumption
1931  // Let's consider that if the caller wanted more
1932  // than a hundred pages, that's a bogus caller
1933  // and we are not out of memory. Dump at most
1934  // once a second to avoid spamming the log.
1935  //
1936  if (NumberOfBytes < 100 * PAGE_SIZE &&
1937  KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
1938  {
1939  MiDumpPoolConsumers(FALSE, 0, 0, 0);
1941  }
1942 #endif
1943 
1944  //
1945  // Must succeed pool is deprecated, but still supported. These allocation
1946  // failures must cause an immediate bugcheck
1947  //
1948  if (OriginalType & MUST_SUCCEED_POOL_MASK)
1949  {
1950  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1951  NumberOfBytes,
1954  0);
1955  }
1956 
1957  //
1958  // Internal debugging
1959  //
1960  ExPoolFailures++;
1961 
1962  //
1963  // This flag requests printing failures, and can also further specify
1964  // breaking on failures
1965  //
1967  {
1968  DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1969  NumberOfBytes,
1970  OriginalType);
1972  }
1973 
1974  //
1975  // Finally, this flag requests an exception, which we are more than
1976  // happy to raise!
1977  //
1978  if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1979  {
1981  }
1982 
1983  return NULL;
1984  }
1985 
1986  //
1987  // Increment required counters
1988  //
1993 
1994  //
1995  // Add a tag for the big page allocation and switch to the generic "BIG"
1996  // tag if we failed to do so, then insert a tracker for this alloation.
1997  //
1999  Tag,
2001  OriginalType))
2002  {
2003  Tag = ' GIB';
2004  }
2006  return Entry;
2007  }
2008 
2009  //
2010  // Should never request 0 bytes from the pool, but since so many drivers do
2011  // it, we'll just assume they want 1 byte, based on NT's similar behavior
2012  //
2013  if (!NumberOfBytes) NumberOfBytes = 1;
2014 
2015  //
2016  // A pool allocation is defined by its data, a linked list to connect it to
2017  // the free list (if necessary), and a pool header to store accounting info.
2018  // Calculate this size, then convert it into a block size (units of pool
2019  // headers)
2020  //
2021  // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2022  // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2023  // the direct allocation of pages.
2024  //
2025  i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2026  / POOL_BLOCK_SIZE);
2028 
2029  //
2030  // Handle lookaside list optimization for both paged and nonpaged pool
2031  //
2033  {
2034  //
2035  // Try popping it from the per-CPU lookaside list
2036  //
2038  Prcb->PPPagedLookasideList[i - 1].P :
2039  Prcb->PPNPagedLookasideList[i - 1].P;
2040  LookasideList->TotalAllocates++;
2042  if (!Entry)
2043  {
2044  //
2045  // We failed, try popping it from the global list
2046  //
2048  Prcb->PPPagedLookasideList[i - 1].L :
2049  Prcb->PPNPagedLookasideList[i - 1].L;
2050  LookasideList->TotalAllocates++;
2052  }
2053 
2054  //
2055  // If we were able to pop it, update the accounting and return the block
2056  //
2057  if (Entry)
2058  {
2059  LookasideList->AllocateHits++;
2060 
2061  //
2062  // Get the real entry, write down its pool type, and track it
2063  //
2064  Entry--;
2065  Entry->PoolType = OriginalType + 1;
2067  Entry->BlockSize * POOL_BLOCK_SIZE,
2068  OriginalType);
2069 
2070  //
2071  // Return the pool allocation
2072  //
2073  Entry->PoolTag = Tag;
2074  (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2075  (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2076  return POOL_FREE_BLOCK(Entry);
2077  }
2078  }
2079 
2080  //
2081  // Loop in the free lists looking for a block if this size. Start with the
2082  // list optimized for this kind of size lookup
2083  //
2084  ListHead = &PoolDesc->ListHeads[i];
2085  do
2086  {
2087  //
2088  // Are there any free entries available on this list?
2089  //
2090  if (!ExpIsPoolListEmpty(ListHead))
2091  {
2092  //
2093  // Acquire the pool lock now
2094  //
2095  OldIrql = ExLockPool(PoolDesc);
2096 
2097  //
2098  // And make sure the list still has entries
2099  //
2100  if (ExpIsPoolListEmpty(ListHead))
2101  {
2102  //
2103  // Someone raced us (and won) before we had a chance to acquire
2104  // the lock.
2105  //
2106  // Try again!
2107  //
2108  ExUnlockPool(PoolDesc, OldIrql);
2109  continue;
2110  }
2111 
2112  //
2113  // Remove a free entry from the list
2114  // Note that due to the way we insert free blocks into multiple lists
2115  // there is a guarantee that any block on this list will either be
2116  // of the correct size, or perhaps larger.
2117  //
2118  ExpCheckPoolLinks(ListHead);
2119  Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2120  ExpCheckPoolLinks(ListHead);
2122  ASSERT(Entry->BlockSize >= i);
2123  ASSERT(Entry->PoolType == 0);
2124 
2125  //
2126  // Check if this block is larger that what we need. The block could
2127  // not possibly be smaller, due to the reason explained above (and
2128  // we would've asserted on a checked build if this was the case).
2129  //
2130  if (Entry->BlockSize != i)
2131  {
2132  //
2133  // Is there an entry before this one?
2134  //
2135  if (Entry->PreviousSize == 0)
2136  {
2137  //
2138  // There isn't anyone before us, so take the next block and
2139  // turn it into a fragment that contains the leftover data
2140  // that we don't need to satisfy the caller's request
2141  //
2142  FragmentEntry = POOL_BLOCK(Entry, i);
2143  FragmentEntry->BlockSize = Entry->BlockSize - i;
2144 
2145  //
2146  // And make it point back to us
2147  //
2148  FragmentEntry->PreviousSize = i;
2149 
2150  //
2151  // Now get the block that follows the new fragment and check
2152  // if it's still on the same page as us (and not at the end)
2153  //
2154  NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2155  if (PAGE_ALIGN(NextEntry) != NextEntry)
2156  {
2157  //
2158  // Adjust this next block to point to our newly created
2159  // fragment block
2160  //
2161  NextEntry->PreviousSize = FragmentEntry->BlockSize;
2162  }
2163  }
2164  else
2165  {
2166  //
2167  // There is a free entry before us, which we know is smaller
2168  // so we'll make this entry the fragment instead
2169  //
2170  FragmentEntry = Entry;
2171 
2172  //
2173  // And then we'll remove from it the actual size required.
2174  // Now the entry is a leftover free fragment
2175  //
2176  Entry->BlockSize -= i;
2177 
2178  //
2179  // Now let's go to the next entry after the fragment (which
2180  // used to point to our original free entry) and make it
2181  // reference the new fragment entry instead.
2182  //
2183  // This is the entry that will actually end up holding the
2184  // allocation!
2185  //
2187  Entry->PreviousSize = FragmentEntry->BlockSize;
2188 
2189  //
2190  // And now let's go to the entry after that one and check if
2191  // it's still on the same page, and not at the end
2192  //
2193  NextEntry = POOL_BLOCK(Entry, i);
2194  if (PAGE_ALIGN(NextEntry) != NextEntry)
2195  {
2196  //
2197  // Make it reference the allocation entry
2198  //
2199  NextEntry->PreviousSize = i;
2200  }
2201  }
2202 
2203  //
2204  // Now our (allocation) entry is the right size
2205  //
2206  Entry->BlockSize = i;
2207 
2208  //
2209  // And the next entry is now the free fragment which contains
2210  // the remaining difference between how big the original entry
2211  // was, and the actual size the caller needs/requested.
2212  //
2213  FragmentEntry->PoolType = 0;
2214  BlockSize = FragmentEntry->BlockSize;
2215 
2216  //
2217  // Now check if enough free bytes remained for us to have a
2218  // "full" entry, which contains enough bytes for a linked list
2219  // and thus can be used for allocations (up to 8 bytes...)
2220  //
2221  ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2222  if (BlockSize != 1)
2223  {
2224  //
2225  // Insert the free entry into the free list for this size
2226  //
2227  ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2228  POOL_FREE_BLOCK(FragmentEntry));
2229  ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2230  }
2231  }
2232 
2233  //
2234  // We have found an entry for this allocation, so set the pool type
2235  // and release the lock since we're done
2236  //
2237  Entry->PoolType = OriginalType + 1;
2239  ExUnlockPool(PoolDesc, OldIrql);
2240 
2241  //
2242  // Increment required counters
2243  //
2244  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2246 
2247  //
2248  // Track this allocation
2249  //
2251  Entry->BlockSize * POOL_BLOCK_SIZE,
2252  OriginalType);
2253 
2254  //
2255  // Return the pool allocation
2256  //
2257  Entry->PoolTag = Tag;
2258  (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2259  (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2260  return POOL_FREE_BLOCK(Entry);
2261  }
2262  } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2263 
2264  //
2265  // There were no free entries left, so we have to allocate a new fresh page
2266  //
2267  Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2268  if (!Entry)
2269  {
2270 #if DBG
2271  //
2272  // Out of memory, display current consumption
2273  // Let's consider that if the caller wanted more
2274  // than a hundred pages, that's a bogus caller
2275  // and we are not out of memory. Dump at most
2276  // once a second to avoid spamming the log.
2277  //
2278  if (NumberOfBytes < 100 * PAGE_SIZE &&
2279  KeQueryInterruptTime() >= MiLastPoolDumpTime + 10000000)
2280  {
2281  MiDumpPoolConsumers(FALSE, 0, 0, 0);
2283  }
2284 #endif
2285 
2286  //
2287  // Must succeed pool is deprecated, but still supported. These allocation
2288  // failures must cause an immediate bugcheck
2289  //
2290  if (OriginalType & MUST_SUCCEED_POOL_MASK)
2291  {
2292  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2293  PAGE_SIZE,
2296  0);
2297  }
2298 
2299  //
2300  // Internal debugging
2301  //
2302  ExPoolFailures++;
2303 
2304  //
2305  // This flag requests printing failures, and can also further specify
2306  // breaking on failures
2307  //
2309  {
2310  DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2311  NumberOfBytes,
2312  OriginalType);
2314  }
2315 
2316  //
2317  // Finally, this flag requests an exception, which we are more than
2318  // happy to raise!
2319  //
2320  if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2321  {
2323  }
2324 
2325  //
2326  // Return NULL to the caller in all other cases
2327  //
2328  return NULL;
2329  }
2330 
2331  //
2332  // Setup the entry data
2333  //
2334  Entry->Ulong1 = 0;
2335  Entry->BlockSize = i;
2336  Entry->PoolType = OriginalType + 1;
2337 
2338  //
2339  // This page will have two entries -- one for the allocation (which we just
2340  // created above), and one for the remaining free bytes, which we're about
2341  // to create now. The free bytes are the whole page minus what was allocated
2342  // and then converted into units of block headers.
2343  //
2344  BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2345  FragmentEntry = POOL_BLOCK(Entry, i);
2346  FragmentEntry->Ulong1 = 0;
2347  FragmentEntry->BlockSize = BlockSize;
2348  FragmentEntry->PreviousSize = i;
2349 
2350  //
2351  // Increment required counters
2352  //
2353  InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2354  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2355 
2356  //
2357  // Now check if enough free bytes remained for us to have a "full" entry,
2358  // which contains enough bytes for a linked list and thus can be used for
2359  // allocations (up to 8 bytes...)
2360  //
2361  if (FragmentEntry->BlockSize != 1)
2362  {
2363  //
2364  // Excellent -- acquire the pool lock
2365  //
2366  OldIrql = ExLockPool(PoolDesc);
2367 
2368  //
2369  // And insert the free entry into the free list for this block size
2370  //
2371  ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2372  ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2373  POOL_FREE_BLOCK(FragmentEntry));
2374  ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2375 
2376  //
2377  // Release the pool lock
2378  //
2380  ExUnlockPool(PoolDesc, OldIrql);
2381  }
2382  else
2383  {
2384  //
2385  // Simply do a sanity check
2386  //
2388  }
2389 
2390  //
2391  // Increment performance counters and track this allocation
2392  //
2395  Entry->BlockSize * POOL_BLOCK_SIZE,
2396  OriginalType);
2397 
2398  //
2399  // And return the pool allocation
2400  //
2402  Entry->PoolTag = Tag;
2403  return POOL_FREE_BLOCK(Entry);
2404 }
2405 
2406 /*
2407  * @implemented
2408  */
/*
 * ExAllocatePool - legacy tagless allocator. Uses the TAG_NONE ("None")
 * tag; the disabled DBG block would instead derive a four-character tag
 * from the calling driver's image name.
 *
 * NOTE(review): this extracted listing is missing the function name and
 * parameter lines and the trailing return statement (presumably a
 * forwarding call to ExAllocatePoolWithTag with the computed Tag) — do not
 * treat the visible text as the complete function.
 */
2409 PVOID
2410 NTAPI
2413 {
2414  ULONG Tag = TAG_NONE;
2415 #if 0 && DBG
2416  PLDR_DATA_TABLE_ENTRY LdrEntry;
2417 
2418  /* Use the first four letters of the driver name, or "None" if unavailable */
2419  LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2421  : NULL;
2422  if (LdrEntry)
2423  {
2424  ULONG i;
2425  Tag = 0;
2426  for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2427  Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2428  for (; i < 4; i++)
2429  Tag = Tag >> 8 | ' ' << 24;
2430  }
2431 #endif
2433 }
2434 
2435 /*
2436  * @implemented
2437  */
2438 VOID
2439 NTAPI
2441  IN ULONG TagToFree)
2442 {
2443  PPOOL_HEADER Entry, NextEntry;
2444  USHORT BlockSize;
2445  KIRQL OldIrql;
2447  PPOOL_DESCRIPTOR PoolDesc;
2448  ULONG Tag;
2449  BOOLEAN Combined = FALSE;
2450  PFN_NUMBER PageCount, RealPageCount;
2451  PKPRCB Prcb = KeGetCurrentPrcb();
2454 
2455  //
2456  // Check if any of the debug flags are enabled
2457  //
2464  {
2465  //
2466  // Check if special pool is enabled
2467  //
2469  {
2470  //
2471  // Check if it was allocated from a special pool
2472  //
2474  {
2475  //
2476  // Was deadlock verification also enabled? We can do some extra
2477  // checks at this point
2478  //
2480  {
2481  DPRINT1("Verifier not yet supported\n");
2482  }
2483 
2484  //
2485  // It is, so handle it via special pool free routine
2486  //
2488  return;
2489  }
2490  }
2491 
2492  //
2493  // For non-big page allocations, we'll do a bunch of checks in here
2494  //
2495  if (PAGE_ALIGN(P) != P)
2496  {
2497  //
2498  // Get the entry for this pool allocation
2499  // The pointer math here may look wrong or confusing, but it is quite right
2500  //
2501  Entry = P;
2502  Entry--;
2503 
2504  //
2505  // Get the pool type
2506  //
2507  PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2508 
2509  //
2510  // FIXME: Many other debugging checks go here
2511  //
2513  }
2514  }
2515 
2516  //
2517  // Check if this is a big page allocation
2518  //
2519  if (PAGE_ALIGN(P) == P)
2520  {
2521  //
2522  // We need to find the tag for it, so first we need to find out what
2523  // kind of allocation this was (paged or nonpaged), then we can go
2524  // ahead and try finding the tag for it. Remember to get rid of the
2525  // PROTECTED_POOL tag if it's found.
2526  //
2527  // Note that if at insertion time, we failed to add the tag for a big
2528  // pool allocation, we used a special tag called 'BIG' to identify the
2529  // allocation, and we may get this tag back. In this scenario, we must
2530  // manually get the size of the allocation by actually counting through
2531  // the PFN database.
2532  //
2535  Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2536  if (!Tag)
2537  {
2538  DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2539  ASSERT(Tag == ' GIB');
2540  PageCount = 1; // We are going to lie! This might screw up accounting?
2541  }
2542  else if (Tag & PROTECTED_POOL)
2543  {
2544  Tag &= ~PROTECTED_POOL;
2545  }
2546 
2547  //
2548  // Check block tag
2549  //
2550  if (TagToFree && TagToFree != Tag)
2551  {
2552  DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2553 #if DBG
2554  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2555 #endif
2556  }
2557 
2558  //
2559  // We have our tag and our page count, so we can go ahead and remove this
2560  // tracker now
2561  //
2562  ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2563 
2564  //
2565  // Check if any of the debug flags are enabled
2566  //
2571  {
2572  //
2573  // Was deadlock verification also enabled? We can do some extra
2574  // checks at this point
2575  //
2577  {
2578  DPRINT1("Verifier not yet supported\n");
2579  }
2580 
2581  //
2582  // FIXME: Many debugging checks go here
2583  //
2584  }
2585 
2586  //
2587  // Update counters
2588  //
2589  PoolDesc = PoolVector[PoolType];
2592  -(LONG_PTR)(PageCount << PAGE_SHIFT));
2593 
2594  //
2595  // Do the real free now and update the last counter with the big page count
2596  //
2597  RealPageCount = MiFreePoolPages(P);
2598  ASSERT(RealPageCount == PageCount);
2600  -(LONG)RealPageCount);
2601  return;
2602  }
2603 
2604  //
2605  // Get the entry for this pool allocation
2606  // The pointer math here may look wrong or confusing, but it is quite right
2607  //
2608  Entry = P;
2609  Entry--;
2611 
2612  //
2613  // Get the size of the entry, and it's pool type, then load the descriptor
2614  // for this pool type
2615  //
2616  BlockSize = Entry->BlockSize;
2617  PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2618  PoolDesc = PoolVector[PoolType];
2619 
2620  //
2621  // Make sure that the IRQL makes sense
2622  //
2624 
2625  //
2626  // Get the pool tag and get rid of the PROTECTED_POOL flag
2627  //
2628  Tag = Entry->PoolTag;
2629  if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2630 
2631  //
2632  // Check block tag
2633  //
2634  if (TagToFree && TagToFree != Tag)
2635  {
2636  DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2637 #if DBG
2638  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2639 #endif
2640  }
2641 
2642  //
2643  // Track the removal of this allocation
2644  //
2646  BlockSize * POOL_BLOCK_SIZE,
2647  Entry->PoolType - 1);
2648 
2649  //
2650  // Release pool quota, if any
2651  //
2652  if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2653  {
2654  Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2655  if (Process)
2656  {
2657  if (Process->Pcb.Header.Type != ProcessObject)
2658  {
2659  DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2660  Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2661  KeBugCheckEx(BAD_POOL_CALLER,
2662  0x0D,
2663  (ULONG_PTR)P,
2664  Tag,
2665  (ULONG_PTR)Process);
2666  }
2669  }
2670  }
2671 
2672  //
2673  // Is this allocation small enough to have come from a lookaside list?
2674  //
2675  if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2676  {
2677  //
2678  // Try pushing it into the per-CPU lookaside list
2679  //
2681  Prcb->PPPagedLookasideList[BlockSize - 1].P :
2682  Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2683  LookasideList->TotalFrees++;
2684  if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2685  {
2686  LookasideList->FreeHits++;
2688  return;
2689  }
2690 
2691  //
2692  // We failed, try to push it into the global lookaside list
2693  //
2695  Prcb->PPPagedLookasideList[BlockSize - 1].L :
2696  Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2697  LookasideList->TotalFrees++;
2698  if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2699  {
2700  LookasideList->FreeHits++;
2702  return;
2703  }
2704  }
2705 
2706  //
2707  // Get the pointer to the next entry
2708  //
2709  NextEntry = POOL_BLOCK(Entry, BlockSize);
2710 
2711  //
2712  // Update performance counters
2713  //
2715  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2716 
2717  //
2718  // Acquire the pool lock
2719  //
2720  OldIrql = ExLockPool(PoolDesc);
2721 
2722  //
2723  // Check if the next allocation is at the end of the page
2724  //
2726  if (PAGE_ALIGN(NextEntry) != NextEntry)
2727  {
2728  //
2729  // We may be able to combine the block if it's free
2730  //
2731  if (NextEntry->PoolType == 0)
2732  {
2733  //
2734  // The next block is free, so we'll do a combine
2735  //
2736  Combined = TRUE;
2737 
2738  //
2739  // Make sure there's actual data in the block -- anything smaller
2740  // than this means we only have the header, so there's no linked list
2741  // for us to remove
2742  //
2743  if ((NextEntry->BlockSize != 1))
2744  {
2745  //
2746  // The block is at least big enough to have a linked list, so go
2747  // ahead and remove it
2748  //
2749  ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2751  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2752  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2753  }
2754 
2755  //
2756  // Our entry is now combined with the next entry
2757  //
2758  Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2759  }
2760  }
2761 
2762  //
2763  // Now check if there was a previous entry on the same page as us
2764  //
2765  if (Entry->PreviousSize)
2766  {
2767  //
2768  // Great, grab that entry and check if it's free
2769  //
2770  NextEntry = POOL_PREV_BLOCK(Entry);
2771  if (NextEntry->PoolType == 0)
2772  {
2773  //
2774  // It is, so we can do a combine
2775  //
2776  Combined = TRUE;
2777 
2778  //
2779  // Make sure there's actual data in the block -- anything smaller
2780  // than this means we only have the header so there's no linked list
2781  // for us to remove
2782  //
2783  if ((NextEntry->BlockSize != 1))
2784  {
2785  //
2786  // The block is at least big enough to have a linked list, so go
2787  // ahead and remove it
2788  //
2789  ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2791  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2792  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2793  }
2794 
2795  //
2796  // Combine our original block (which might've already been combined
2797  // with the next block), into the previous block
2798  //
2799  NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2800 
2801  //
2802  // And now we'll work with the previous block instead
2803  //
2804  Entry = NextEntry;
2805  }
2806  }
2807 
2808  //
2809  // By now, it may have been possible for our combined blocks to actually
2810  // have made up a full page (if there were only 2-3 allocations on the
2811  // page, they could've all been combined).
2812  //
2813  if ((PAGE_ALIGN(Entry) == Entry) &&
2815  {
2816  //
2817  // In this case, release the pool lock, update the performance counter,
2818  // and free the page
2819  //
2820  ExUnlockPool(PoolDesc, OldIrql);
2821  InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2823  return;
2824  }
2825 
2826  //
2827  // Otherwise, we now have a free block (or a combination of 2 or 3)
2828  //
2829  Entry->PoolType = 0;
2830  BlockSize = Entry->BlockSize;
2831  ASSERT(BlockSize != 1);
2832 
2833  //
2834  // Check if we actually did combine it with anyone
2835  //
2836  if (Combined)
2837  {
2838  //
2839  // Get the first combined block (either our original to begin with, or
2840  // the one after the original, depending if we combined with the previous)
2841  //
2842  NextEntry = POOL_NEXT_BLOCK(Entry);
2843 
2844  //
2845  // As long as the next block isn't on a page boundary, have it point
2846  // back to us
2847  //
2848  if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2849  }
2850 
2851  //
2852  // Insert this new free block, and release the pool lock
2853  //
2854  ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2856  ExUnlockPool(PoolDesc, OldIrql);
2857 }
2858 
2859 /*
2860  * @implemented
2861  */
2862 VOID
2863 NTAPI
2865 {
2866  //
2867  // Just free without checking for the tag
2868  //
2869  ExFreePoolWithTag(P, 0);
2870 }
2871 
2872 /*
2873  * @unimplemented
2874  */
2875 SIZE_T
2876 NTAPI
2879 {
2880  //
2881  // Not implemented
2882  //
2883  UNIMPLEMENTED;
2884  return FALSE;
2885 }
2886 
2887 /*
2888  * @implemented
2889  */
2890 
2891 PVOID
2892 NTAPI
2895 {
2896  //
2897  // Allocate the pool
2898  //
2900 }
2901 
2902 /*
2903  * @implemented
2904  */
2905 PVOID
2906 NTAPI
2909  IN ULONG Tag,
2911 {
2912  PVOID Buffer;
2913 
2914  //
2915  // Allocate the pool
2916  //
2918  if (Buffer == NULL)
2919  {
2920  UNIMPLEMENTED;
2921  }
2922 
2923  return Buffer;
2924 }
2925 
2926 /*
2927  * @implemented
2928  */
2929 PVOID
2930 NTAPI
2933  IN ULONG Tag)
2934 {
2935  BOOLEAN Raise = TRUE;
2936  PVOID Buffer;
2938  NTSTATUS Status;
2940 
2941  //
2942  // Check if we should fail instead of raising an exception
2943  //
2945  {
2946  Raise = FALSE;
2948  }
2949 
2950  //
2951  // Inject the pool quota mask
2952  //
2954 
2955  //
2956  // Check if we have enough space to add the quota owner process, as long as
2957  // this isn't the system process, which never gets charged quota
2958  //
2959  ASSERT(NumberOfBytes != 0);
2960  if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2962  {
2963  //
2964  // Add space for our EPROCESS pointer
2965  //
2966  NumberOfBytes += sizeof(PEPROCESS);
2967  }
2968  else
2969  {
2970  //
2971  // We won't be able to store the pointer, so don't use quota for this
2972  //
2974  }
2975 
2976  //
2977  // Allocate the pool buffer now
2978  //
2980 
2981  //
2982  // If the buffer is page-aligned, this is a large page allocation and we
2983  // won't touch it
2984  //
2985  if (PAGE_ALIGN(Buffer) != Buffer)
2986  {
2987  //
2988  // Also if special pool is enabled, and this was allocated from there,
2989  // we won't touch it either
2990  //
2993  {
2994  return Buffer;
2995  }
2996 
2997  //
2998  // If it wasn't actually allocated with quota charges, ignore it too
2999  //
3000  if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
3001 
3002  //
3003  // If this is the system process, we don't charge quota, so ignore
3004  //
3005  if (Process == PsInitialSystemProcess) return Buffer;
3006 
3007  //
3008  // Actually go and charge quota for the process now
3009  //
3010  Entry = POOL_ENTRY(Buffer);
3013  Entry->BlockSize * POOL_BLOCK_SIZE);
3014  if (!NT_SUCCESS(Status))
3015  {
3016  //
3017  // Quota failed, back out the allocation, clear the owner, and fail
3018  //
3019  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3021  if (Raise) RtlRaiseStatus(Status);
3022  return NULL;
3023  }
3024 
3025  //
3026  // Quota worked, write the owner and then reference it before returning
3027  //
3028  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3030  }
3031  else if (!(Buffer) && (Raise))
3032  {
3033  //
3034  // The allocation failed, raise an error if we are in raise mode
3035  //
3037  }
3038 
3039  //
3040  // Return the allocated buffer
3041  //
3042  return Buffer;
3043 }
3044 
3045 /* EOF */
_IRQL_requires_(DISPATCH_LEVEL)
Definition: expool.c:1448
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
PVOID NTAPI ExAllocatePoolWithTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:1850
static int Hash(const char *)
Definition: reader.c:2257
INIT_FUNCTION VOID NTAPI InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:1009
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:39
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define IN
Definition: typedefs.h:38
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:625
#define max(a, b)
Definition: svc.c:63
ASMGENDATA Table[]
Definition: genincdata.c:61
#define TRUE
Definition: types.h:120
NTSYSAPI VOID NTAPI RtlCopyMemory(VOID UNALIGNED *Destination, CONST VOID UNALIGNED *Source, ULONG Length)
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:91
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:131
ULONG PagedAllocs
Definition: extypes.h:1129
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:113
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
SIZE_T PoolTrackTableMask
Definition: expool.c:38
#define SESSION_POOL_MASK
Definition: mm.h:102
VOID MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
IN PLARGE_INTEGER IN PLARGE_INTEGER PEPROCESS ULONG Key
Definition: fatprocs.h:2697
PVOID NTAPI ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:2931
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
NTSTATUS NTAPI PsChargeProcessPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:219
LIST_ENTRY ExPoolLookasideListHead
Definition: lookas.c:26
#define POOL_FLAG_VERIFIER
Definition: miarm.h:295
struct _Entry Entry
Definition: kefuncs.h:640
SIZE_T PoolTrackTableSize
Definition: expool.c:28
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:288
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
BOOL Verbose
Definition: chkdsk.c:72
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:323
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:297
struct _LIST_ENTRY * Blink
Definition: typedefs.h:120
#define ExReleaseSpinLock(Lock, OldIrql)
char CHAR
Definition: xmlstorage.h:175
LONG NTSTATUS
Definition: precomp.h:26
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1062
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:84
_In_ KPRIORITY Priority
Definition: kefuncs.h:516
SIZE_T PoolTrackTableSize
Definition: expool.c:38
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3330
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:299
#define ExRaiseStatus
Definition: ntoskrnl.h:96
#define MAXULONG_PTR
Definition: basetsd.h:103
#define POOL_PREV_BLOCK(x)
Definition: expool.c:57
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:120
SIZE_T TotalBytes
Definition: miarm.h:335
LONG_PTR SSIZE_T
Definition: basetsd.h:183
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1798
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:170
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:319
VOID NTAPI ObDereferenceObject(IN PVOID Object)
Definition: obref.c:375
ULONG NonPagedFrees
Definition: extypes.h:1133
#define NT_VERIFY(exp)
Definition: rtlfuncs.h:3289
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:37
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:106
void DbgBreakPoint()
Definition: mach.c:553
#define TAG_NONE
Definition: tag.h:127
ULONG RunningDeAllocs
Definition: miarm.h:328
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:337
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
#define ExAcquireSpinLock(Lock, OldIrql)
_Must_inspect_result_ FORCEINLINE BOOLEAN IsListEmpty(_In_ const LIST_ENTRY *ListHead)
Definition: rtlfuncs.h:57
ULONG PoolHitTag
Definition: expool.c:44
static int Link(const char **args)
Definition: vfdcmd.c:2414
PSLIST_ENTRY WINAPI InterlockedPopEntrySList(PSLIST_HEADER ListHead)
Definition: interlocked.c:55
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1539
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1345
uint32_t ULONG_PTR
Definition: typedefs.h:63
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE GENERAL_LOOKASIDE
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:46
FORCEINLINE ULONG KeGetCurrentProcessorNumber(VOID)
Definition: ke.h:325
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:292
UCHAR KIRQL
Definition: env_spec_w32.h:591
#define POOL_FREE_BLOCK(x)
Definition: expool.c:54
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
ULONG PFN_NUMBER
Definition: ke.h:8
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:30
NTSTATUS(* NTAPI)(IN PFILE_FULL_EA_INFORMATION EaBuffer, IN ULONG EaLength, OUT PULONG ErrorOffset)
Definition: IoEaTest.cpp:117
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:36
long LONG
Definition: pedump.c:60
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:56
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
SIZE_T PoolBigPageTableSize
Definition: expool.c:39
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
VOID NTAPI PsReturnPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:236
#define POOL_BLOCK(x, i)
Definition: expool.c:55
#define PsGetCurrentProcess
Definition: psfuncs.h:17
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:29
FORCEINLINE VOID KeInitializeSpinLock(_Out_ PKSPIN_LOCK SpinLock)
Definition: kefuncs.h:251
_Must_inspect_result_ _In_ LPCGUID ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _In_ ULONG PoolTag
Definition: fltkernel.h:2520
unsigned char BOOLEAN
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:185
smooth NULL
Definition: ftsmooth.c:416
#define POOL_ENTRY(x)
Definition: expool.c:53
ULONG ExpNumberOfPagedPools
Definition: expool.c:33
#define FORCEINLINE
Definition: ntbasedef.h:221
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:426
IN PSCSI_REQUEST_BLOCK IN OUT NTSTATUS IN OUT BOOLEAN * Retry
Definition: class2.h:49
void DPRINT(...)
Definition: polytest.cpp:61
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:47
SIZE_T NonPagedBytes
Definition: miarm.h:386
Definition: bufpool.h:45
ULONG NonPagedAllocs
Definition: extypes.h:1132
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1640
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
void * PVOID
Definition: retypes.h:9
struct _POOL_HEADER * PPOOL_HEADER
#define InterlockedExchangeAdd
Definition: interlocked.h:181
PFLT_MESSAGE_WAITER_QUEUE CONTAINING_RECORD(Csq, DEVICE_EXTENSION, IrpQueue)) -> WaiterQ.mLock) _IRQL_raises_(DISPATCH_LEVEL) VOID NTAPI FltpAcquireMessageWaiterLock(_In_ PIO_CSQ Csq, _Out_ PKIRQL Irql)
Definition: Messaging.c:560
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:318
PVOID NTAPI ExAllocatePool(POOL_TYPE PoolType, SIZE_T NumberOfBytes)
Definition: expool.c:2411
SIZE_T NonPagedUsed
Definition: extypes.h:1134
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:368
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:326
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:675
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:77
struct _LIST_ENTRY * Flink
Definition: typedefs.h:119
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
_In_ LARGE_INTEGER _In_opt_ PKDPC Dpc
Definition: kefuncs.h:524
BOOLEAN ExStopBadTags
Definition: expool.c:45
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:406
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:748
_Inout_ PVOID Lookaside
Definition: fltkernel.h:2532
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:945
ULONG RunningAllocs
Definition: miarm.h:327
if(!(yy_init))
Definition: macro.lex.yy.c:714
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:41
__wchar_t WCHAR
Definition: xmlstorage.h:180
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:32
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:35
INT POOL_TYPE
Definition: typedefs.h:76
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1310
uint64_t ULONGLONG
Definition: typedefs.h:65
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:34
VOID KdbpPrint(IN PCHAR Format, IN ... OPTIONAL)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:2651
ULONG ExpBigTableExpansionFailed
Definition: expool.c:40
SIZE_T PagedUsed
Definition: extypes.h:1131
static const UCHAR Index[8]
Definition: usbohci.c:18
#define PROTECTED_POOL
Definition: extypes.h:294
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
#define PAGE_ALIGN(Va)
LONG NonPagedFrees
Definition: miarm.h:385
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:27
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:99
SIZE_T PagedBytes
Definition: miarm.h:389
ULONG TotalPages
Definition: miarm.h:329
ASSERT((InvokeOnSuccess||InvokeOnError||InvokeOnCancel) ?(CompletionRoutine !=NULL) :TRUE)
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:420
#define BYTES_TO_PAGES(Size)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:905
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:155
char * PBOOLEAN
Definition: retypes.h:11
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1262
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:803
Definition: ketypes.h:687
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:408
Definition: btrfs_drv.h:1853
#define InterlockedDecrementUL(Addend)
Definition: ex.h:1510
ULONG PagedFrees
Definition: extypes.h:1130
_Must_inspect_result_ _In_ USHORT NewSize
Definition: fltkernel.h:975
#define PAGE_SIZE
Definition: env_spec_w32.h:49
Definition: typedefs.h:117
NTKERNELAPI PSLIST_ENTRY FASTCALL InterlockedPushEntrySList(IN PSLIST_HEADER ListHead, IN PSLIST_ENTRY ListEntry)
Definition: interlocked.c:82
Definition: copy.c:32
IN PVOID IN PVOID IN USHORT IN USHORT Size
Definition: pci.h:359
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2893
#define P(row, col)
UCHAR KeNumberNodes
Definition: krnlinit.c:40
Status
Definition: gdiplustypes.h:24
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:675
INIT_FUNCTION VOID NTAPI ExpSeedHotTags(VOID)
Definition: expool.c:628
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
ULONG TotalBigPages
Definition: miarm.h:330
#define _In_
Definition: no_sal2.h:204
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:296
ULONG_PTR SIZE_T
Definition: typedefs.h:78
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define InterlockedIncrement
Definition: armddk.h:53
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:1012
USHORT PreviousSize
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:286
#define ROUND_TO_PAGES(Size)
unsigned short USHORT
Definition: pedump.c:61
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:983
#define InterlockedIncrementUL(Addend)
Definition: ex.h:1513
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2877
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:293
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:145
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:254
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2907
ULONG ExPoolFailures
Definition: expool.c:49
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:839
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
unsigned int * PULONG
Definition: retypes.h:1
#define min(a, b)
Definition: monoChain.cc:55
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:433
INIT_FUNCTION VOID NTAPI ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:958
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:298
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:449
#define DPRINT1
Definition: precomp.h:8
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:3947
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:285
IN ULONG IN ULONG Tag
Definition: evtlib.h:159
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:42
SIZE_T PoolBigPageTableHash
Definition: expool.c:39
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:219
void * _ReturnAddress(void)
LONG NonPagedAllocs
Definition: miarm.h:384
#define OUT
Definition: typedefs.h:39
#define ObReferenceObject
Definition: obfuncs.h:204
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:3971
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1286
ULONG ExpPoolFlags
Definition: expool.c:48
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _Inout_ PLARGE_INTEGER NumberOfBytes
Definition: iotypes.h:998
struct tagContext Context
Definition: acpixf.h:1030
unsigned int ULONG
Definition: retypes.h:1
VOID NTAPI ExFreePool(PVOID P)
Definition: expool.c:2864
#define UNIMPLEMENTED
Definition: debug.h:114
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:261
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
#define ULONG_PTR
Definition: config.h:101
uint32_t * PULONG_PTR
Definition: typedefs.h:63
#define ALIGN_UP_BY(size, align)
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
ULONG TagUlong
Definition: extypes.h:1127
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
#define POOL_MAX_ALLOC
Definition: miarm.h:287
#define POOL_RAISE_IF_ALLOCATION_FAILURE
#define POOL_BLOCK_SIZE
Definition: miarm.h:283
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:143
IN BOOLEAN OUT PSTR Buffer
Definition: progress.h:34
VOID NTAPI ExFreePoolWithTag(IN PVOID P, IN ULONG TagToFree)
Definition: expool.c:2440
return STATUS_SUCCESS
Definition: btrfs.c:2938
_Must_inspect_result_ _In_ FLT_CONTEXT_TYPE _In_ SIZE_T _In_ POOL_TYPE PoolType
Definition: fltkernel.h:1444
signed int * PLONG
Definition: retypes.h:5
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
#define APC_LEVEL
Definition: env_spec_w32.h:695
ULONGLONG NTAPI KeQueryInterruptTime(VOID)
Definition: clock.c:203
base of all file and directory entries
Definition: entries.h:82
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:107
ULONGLONG MiLastPoolDumpTime
Definition: expool.c:50
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1714
#define _IRQL_restores_
Definition: no_sal2.h:653
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2551
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1025
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:294
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:626
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:43
_In_ PSTORAGE_PROPERTY_ID _Outptr_ PSTORAGE_DESCRIPTOR_HEADER * Descriptor
Definition: classpnp.h:966
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:675