/*
 * Extracted from ReactOS 0.4.12-dev-375-g61fed54 — ntoskrnl/mm/ARM3/expool.c
 * (doxygen page header converted to a comment so the file parses as C)
 */
1 /*
2  * PROJECT: ReactOS Kernel
3  * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE: ntoskrnl/mm/ARM3/expool.c
5  * PURPOSE: ARM Memory Manager Executive Pool Manager
6  * PROGRAMMERS: ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20 
21 /* GLOBALS ********************************************************************/
22 
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24 
25 typedef struct _POOL_DPC_CONTEXT
26 {
32 
50 
/* Pool block/header/list access macros */
/* POOL_ENTRY: user pointer -> its POOL_HEADER (header sits just before the data) */
#define POOL_ENTRY(x) (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* POOL_FREE_BLOCK: header -> the LIST_ENTRY stored in the (free) block body */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* POOL_BLOCK: header x displaced by i pool blocks (i may be negative) */
#define POOL_BLOCK(x, i) (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Neighbours of a block, derived from the sizes stored in its header */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
57 
/*
 * Pool list access debug macros, similar to Arthur's pfnlist.c work.
 * Microsoft actually implements similar checks in the Windows Server 2003 SP1
 * pool code, but only for checked builds.
 *
 * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
 * that these checks are done even on retail builds, due to the increasing
 * number of kernel-mode attacks which depend on dangling list pointers and other
 * kinds of list-based attacks.
 *
 * For now, I will leave these checks on all the time, but later they are likely
 * to be DBG-only, at least until there are enough kernel-mode security attacks
 * against ReactOS to warrant the performance hit.
 *
 * For now, these are not made inline, so we can get good stack traces.
 */
75 NTAPI
77 {
78  return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
79 }
80 
82 NTAPI
84 {
85  return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
86 }
87 
88 VOID
89 NTAPI
91 {
92  if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
93  (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
94  {
95  KeBugCheckEx(BAD_POOL_HEADER,
96  3,
97  (ULONG_PTR)ListHead,
100  }
101 }
102 
103 VOID
104 NTAPI
106 {
107  ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
108 }
109 
110 BOOLEAN
111 NTAPI
113 {
114  return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
115 }
116 
117 VOID
118 NTAPI
120 {
121  PLIST_ENTRY Blink, Flink;
122  Flink = ExpDecodePoolLink(Entry->Flink);
123  Blink = ExpDecodePoolLink(Entry->Blink);
124  Flink->Blink = ExpEncodePoolLink(Blink);
125  Blink->Flink = ExpEncodePoolLink(Flink);
126 }
127 
129 NTAPI
131 {
132  PLIST_ENTRY Entry, Flink;
133  Entry = ExpDecodePoolLink(ListHead->Flink);
134  Flink = ExpDecodePoolLink(Entry->Flink);
135  ListHead->Flink = ExpEncodePoolLink(Flink);
136  Flink->Blink = ExpEncodePoolLink(ListHead);
137  return Entry;
138 }
139 
141 NTAPI
143 {
144  PLIST_ENTRY Entry, Blink;
145  Entry = ExpDecodePoolLink(ListHead->Blink);
146  Blink = ExpDecodePoolLink(Entry->Blink);
147  ListHead->Blink = ExpEncodePoolLink(Blink);
148  Blink->Flink = ExpEncodePoolLink(ListHead);
149  return Entry;
150 }
151 
152 VOID
153 NTAPI
156 {
157  PLIST_ENTRY Blink;
158  ExpCheckPoolLinks(ListHead);
159  Blink = ExpDecodePoolLink(ListHead->Blink);
160  Entry->Flink = ExpEncodePoolLink(ListHead);
161  Entry->Blink = ExpEncodePoolLink(Blink);
162  Blink->Flink = ExpEncodePoolLink(Entry);
163  ListHead->Blink = ExpEncodePoolLink(Entry);
164  ExpCheckPoolLinks(ListHead);
165 }
166 
167 VOID
168 NTAPI
171 {
172  PLIST_ENTRY Flink;
173  ExpCheckPoolLinks(ListHead);
174  Flink = ExpDecodePoolLink(ListHead->Flink);
175  Entry->Flink = ExpEncodePoolLink(Flink);
176  Entry->Blink = ExpEncodePoolLink(ListHead);
177  Flink->Blink = ExpEncodePoolLink(Entry);
178  ListHead->Flink = ExpEncodePoolLink(Entry);
179  ExpCheckPoolLinks(ListHead);
180 }
181 
182 VOID
183 NTAPI
185 {
186  PPOOL_HEADER PreviousEntry, NextEntry;
187 
188  /* Is there a block before this one? */
189  if (Entry->PreviousSize)
190  {
191  /* Get it */
192  PreviousEntry = POOL_PREV_BLOCK(Entry);
193 
194  /* The two blocks must be on the same page! */
195  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
196  {
197  /* Something is awry */
198  KeBugCheckEx(BAD_POOL_HEADER,
199  6,
200  (ULONG_PTR)PreviousEntry,
201  __LINE__,
202  (ULONG_PTR)Entry);
203  }
204 
205  /* This block should also indicate that it's as large as we think it is */
206  if (PreviousEntry->BlockSize != Entry->PreviousSize)
207  {
208  /* Otherwise, someone corrupted one of the sizes */
209  DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
210  PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
211  Entry->PreviousSize, (char *)&Entry->PoolTag);
212  KeBugCheckEx(BAD_POOL_HEADER,
213  5,
214  (ULONG_PTR)PreviousEntry,
215  __LINE__,
216  (ULONG_PTR)Entry);
217  }
218  }
219  else if (PAGE_ALIGN(Entry) != Entry)
220  {
221  /* If there's no block before us, we are the first block, so we should be on a page boundary */
222  KeBugCheckEx(BAD_POOL_HEADER,
223  7,
224  0,
225  __LINE__,
226  (ULONG_PTR)Entry);
227  }
228 
229  /* This block must have a size */
230  if (!Entry->BlockSize)
231  {
232  /* Someone must've corrupted this field */
233  if (Entry->PreviousSize)
234  {
235  PreviousEntry = POOL_PREV_BLOCK(Entry);
236  DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
237  (char *)&PreviousEntry->PoolTag,
238  (char *)&Entry->PoolTag);
239  }
240  else
241  {
242  DPRINT1("Entry tag %.4s\n",
243  (char *)&Entry->PoolTag);
244  }
245  KeBugCheckEx(BAD_POOL_HEADER,
246  8,
247  0,
248  __LINE__,
249  (ULONG_PTR)Entry);
250  }
251 
252  /* Okay, now get the next block */
253  NextEntry = POOL_NEXT_BLOCK(Entry);
254 
255  /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
256  if (PAGE_ALIGN(NextEntry) != NextEntry)
257  {
258  /* The two blocks must be on the same page! */
259  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
260  {
261  /* Something is messed up */
262  KeBugCheckEx(BAD_POOL_HEADER,
263  9,
264  (ULONG_PTR)NextEntry,
265  __LINE__,
266  (ULONG_PTR)Entry);
267  }
268 
269  /* And this block should think we are as large as we truly are */
270  if (NextEntry->PreviousSize != Entry->BlockSize)
271  {
272  /* Otherwise, someone corrupted the field */
273  DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
274  Entry->BlockSize, (char *)&Entry->PoolTag,
275  NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
276  KeBugCheckEx(BAD_POOL_HEADER,
277  5,
278  (ULONG_PTR)NextEntry,
279  __LINE__,
280  (ULONG_PTR)Entry);
281  }
282  }
283 }
284 
285 VOID
286 NTAPI
288  PVOID P,
290  ULONG Tag)
291 {
293  ULONG i;
294  KIRQL OldIrql;
295  POOL_TYPE RealPoolType;
296 
297  /* Get the pool header */
298  Entry = ((PPOOL_HEADER)P) - 1;
299 
300  /* Check if this is a large allocation */
301  if (PAGE_ALIGN(P) == P)
302  {
303  /* Lock the pool table */
305 
306  /* Find the pool tag */
307  for (i = 0; i < PoolBigPageTableSize; i++)
308  {
309  /* Check if this is our allocation */
310  if (PoolBigPageTable[i].Va == P)
311  {
312  /* Make sure the tag is ok */
313  if (PoolBigPageTable[i].Key != Tag)
314  {
315  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
316  }
317 
318  break;
319  }
320  }
321 
322  /* Release the lock */
324 
325  if (i == PoolBigPageTableSize)
326  {
327  /* Did not find the allocation */
328  //ASSERT(FALSE);
329  }
330 
331  /* Get Pool type by address */
332  RealPoolType = MmDeterminePoolType(P);
333  }
334  else
335  {
336  /* Verify the tag */
337  if (Entry->PoolTag != Tag)
338  {
339  DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
340  &Tag, &Entry->PoolTag, Entry->PoolTag);
341  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
342  }
343 
344  /* Check the rest of the header */
346 
347  /* Get Pool type from entry */
348  RealPoolType = (Entry->PoolType - 1);
349  }
350 
351  /* Should we check the pool type? */
352  if (PoolType != -1)
353  {
354  /* Verify the pool type */
355  if (RealPoolType != PoolType)
356  {
357  DPRINT1("Wrong pool type! Expected %s, got %s\n",
358  PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
359  (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
360  KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
361  }
362  }
363 }
364 
365 VOID
366 NTAPI
368 {
369  BOOLEAN FoundBlock = FALSE;
370  SIZE_T Size = 0;
372 
373  /* Get the first entry for this page, make sure it really is the first */
374  Entry = PAGE_ALIGN(Block);
375  ASSERT(Entry->PreviousSize == 0);
376 
377  /* Now scan each entry */
378  while (TRUE)
379  {
380  /* When we actually found our block, remember this */
381  if (Entry == Block) FoundBlock = TRUE;
382 
383  /* Now validate this block header */
385 
386  /* And go to the next one, keeping track of our size */
387  Size += Entry->BlockSize;
389 
390  /* If we hit the last block, stop */
391  if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
392 
393  /* If we hit the end of the page, stop */
394  if (PAGE_ALIGN(Entry) == Entry) break;
395  }
396 
397  /* We must've found our block, and we must have hit the end of the page */
398  if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
399  {
400  /* Otherwise, the blocks are messed up */
401  KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
402  }
403 }
404 
406 VOID
409  IN PVOID Entry)
410 {
411  //
412  // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
413  // be DISPATCH_LEVEL or lower for Non Paged Pool
414  //
415  if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
418  {
419  //
420  // Take the system down
421  //
422  KeBugCheckEx(BAD_POOL_CALLER,
425  PoolType,
427  }
428 }
429 
431 ULONG
433  IN SIZE_T BucketMask)
434 {
435  //
436  // Compute the hash by multiplying with a large prime number and then XORing
437  // with the HIDWORD of the result.
438  //
439  // Finally, AND with the bucket mask to generate a valid index/bucket into
440  // the table
441  //
442  ULONGLONG Result = (ULONGLONG)40543 * Tag;
443  return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
444 }
445 
447 ULONG
449 {
450  ULONG Result;
451  //
452  // Compute the hash by converting the address into a page number, and then
453  // XORing each nibble with the next one.
454  //
455  // We do *NOT* AND with the bucket mask at this point because big table expansion
456  // might happen. Therefore, the final step of the hash must be performed
457  // while holding the expansion pushlock, and this is why we call this a
458  // "partial" hash only.
459  //
461  return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
462 }
463 
464 #if DBG
466 BOOLEAN
467 ExpTagAllowPrint(CHAR Tag)
468 {
469  if ((Tag >= 'a' && Tag <= 'z') ||
470  (Tag >= 'A' && Tag <= 'Z') ||
471  (Tag >= '0' && Tag <= '9') ||
472  Tag == ' ' || Tag == '=' ||
473  Tag == '?' || Tag == '@')
474  {
475  return TRUE;
476  }
477 
478  return FALSE;
479 }
480 
/*
 * MiDumperPrint: routes pool-usage dump output to the kernel debugger prompt
 * (KdbpPrint) when dbg is set and KDBG is built in, otherwise to the debug
 * log via DPRINT1.
 */
#ifdef KDBG
#define MiDumperPrint(dbg, fmt, ...) \
    if (dbg) KdbpPrint(fmt, ##__VA_ARGS__); \
    else DPRINT1(fmt, ##__VA_ARGS__)
#else
#define MiDumperPrint(dbg, fmt, ...) \
    DPRINT1(fmt, ##__VA_ARGS__)
#endif
489 
490 VOID
491 MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
492 {
493  SIZE_T i;
495 
496  //
497  // Only print header if called from OOM situation
498  //
499  if (!CalledFromDbg)
500  {
501  DPRINT1("---------------------\n");
502  DPRINT1("Out of memory dumper!\n");
503  }
504 #ifdef KDBG
505  else
506  {
507  KdbpPrint("Pool Used:\n");
508  }
509 #endif
510 
511  //
512  // Remember whether we'll have to be verbose
513  // This is the only supported flag!
514  //
516 
517  //
518  // Print table header
519  //
520  if (Verbose)
521  {
522  MiDumperPrint(CalledFromDbg, "\t\t\t\tNonPaged\t\t\t\t\t\t\tPaged\n");
523  MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\t\tAllocs\t\tFrees\t\tDiff\t\tUsed\n");
524  }
525  else
526  {
527  MiDumperPrint(CalledFromDbg, "\t\tNonPaged\t\t\tPaged\n");
528  MiDumperPrint(CalledFromDbg, "Tag\t\tAllocs\t\tUsed\t\tAllocs\t\tUsed\n");
529  }
530 
531  //
532  // We'll extract allocations for all the tracked pools
533  //
534  for (i = 0; i < PoolTrackTableSize; ++i)
535  {
537 
539 
540  //
541  // We only care about tags which have allocated memory
542  //
543  if (TableEntry->NonPagedBytes != 0 || TableEntry->PagedBytes != 0)
544  {
545  //
546  // If there's a tag, attempt to do a pretty print
547  // only if it matches the caller's tag, or if
548  // any tag is allowed
549  // For checking whether it matches caller's tag,
550  // use the mask to make sure not to mess with the wildcards
551  //
552  if (TableEntry->Key != 0 && TableEntry->Key != TAG_NONE &&
553  (Tag == 0 || (TableEntry->Key & Mask) == (Tag & Mask)))
554  {
555  CHAR Tag[4];
556 
557  //
558  // Extract each 'component' and check whether they are printable
559  //
560  Tag[0] = TableEntry->Key & 0xFF;
561  Tag[1] = TableEntry->Key >> 8 & 0xFF;
562  Tag[2] = TableEntry->Key >> 16 & 0xFF;
563  Tag[3] = TableEntry->Key >> 24 & 0xFF;
564 
565  if (ExpTagAllowPrint(Tag[0]) && ExpTagAllowPrint(Tag[1]) && ExpTagAllowPrint(Tag[2]) && ExpTagAllowPrint(Tag[3]))
566  {
567  //
568  // Print in direct order to make !poolused TAG usage easier
569  //
570  if (Verbose)
571  {
572  MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
573  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
574  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
575  TableEntry->PagedAllocs, TableEntry->PagedFrees,
576  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
577  }
578  else
579  {
580  MiDumperPrint(CalledFromDbg, "'%c%c%c%c'\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", Tag[0], Tag[1], Tag[2], Tag[3],
581  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
582  TableEntry->PagedAllocs, TableEntry->PagedBytes);
583  }
584  }
585  else
586  {
587  if (Verbose)
588  {
589  MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
590  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
591  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
592  TableEntry->PagedAllocs, TableEntry->PagedFrees,
593  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
594  }
595  else
596  {
597  MiDumperPrint(CalledFromDbg, "%x\t%ld\t\t%ld\t\t%ld\t\t%ld\n", TableEntry->Key,
598  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
599  TableEntry->PagedAllocs, TableEntry->PagedBytes);
600  }
601  }
602  }
603  else if (Tag == 0 || (Tag & Mask) == (TAG_NONE & Mask))
604  {
605  if (Verbose)
606  {
607  MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
608  TableEntry->NonPagedAllocs, TableEntry->NonPagedFrees,
609  (TableEntry->NonPagedAllocs - TableEntry->NonPagedFrees), TableEntry->NonPagedBytes,
610  TableEntry->PagedAllocs, TableEntry->PagedFrees,
611  (TableEntry->PagedAllocs - TableEntry->PagedFrees), TableEntry->PagedBytes);
612  }
613  else
614  {
615  MiDumperPrint(CalledFromDbg, "Anon\t\t%ld\t\t%ld\t\t%ld\t\t%ld\n",
616  TableEntry->NonPagedAllocs, TableEntry->NonPagedBytes,
617  TableEntry->PagedAllocs, TableEntry->PagedBytes);
618  }
619  }
620  }
621  }
622 
623  if (!CalledFromDbg)
624  {
625  DPRINT1("---------------------\n");
626  }
627 }
628 #endif
629 
630 /* PRIVATE FUNCTIONS **********************************************************/
631 
633 VOID
634 NTAPI
636 {
637  ULONG i, Key, Hash, Index;
638  PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
639  ULONG TagList[] =
640  {
641  ' oI',
642  ' laH',
643  'PldM',
644  'LooP',
645  'tSbO',
646  ' prI',
647  'bdDN',
648  'LprI',
649  'pOoI',
650  ' ldM',
651  'eliF',
652  'aVMC',
653  'dSeS',
654  'CFtN',
655  'looP',
656  'rPCT',
657  'bNMC',
658  'dTeS',
659  'sFtN',
660  'TPCT',
661  'CPCT',
662  ' yeK',
663  'qSbO',
664  'mNoI',
665  'aEoI',
666  'cPCT',
667  'aFtN',
668  '0ftN',
669  'tceS',
670  'SprI',
671  'ekoT',
672  ' eS',
673  'lCbO',
674  'cScC',
675  'lFtN',
676  'cAeS',
677  'mfSF',
678  'kWcC',
679  'miSF',
680  'CdfA',
681  'EdfA',
682  'orSF',
683  'nftN',
684  'PRIU',
685  'rFpN',
686  'RFpN',
687  'aPeS',
688  'sUeS',
689  'FpcA',
690  'MpcA',
691  'cSeS',
692  'mNbO',
693  'sFpN',
694  'uLeS',
695  'DPcS',
696  'nevE',
697  'vrqR',
698  'ldaV',
699  ' pP',
700  'SdaV',
701  ' daV',
702  'LdaV',
703  'FdaV',
704  ' GIB',
705  };
706 
707  //
708  // Loop all 64 hot tags
709  //
710  ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
711  for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
712  {
713  //
714  // Get the current tag, and compute its hash in the tracker table
715  //
716  Key = TagList[i];
718 
719  //
720  // Loop all the hashes in this index/bucket
721  //
722  Index = Hash;
723  while (TRUE)
724  {
725  //
726  // Find an empty entry, and make sure this isn't the last hash that
727  // can fit.
728  //
729  // On checked builds, also make sure this is the first time we are
730  // seeding this tag.
731  //
732  ASSERT(TrackTable[Hash].Key != Key);
733  if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
734  {
735  //
736  // It has been seeded, move on to the next tag
737  //
738  TrackTable[Hash].Key = Key;
739  break;
740  }
741 
742  //
743  // This entry was already taken, compute the next possible hash while
744  // making sure we're not back at our initial index.
745  //
746  ASSERT(TrackTable[Hash].Key != Key);
747  Hash = (Hash + 1) & PoolTrackTableMask;
748  if (Hash == Index) break;
749  }
750  }
751 }
752 
753 VOID
754 NTAPI
758 {
759  ULONG Hash, Index;
761  SIZE_T TableMask, TableSize;
762 
763  //
764  // Remove the PROTECTED_POOL flag which is not part of the tag
765  //
766  Key &= ~PROTECTED_POOL;
767 
768  //
769  // With WinDBG you can set a tag you want to break on when an allocation is
770  // attempted
771  //
772  if (Key == PoolHitTag) DbgBreakPoint();
773 
774  //
775  // Why the double indirection? Because normally this function is also used
776  // when doing session pool allocations, which has another set of tables,
777  // sizes, and masks that live in session pool. Now we don't support session
778  // pool so we only ever use the regular tables, but I'm keeping the code this
779  // way so that the day we DO support session pool, it won't require that
780  // many changes
781  //
783  TableMask = PoolTrackTableMask;
786 
787  //
788  // Compute the hash for this key, and loop all the possible buckets
789  //
790  Hash = ExpComputeHashForTag(Key, TableMask);
791  Index = Hash;
792  while (TRUE)
793  {
794  //
795  // Have we found the entry for this tag? */
796  //
797  TableEntry = &Table[Hash];
798  if (TableEntry->Key == Key)
799  {
800  //
801  // Decrement the counters depending on if this was paged or nonpaged
802  // pool
803  //
805  {
806  InterlockedIncrement(&TableEntry->NonPagedFrees);
807  InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes,
809  return;
810  }
811  InterlockedIncrement(&TableEntry->PagedFrees);
814  return;
815  }
816 
817  //
818  // We should have only ended up with an empty entry if we've reached
819  // the last bucket
820  //
821  if (!TableEntry->Key)
822  {
823  DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
824  Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
825  ASSERT(Hash == TableMask);
826  }
827 
828  //
829  // This path is hit when we don't have an entry, and the current bucket
830  // is full, so we simply try the next one
831  //
832  Hash = (Hash + 1) & TableMask;
833  if (Hash == Index) break;
834  }
835 
836  //
837  // And finally this path is hit when all the buckets are full, and we need
838  // some expansion. This path is not yet supported in ReactOS and so we'll
839  // ignore the tag
840  //
841  DPRINT1("Out of pool tag space, ignoring...\n");
842 }
843 
844 VOID
845 NTAPI
849 {
850  ULONG Hash, Index;
851  KIRQL OldIrql;
853  SIZE_T TableMask, TableSize;
854 
855  //
856  // Remove the PROTECTED_POOL flag which is not part of the tag
857  //
858  Key &= ~PROTECTED_POOL;
859 
860  //
861  // With WinDBG you can set a tag you want to break on when an allocation is
862  // attempted
863  //
864  if (Key == PoolHitTag) DbgBreakPoint();
865 
866  //
867  // There is also an internal flag you can set to break on malformed tags
868  //
869  if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
870 
871  //
872  // ASSERT on ReactOS features not yet supported
873  //
876 
877  //
878  // Why the double indirection? Because normally this function is also used
879  // when doing session pool allocations, which has another set of tables,
880  // sizes, and masks that live in session pool. Now we don't support session
881  // pool so we only ever use the regular tables, but I'm keeping the code this
882  // way so that the day we DO support session pool, it won't require that
883  // many changes
884  //
886  TableMask = PoolTrackTableMask;
889 
890  //
891  // Compute the hash for this key, and loop all the possible buckets
892  //
893  Hash = ExpComputeHashForTag(Key, TableMask);
894  Index = Hash;
895  while (TRUE)
896  {
897  //
898  // Do we already have an entry for this tag? */
899  //
900  TableEntry = &Table[Hash];
901  if (TableEntry->Key == Key)
902  {
903  //
904  // Increment the counters depending on if this was paged or nonpaged
905  // pool
906  //
908  {
909  InterlockedIncrement(&TableEntry->NonPagedAllocs);
911  return;
912  }
913  InterlockedIncrement(&TableEntry->PagedAllocs);
915  return;
916  }
917 
918  //
919  // We don't have an entry yet, but we've found a free bucket for it
920  //
921  if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
922  {
923  //
924  // We need to hold the lock while creating a new entry, since other
925  // processors might be in this code path as well
926  //
928  if (!PoolTrackTable[Hash].Key)
929  {
930  //
931  // We've won the race, so now create this entry in the bucket
932  //
933  ASSERT(Table[Hash].Key == 0);
935  TableEntry->Key = Key;
936  }
938 
939  //
940  // Now we force the loop to run again, and we should now end up in
941  // the code path above which does the interlocked increments...
942  //
943  continue;
944  }
945 
946  //
947  // This path is hit when we don't have an entry, and the current bucket
948  // is full, so we simply try the next one
949  //
950  Hash = (Hash + 1) & TableMask;
951  if (Hash == Index) break;
952  }
953 
954  //
955  // And finally this path is hit when all the buckets are full, and we need
956  // some expansion. This path is not yet supported in ReactOS and so we'll
957  // ignore the tag
958  //
959  DPRINT1("Out of pool tag space, ignoring...\n");
960 }
961 
963 VOID
964 NTAPI
967  IN ULONG PoolIndex,
968  IN ULONG Threshold,
969  IN PVOID PoolLock)
970 {
971  PLIST_ENTRY NextEntry, LastEntry;
972 
973  //
974  // Setup the descriptor based on the caller's request
975  //
976  PoolDescriptor->PoolType = PoolType;
977  PoolDescriptor->PoolIndex = PoolIndex;
978  PoolDescriptor->Threshold = Threshold;
979  PoolDescriptor->LockAddress = PoolLock;
980 
981  //
982  // Initialize accounting data
983  //
984  PoolDescriptor->RunningAllocs = 0;
985  PoolDescriptor->RunningDeAllocs = 0;
986  PoolDescriptor->TotalPages = 0;
987  PoolDescriptor->TotalBytes = 0;
988  PoolDescriptor->TotalBigPages = 0;
989 
990  //
991  // Nothing pending for now
992  //
993  PoolDescriptor->PendingFrees = NULL;
994  PoolDescriptor->PendingFreeDepth = 0;
995 
996  //
997  // Loop all the descriptor's allocation lists and initialize them
998  //
999  NextEntry = PoolDescriptor->ListHeads;
1000  LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
1001  while (NextEntry < LastEntry)
1002  {
1003  ExpInitializePoolListHead(NextEntry);
1004  NextEntry++;
1005  }
1006 
1007  //
1008  // Note that ReactOS does not support Session Pool Yet
1009  //
1011 }
1012 
1014 VOID
1015 NTAPI
1017  IN ULONG Threshold)
1018 {
1020  SIZE_T TableSize;
1021  ULONG i;
1022 
1023  //
1024  // Check what kind of pool this is
1025  //
1026  if (PoolType == NonPagedPool)
1027  {
1028  //
1029  // Compute the track table size and convert it from a power of two to an
1030  // actual byte size
1031  //
1032  // NOTE: On checked builds, we'll assert if the registry table size was
1033  // invalid, while on retail builds we'll just break out of the loop at
1034  // that point.
1035  //
1037  for (i = 0; i < 32; i++)
1038  {
1039  if (TableSize & 1)
1040  {
1041  ASSERT((TableSize & ~1) == 0);
1042  if (!(TableSize & ~1)) break;
1043  }
1044  TableSize >>= 1;
1045  }
1046 
1047  //
1048  // If we hit bit 32, than no size was defined in the registry, so
1049  // we'll use the default size of 2048 entries.
1050  //
1051  // Otherwise, use the size from the registry, as long as it's not
1052  // smaller than 64 entries.
1053  //
1054  if (i == 32)
1055  {
1056  PoolTrackTableSize = 2048;
1057  }
1058  else
1059  {
1060  PoolTrackTableSize = max(1 << i, 64);
1061  }
1062 
1063  //
1064  // Loop trying with the biggest specified size first, and cut it down
1065  // by a power of two each iteration in case not enough memory exist
1066  //
1067  while (TRUE)
1068  {
1069  //
1070  // Do not allow overflow
1071  //
1072  if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
1073  {
1074  PoolTrackTableSize >>= 1;
1075  continue;
1076  }
1077 
1078  //
1079  // Allocate the tracker table and exit the loop if this worked
1080  //
1082  (PoolTrackTableSize + 1) *
1083  sizeof(POOL_TRACKER_TABLE));
1084  if (PoolTrackTable) break;
1085 
1086  //
1087  // Otherwise, as long as we're not down to the last bit, keep
1088  // iterating
1089  //
1090  if (PoolTrackTableSize == 1)
1091  {
1092  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1093  TableSize,
1094  0xFFFFFFFF,
1095  0xFFFFFFFF,
1096  0xFFFFFFFF);
1097  }
1098  PoolTrackTableSize >>= 1;
1099  }
1100 
1101  //
1102  // Add one entry, compute the hash, and zero the table
1103  //
1106 
1109 
1110  //
1111  // Finally, add the most used tags to speed up those allocations
1112  //
1113  ExpSeedHotTags();
1114 
1115  //
1116  // We now do the exact same thing with the tracker table for big pages
1117  //
1119  for (i = 0; i < 32; i++)
1120  {
1121  if (TableSize & 1)
1122  {
1123  ASSERT((TableSize & ~1) == 0);
1124  if (!(TableSize & ~1)) break;
1125  }
1126  TableSize >>= 1;
1127  }
1128 
1129  //
1130  // For big pages, the default tracker table is 4096 entries, while the
1131  // minimum is still 64
1132  //
1133  if (i == 32)
1134  {
1135  PoolBigPageTableSize = 4096;
1136  }
1137  else
1138  {
1139  PoolBigPageTableSize = max(1 << i, 64);
1140  }
1141 
1142  //
1143  // Again, run the exact same loop we ran earlier, but this time for the
1144  // big pool tracker instead
1145  //
1146  while (TRUE)
1147  {
1149  {
1150  PoolBigPageTableSize >>= 1;
1151  continue;
1152  }
1153 
1156  sizeof(POOL_TRACKER_BIG_PAGES));
1157  if (PoolBigPageTable) break;
1158 
1159  if (PoolBigPageTableSize == 1)
1160  {
1161  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1162  TableSize,
1163  0xFFFFFFFF,
1164  0xFFFFFFFF,
1165  0xFFFFFFFF);
1166  }
1167 
1168  PoolBigPageTableSize >>= 1;
1169  }
1170 
1171  //
1172  // An extra entry is not needed for for the big pool tracker, so just
1173  // compute the hash and zero it
1174  //
1178  for (i = 0; i < PoolBigPageTableSize; i++)
1179  {
1181  }
1182 
1183  //
1184  // During development, print this out so we can see what's happening
1185  //
1186  DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1188  DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1190 
1191  //
1192  // Insert the generic tracker for all of big pool
1193  //
1194  ExpInsertPoolTracker('looP',
1196  sizeof(POOL_TRACKER_BIG_PAGES)),
1197  NonPagedPool);
1198 
1199  //
1200  // No support for NUMA systems at this time
1201  //
1202  ASSERT(KeNumberNodes == 1);
1203 
1204  //
1205  // Initialize the tag spinlock
1206  //
1208 
1209  //
1210  // Initialize the nonpaged pool descriptor
1211  //
1214  NonPagedPool,
1215  0,
1216  Threshold,
1217  NULL);
1218  }
1219  else
1220  {
1221  //
1222  // No support for NUMA systems at this time
1223  //
1224  ASSERT(KeNumberNodes == 1);
1225 
1226  //
1227  // Allocate the pool descriptor
1228  //
1230  sizeof(KGUARDED_MUTEX) +
1231  sizeof(POOL_DESCRIPTOR),
1232  'looP');
1233  if (!Descriptor)
1234  {
1235  //
1236  // This is really bad...
1237  //
1238  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1239  0,
1240  -1,
1241  -1,
1242  -1);
1243  }
1244 
1245  //
1246  // Setup the vector and guarded mutex for paged pool
1247  //
1253  PagedPool,
1254  0,
1255  Threshold,
1257 
1258  //
1259  // Insert the generic tracker for all of nonpaged pool
1260  //
1261  ExpInsertPoolTracker('looP',
1263  NonPagedPool);
1264  }
1265 }
1266 
1268 KIRQL
1270 {
1271  //
1272  // Check if this is nonpaged pool
1273  //
1274  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1275  {
1276  //
1277  // Use the queued spin lock
1278  //
1280  }
1281  else
1282  {
1283  //
1284  // Use the guarded mutex
1285  //
1286  KeAcquireGuardedMutex(Descriptor->LockAddress);
1287  return APC_LEVEL;
1288  }
1289 }
1290 
1292 VOID
1294  IN KIRQL OldIrql)
1295 {
1296  //
1297  // Check if this is nonpaged pool
1298  //
1299  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1300  {
1301  //
1302  // Use the queued spin lock
1303  //
1305  }
1306  else
1307  {
1308  //
1309  // Use the guarded mutex
1310  //
1311  KeReleaseGuardedMutex(Descriptor->LockAddress);
1312  }
1313 }
1314 
1315 VOID
1316 NTAPI
1321 {
1325 
1326  //
1327  // Make sure we win the race, and if we did, copy the data atomically
1328  //
1330  {
1331  RtlCopyMemory(Context->PoolTrackTable,
1333  Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1334 
1335  //
1336  // This is here because ReactOS does not yet support expansion
1337  //
1338  ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1339  }
1340 
1341  //
1342  // Regardless of whether we won or not, we must now synchronize and then
1343  // decrement the barrier since this is one more processor that has completed
1344  // the callback.
1345  //
1348 }
1349 
//
// ExGetPoolTagInfo (name/first-parameter lines dropped by the extraction)
// Services SystemPoolTagInformation: snapshots the pool tracker table via a
// "Generic DPC", then copies each in-use tag's alloc/free/byte counters into
// the caller's SYSTEM_POOLTAG_INFORMATION buffer. On a too-small buffer it
// keeps counting (presumably setting STATUS_INFO_LENGTH_MISMATCH on the
// dropped line) so Count and ReturnLength still report the required size.
//
1350 NTSTATUS
1351 NTAPI
1353  IN ULONG SystemInformationLength,
1355 {
1356  ULONG TableSize, CurrentLength;
1357  ULONG EntryCount;
1359  PSYSTEM_POOLTAG TagEntry;
1360  PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1363 
1364  //
1365  // Keep track of how much data the caller's buffer must hold
1366  //
1367  CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1368 
1369  //
1370  // Initialize the caller's buffer
1371  //
1372  TagEntry = &SystemInformation->TagInfo[0];
1373  SystemInformation->Count = 0;
1374 
1375  //
1376  // Capture the number of entries, and the total size needed to make a copy
1377  // of the table
1378  //
1379  EntryCount = (ULONG)PoolTrackTableSize;
1380  TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1381 
1382  //
1383  // Allocate the "Generic DPC" temporary buffer
1384  //
// NOTE(review): the allocation call and its failure check are missing here
// (dropped hyperlinked lines) — Buffer is freed below with tag 'ofnI'.
1387 
1388  //
1389  // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1390  //
1391  Context.PoolTrackTable = Buffer;
1392  Context.PoolTrackTableSize = PoolTrackTableSize;
1393  Context.PoolTrackTableExpansion = NULL;
1394  Context.PoolTrackTableSizeExpansion = 0;
1396 
1397  //
1398  // Now parse the results
1399  //
1400  for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1401  {
1402  //
1403  // If the entry is empty, skip it
1404  //
1405  if (!TrackerEntry->Key) continue;
1406 
1407  //
1408  // Otherwise, add one more entry to the caller's buffer, and ensure that
1409  // enough space has been allocated in it
1410  //
1411  SystemInformation->Count++;
1412  CurrentLength += sizeof(*TagEntry);
1413  if (SystemInformationLength < CurrentLength)
1414  {
1415  //
1416  // The caller's buffer is too small, so set a failure code. The
1417  // caller will know the count, as well as how much space is needed.
1418  //
1419  // We do NOT break out of the loop, because we want to keep incrementing
1420  // the Count as well as CurrentLength so that the caller can know the
1421  // final numbers
1422  //
1424  }
1425  else
1426  {
1427  //
1428  // Small sanity check that our accounting is working correctly
1429  //
1430  ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1431  ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1432 
1433  //
1434  // Return the data into the caller's buffer
1435  //
1436  TagEntry->TagUlong = TrackerEntry->Key;
1437  TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1438  TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1439  TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1440  TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1441  TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1442  TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1443  TagEntry++;
1444  }
1445  }
1446 
1447  //
1448  // Free the "Generic DPC" temporary buffer, return the buffer length and status
1449  //
1450  ExFreePoolWithTag(Buffer, 'ofnI');
1451  if (ReturnLength) *ReturnLength = CurrentLength;
1452  return Status;
1453 }
1454 
//
// ExpExpandBigPageTable
// Doubles the big-pool tracker hash table: allocates a new table of twice the
// size, marks every slot free, rehashes all live entries from the old table
// (open addressing with linear probing), publishes the new table, releases
// the large-pool table lock, frees the old pages, and fixes up the 'looP'
// tracker accounting. Returns FALSE (after releasing the lock — those
// dropped lines presumably do so; TODO confirm) on overflow or allocation
// failure. Caller must hold ExpLargePoolTableLock at OldIrql on entry; the
// lock is always released before returning.
//
1456 BOOLEAN
1457 NTAPI
1458 ExpExpandBigPageTable(
1460 {
1461  ULONG OldSize = PoolBigPageTableSize;
1462  ULONG NewSize = 2 * OldSize;
1463  ULONG NewSizeInBytes;
1464  PPOOL_TRACKER_BIG_PAGES NewTable;
1465  PPOOL_TRACKER_BIG_PAGES OldTable;
1466  ULONG i;
1467  ULONG PagesFreed;
1468  ULONG Hash;
1469  ULONG HashMask;
1470 
1471  /* Must be holding ExpLargePoolTableLock */
1473 
1474  /* Make sure we don't overflow */
1475  if (!NT_SUCCESS(RtlULongMult(2,
1476  OldSize * sizeof(POOL_TRACKER_BIG_PAGES),
1477  &NewSizeInBytes)))
1478  {
1479  DPRINT1("Overflow expanding big page table. Size=%lu\n", OldSize);
/* NOTE(review): lock-release call dropped by the extraction here. */
1481  return FALSE;
1482  }
1483 
1484  NewTable = MiAllocatePoolPages(NonPagedPool, NewSizeInBytes);
1485  if (NewTable == NULL)
1486  {
1487  DPRINT1("Could not allocate %lu bytes for new big page table\n", NewSizeInBytes);
/* NOTE(review): lock-release call dropped by the extraction here. */
1489  return FALSE;
1490  }
1491 
1492  DPRINT("Expanding big pool tracker table to %lu entries\n", NewSize);
1493 
1494  /* Initialize the new table */
1495  RtlZeroMemory(NewTable, NewSizeInBytes);
1496  for (i = 0; i < NewSize; i++)
1497  {
1498  NewTable[i].Va = (PVOID)POOL_BIG_TABLE_ENTRY_FREE;
1499  }
1500 
1501  /* Copy over all items */
1502  OldTable = PoolBigPageTable;
1503  HashMask = NewSize - 1;
1504  for (i = 0; i < OldSize; i++)
1505  {
1506  /* Skip over empty items */
1507  if ((ULONG_PTR)OldTable[i].Va & POOL_BIG_TABLE_ENTRY_FREE)
1508  {
1509  continue;
1510  }
1511 
1512  /* Recalculate the hash due to the new table size */
1513  Hash = ExpComputePartialHashForAddress(OldTable[i].Va) & HashMask;
1514 
1515  /* Find the location in the new table */
1516  while (!((ULONG_PTR)NewTable[Hash].Va & POOL_BIG_TABLE_ENTRY_FREE))
1517  {
1518  Hash = (Hash + 1) & HashMask;
1519  }
1520 
1521  /* We just enlarged the table, so we must have space */
1523 
1524  /* Finally, copy the item */
1525  NewTable[Hash] = OldTable[i];
1526  }
1527 
1528  /* Activate the new table */
1529  PoolBigPageTable = NewTable;
/* NOTE(review): the updates of the global table size / hash globals were
   dropped by the extraction here — restore from upstream before building. */
1532 
1533  /* Release the lock, we're done changing global state */
1535 
1536  /* Free the old table and update our tracker */
1537  PagesFreed = MiFreePoolPages(OldTable);
1538  ExpRemovePoolTracker('looP', PagesFreed << PAGE_SHIFT, 0);
1539  ExpInsertPoolTracker('looP', ALIGN_UP_BY(NewSizeInBytes, PAGE_SIZE), 0);
1540 
1541  return TRUE;
1542 }
1543 
//
// ExpAddTagForBigPages (name/leading-parameter lines dropped by the
// extraction). Records a big-page (whole-page) allocation in the big-pool
// tracker hash table: probes linearly from the hash bucket of Va for a free
// slot, claims it with an interlocked exchange of the Va field, and stores
// the tag and page count. Proactively expands the table when contention is
// high and occupancy exceeds ~25%; if the table is completely full it
// expands and retries. Returns FALSE only if expansion fails. The large-pool
// table lock is held for the whole probe (acquired on the dropped lines
// after Retry: — TODO confirm against upstream).
//
1544 BOOLEAN
1545 NTAPI
1547  IN ULONG Key,
1548  IN ULONG NumberOfPages,
1550 {
1551  ULONG Hash, i = 0;
1552  PVOID OldVa;
1553  KIRQL OldIrql;
1554  SIZE_T TableSize;
1555  PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
1558 
1559  //
1560  // As the table is expandable, these values must only be read after acquiring
1561  // the lock to avoid a teared access during an expansion
1562  // NOTE: Windows uses a special reader/writer SpinLock to improve
1563  // performance in the common case (add/remove a tracker entry)
1564  //
1565 Retry:
// NOTE(review): the lock acquisition and the Hash/TableSize capture lines
// were dropped by the extraction here — restore from upstream.
1570 
1571  //
1572  // We loop from the current hash bucket to the end of the table, and then
1573  // rollover to hash bucket 0 and keep going from there. If we return back
1574  // to the beginning, then we attempt expansion at the bottom of the loop
1575  //
1576  EntryStart = Entry = &PoolBigPageTable[Hash];
1577  EntryEnd = &PoolBigPageTable[TableSize];
1578  do
1579  {
1580  //
1581  // Make sure that this is a free entry and attempt to atomically make the
1582  // entry busy now
1583  // NOTE: the Interlocked operation cannot fail with an exclusive SpinLock
1584  //
1585  OldVa = Entry->Va;
1586  if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
1587  (NT_VERIFY(InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa)))
1588  {
1589  //
1590  // We now own this entry, write down the size and the pool tag
1591  //
1592  Entry->Key = Key;
1593  Entry->NumberOfPages = NumberOfPages;
1594 
1595  //
1596  // Add one more entry to the count, and see if we're getting within
1597  // 25% of the table size, at which point we'll do an expansion now
1598  // to avoid blocking too hard later on.
1599  //
1600  // Note that we only do this if it's also been the 16th time that we
1601  // keep losing the race or that we are not finding a free entry anymore,
1602  // which implies a massive number of concurrent big pool allocations.
1603  //
// NOTE(review): the in-use-count increment line was dropped by the
// extraction here (ExpPoolBigEntriesInUse is tested just below).
1605  if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
1606  {
1607  DPRINT("Attempting expansion since we now have %lu entries\n",
1610  ExpExpandBigPageTable(OldIrql);
1611  return TRUE;
1612  }
1613 
1614  //
1615  // We have our entry, return
1616  //
// NOTE(review): lock-release call dropped by the extraction here.
1618  return TRUE;
1619  }
1620 
1621  //
1622  // We don't have our entry yet, so keep trying, making the entry list
1623  // circular if we reach the last entry. We'll eventually break out of
1624  // the loop once we've rolled over and returned back to our original
1625  // hash bucket
1626  //
1627  i++;
1628  if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
1629  } while (Entry != EntryStart);
1630 
1631  //
1632  // This means there's no free hash buckets whatsoever, so we now have
1633  // to attempt expanding the table
1634  //
1636  if (ExpExpandBigPageTable(OldIrql))
1637  {
1638  goto Retry;
1639  }
1641  DPRINT1("Big pool table expansion failed\n");
1642  return FALSE;
1643 }
1644 
//
// ExpFindAndRemoveTagBigPages (name/leading-parameter lines dropped by the
// extraction). Looks up a big-page allocation Va in the big-pool tracker
// table by linear probing (wrapping around once), returns its pool tag and
// writes its page count to *BigPages, then marks the slot free again. If the
// entry was never tracked (two full passes find nothing), returns the
// sentinel tag ' GIB' with *BigPages = 0 so the caller asks Mm for the page
// count instead. The table lock is taken/released on lines the extraction
// dropped — TODO confirm against upstream.
//
1645 ULONG
1646 NTAPI
1648  OUT PULONG_PTR BigPages,
1650 {
1651  BOOLEAN FirstTry = TRUE;
1652  SIZE_T TableSize;
1653  KIRQL OldIrql;
1654  ULONG PoolTag, Hash;
1658 
1659  //
1660  // As the table is expandable, these values must only be read after acquiring
1661  // the lock to avoid a teared access during an expansion
1662  //
// NOTE(review): lock acquisition and Hash/TableSize capture lines dropped
// by the extraction here.
1667 
1668  //
1669  // Loop while trying to find this big page allocation
1670  //
1671  while (PoolBigPageTable[Hash].Va != Va)
1672  {
1673  //
1674  // Increment the size until we go past the end of the table
1675  //
1676  if (++Hash >= TableSize)
1677  {
1678  //
1679  // Is this the second time we've tried?
1680  //
1681  if (!FirstTry)
1682  {
1683  //
1684  // This means it was never inserted into the pool table and it
1685  // received the special "BIG" tag -- return that and return 0
1686  // so that the code can ask Mm for the page count instead
1687  //
// NOTE(review): lock-release call dropped by the extraction here.
1689  *BigPages = 0;
1690  return ' GIB';
1691  }
1692 
1693  //
1694  // The first time this happens, reset the hash index and try again
1695  //
1696  Hash = 0;
1697  FirstTry = FALSE;
1698  }
1699  }
1700 
1701  //
1702  // Now capture all the information we need from the entry, since after we
1703  // release the lock, the data can change
1704  //
// NOTE(review): the Entry pointer assignment line was dropped by the
// extraction here.
1706  *BigPages = Entry->NumberOfPages;
1707  PoolTag = Entry->Key;
1708 
1709  //
1710  // Set the free bit, and decrement the number of allocations. Finally, release
1711  // the lock and return the tag that was located
1712  //
// NOTE(review): the free-bit set, in-use decrement and lock-release lines
// were dropped by the extraction here.
1716  return PoolTag;
1717 }
1718 
//
// ExQueryPoolUsage
// Aggregates page, allocation and free counters across all paged pool
// descriptors and the nonpaged pool descriptor, and sums lookaside-list
// allocation hits per base pool type.
// NOTE(review): *PagedPoolLookasideHits / *NonPagedPoolLookasideHits are
// accumulated with += below but are never zero-initialized in the visible
// code — presumably the callers pass zeroed storage; verify against callers.
//
1719 VOID
1720 NTAPI
1721 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1722  OUT PULONG NonPagedPoolPages,
1723  OUT PULONG PagedPoolAllocs,
1724  OUT PULONG PagedPoolFrees,
1725  OUT PULONG PagedPoolLookasideHits,
1726  OUT PULONG NonPagedPoolAllocs,
1727  OUT PULONG NonPagedPoolFrees,
1728  OUT PULONG NonPagedPoolLookasideHits)
1729 {
1730  ULONG i;
1731  PPOOL_DESCRIPTOR PoolDesc;
1732 
1733  //
1734  // Assume all failures
1735  //
1736  *PagedPoolPages = 0;
1737  *PagedPoolAllocs = 0;
1738  *PagedPoolFrees = 0;
1739 
1740  //
1741  // Tally up the totals for all the paged pool
1742  //
1743  for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1744  {
1745  PoolDesc = ExpPagedPoolDescriptor[i];
1746  *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1747  *PagedPoolAllocs += PoolDesc->RunningAllocs;
1748  *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1749  }
1750 
1751  //
1752  // The first non-paged pool has a hardcoded well-known descriptor name
1753  //
1754  PoolDesc = &NonPagedPoolDescriptor;
1755  *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1756  *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1757  *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1758 
1759  //
1760  // If the system has more than one non-paged pool, copy the other descriptor
1761  // totals as well
1762  //
1763 #if 0
1764  if (ExpNumberOfNonPagedPools > 1)
1765  {
1766  for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1767  {
1768  PoolDesc = ExpNonPagedPoolDescriptor[i];
1769  *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1770  *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1771  *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1772  }
1773  }
1774 #endif
1775 
1776  //
1777  // Get the amount of hits in the system lookaside lists
1778  //
// NOTE(review): the guard condition line (presumably a non-empty check on
// ExPoolLookasideListHead) was dropped by the extraction here.
1780  {
1781  PLIST_ENTRY ListEntry;
1782 
1783  for (ListEntry = ExPoolLookasideListHead.Flink;
1784  ListEntry != &ExPoolLookasideListHead;
1785  ListEntry = ListEntry->Flink)
1786  {
// NOTE(review): the PGENERAL_LOOKASIDE Lookaside declaration line was
// dropped by the extraction here.
1788 
1789  Lookaside = CONTAINING_RECORD(ListEntry, GENERAL_LOOKASIDE, ListEntry);
1790 
1791  if (Lookaside->Type == NonPagedPool)
1792  {
1793  *NonPagedPoolLookasideHits += Lookaside->AllocateHits;
1794  }
1795  else
1796  {
1797  *PagedPoolLookasideHits += Lookaside->AllocateHits;
1798  }
1799  }
1800  }
1801 }
1802 
//
// ExReturnPoolQuota (name/parameter lines dropped by the extraction)
// Returns the pool quota that was charged to a process for the allocation P.
// For quota-tracked blocks the owning EPROCESS pointer is stashed in the
// last pointer-sized slot of the block; after sanity-checking that it really
// is a process object (bugchecking with BAD_POOL_CALLER 0x0D otherwise) the
// slot is cleared and the quota is returned (the return-quota and object
// dereference calls are among the lines dropped by the extraction — TODO
// confirm against upstream).
//
1803 VOID
1804 NTAPI
1806 {
1809  USHORT BlockSize;
1811 
// NOTE(review): an early-out guard condition (dropped line) precedes this
// block — only its body is visible.
1814  {
1815  return;
1816  }
1817 
1818  Entry = P;
1819  Entry--;
1821 
1822  PoolType = Entry->PoolType - 1;
1823  BlockSize = Entry->BlockSize;
1824 
1825  if (PoolType & QUOTA_POOL_MASK)
1826  {
1827  Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
1828  ASSERT(Process != NULL);
1829  if (Process)
1830  {
1831  if (Process->Pcb.Header.Type != ProcessObject)
1832  {
1833  DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
1834  Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
1835  KeBugCheckEx(BAD_POOL_CALLER,
1836  0x0D,
1837  (ULONG_PTR)P,
1838  Entry->PoolTag,
1839  (ULONG_PTR)Process);
1840  }
1841  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
1844  BlockSize * POOL_BLOCK_SIZE);
1846  }
1847  }
1848 }
1849 
1850 /* PUBLIC FUNCTIONS ***********************************************************/
1851 
1852 /*
1853  * @implemented
1854  */
//
// ExAllocatePoolWithTag (the name/first-parameter lines were dropped by the
// source-view extraction).
// Main pool allocator. Order of attempts:
//   1. special pool / verifier hooks when the relevant debug flags are set;
//   2. direct page allocation via MiAllocatePoolPages for "big" requests
//      (the size threshold test is on a dropped line);
//   3. per-CPU then global lookaside lists for small block sizes;
//   4. the descriptor's free lists, splitting an oversized free block into
//      an allocation plus a leftover fragment;
//   5. a fresh page, split into the allocation and a free fragment.
// On allocation failure it bugchecks for must-succeed pool, optionally
// prints/breaks, raises if POOL_RAISE_IF_ALLOCATION_FAILURE, else returns
// NULL. Returns a pointer to the usable data area (header precedes it).
// NOTE(review): throughout this listing, lines that carried hyperlinked
// identifiers (tracker calls, lookaside pops, lock/assert macros, counter
// updates) were dropped by the extraction — restore from upstream before
// building; only the surviving lines are annotated here.
//
1855 PVOID
1856 NTAPI
1859  IN ULONG Tag)
1860 {
1861  PPOOL_DESCRIPTOR PoolDesc;
1862  PLIST_ENTRY ListHead;
1863  PPOOL_HEADER Entry, NextEntry, FragmentEntry;
1864  KIRQL OldIrql;
1865  USHORT BlockSize, i;
1866  ULONG OriginalType;
1867  PKPRCB Prcb = KeGetCurrentPrcb();
1869 
1870  //
1871  // Some sanity checks
1872  //
1873  ASSERT(Tag != 0);
1874  ASSERT(Tag != ' GIB');
1875  ASSERT(NumberOfBytes != 0);
1877 
1878  //
1879  // Not supported in ReactOS
1880  //
1882 
1883  //
1884  // Check if verifier or special pool is enabled
1885  //
1887  {
1888  //
1889  // For verifier, we should call the verification routine
1890  //
1892  {
1893  DPRINT1("Driver Verifier is not yet supported\n");
1894  }
1895 
1896  //
1897  // For special pool, we check if this is a suitable allocation and do
1898  // the special allocation if needed
1899  //
1901  {
1902  //
1903  // Check if this is a special pool allocation
1904  //
1906  {
1907  //
1908  // Try to allocate using special pool
1909  //
1911  if (Entry) return Entry;
1912  }
1913  }
1914  }
1915 
1916  //
1917  // Get the pool type and its corresponding vector for this request
1918  //
1919  OriginalType = PoolType;
1921  PoolDesc = PoolVector[PoolType];
1922  ASSERT(PoolDesc != NULL);
1923 
1924  //
1925  // Check if this is a big page allocation
1926  //
1928  {
1929  //
1930  // Allocate pages for it
1931  //
1932  Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
1933  if (!Entry)
1934  {
1935 #if DBG
1936  //
1937  // Out of memory, display current consumption
1938  // Let's consider that if the caller wanted more
1939  // than a hundred pages, that's a bogus caller
1940  // and we are not out of memory
1941  //
1942  if (NumberOfBytes < 100 * PAGE_SIZE)
1943  {
1944  MiDumpPoolConsumers(FALSE, 0, 0, 0);
1945  }
1946 #endif
1947 
1948  //
1949  // Must succeed pool is deprecated, but still supported. These allocation
1950  // failures must cause an immediate bugcheck
1951  //
1952  if (OriginalType & MUST_SUCCEED_POOL_MASK)
1953  {
1954  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1955  NumberOfBytes,
1958  0);
1959  }
1960 
1961  //
1962  // Internal debugging
1963  //
1964  ExPoolFailures++;
1965 
1966  //
1967  // This flag requests printing failures, and can also further specify
1968  // breaking on failures
1969  //
1971  {
1972  DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
1973  NumberOfBytes,
1974  OriginalType);
1976  }
1977 
1978  //
1979  // Finally, this flag requests an exception, which we are more than
1980  // happy to raise!
1981  //
1982  if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
1983  {
1985  }
1986 
1987  return NULL;
1988  }
1989 
1990  //
1991  // Increment required counters
1992  //
1997 
1998  //
1999  // Add a tag for the big page allocation and switch to the generic "BIG"
2000  // tag if we failed to do so, then insert a tracker for this allocation.
2001  //
2003  Tag,
2005  OriginalType))
2006  {
2007  Tag = ' GIB';
2008  }
2010  return Entry;
2011  }
2012 
2013  //
2014  // Should never request 0 bytes from the pool, but since so many drivers do
2015  // it, we'll just assume they want 1 byte, based on NT's similar behavior
2016  //
2017  if (!NumberOfBytes) NumberOfBytes = 1;
2018 
2019  //
2020  // A pool allocation is defined by its data, a linked list to connect it to
2021  // the free list (if necessary), and a pool header to store accounting info.
2022  // Calculate this size, then convert it into a block size (units of pool
2023  // headers)
2024  //
2025  // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
2026  // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
2027  // the direct allocation of pages.
2028  //
2029  i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
2030  / POOL_BLOCK_SIZE);
2032 
2033  //
2034  // Handle lookaside list optimization for both paged and nonpaged pool
2035  //
2037  {
2038  //
2039  // Try popping it from the per-CPU lookaside list
2040  //
2042  Prcb->PPPagedLookasideList[i - 1].P :
2043  Prcb->PPNPagedLookasideList[i - 1].P;
2044  LookasideList->TotalAllocates++;
2046  if (!Entry)
2047  {
2048  //
2049  // We failed, try popping it from the global list
2050  //
2052  Prcb->PPPagedLookasideList[i - 1].L :
2053  Prcb->PPNPagedLookasideList[i - 1].L;
2054  LookasideList->TotalAllocates++;
2056  }
2057 
2058  //
2059  // If we were able to pop it, update the accounting and return the block
2060  //
2061  if (Entry)
2062  {
2063  LookasideList->AllocateHits++;
2064 
2065  //
2066  // Get the real entry, write down its pool type, and track it
2067  //
2068  Entry--;
2069  Entry->PoolType = OriginalType + 1;
2071  Entry->BlockSize * POOL_BLOCK_SIZE,
2072  OriginalType);
2073 
2074  //
2075  // Return the pool allocation
2076  //
2077  Entry->PoolTag = Tag;
2078  (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2079  (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2080  return POOL_FREE_BLOCK(Entry);
2081  }
2082  }
2083 
2084  //
2085  // Loop in the free lists looking for a block of this size. Start with the
2086  // list optimized for this kind of size lookup
2087  //
2088  ListHead = &PoolDesc->ListHeads[i];
2089  do
2090  {
2091  //
2092  // Are there any free entries available on this list?
2093  //
2094  if (!ExpIsPoolListEmpty(ListHead))
2095  {
2096  //
2097  // Acquire the pool lock now
2098  //
2099  OldIrql = ExLockPool(PoolDesc);
2100 
2101  //
2102  // And make sure the list still has entries
2103  //
2104  if (ExpIsPoolListEmpty(ListHead))
2105  {
2106  //
2107  // Someone raced us (and won) before we had a chance to acquire
2108  // the lock.
2109  //
2110  // Try again!
2111  //
2112  ExUnlockPool(PoolDesc, OldIrql);
2113  continue;
2114  }
2115 
2116  //
2117  // Remove a free entry from the list
2118  // Note that due to the way we insert free blocks into multiple lists
2119  // there is a guarantee that any block on this list will either be
2120  // of the correct size, or perhaps larger.
2121  //
2122  ExpCheckPoolLinks(ListHead);
2123  Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
2124  ExpCheckPoolLinks(ListHead);
2126  ASSERT(Entry->BlockSize >= i);
2127  ASSERT(Entry->PoolType == 0);
2128 
2129  //
2130  // Check if this block is larger than what we need. The block could
2131  // not possibly be smaller, due to the reason explained above (and
2132  // we would've asserted on a checked build if this was the case).
2133  //
2134  if (Entry->BlockSize != i)
2135  {
2136  //
2137  // Is there an entry before this one?
2138  //
2139  if (Entry->PreviousSize == 0)
2140  {
2141  //
2142  // There isn't anyone before us, so take the next block and
2143  // turn it into a fragment that contains the leftover data
2144  // that we don't need to satisfy the caller's request
2145  //
2146  FragmentEntry = POOL_BLOCK(Entry, i);
2147  FragmentEntry->BlockSize = Entry->BlockSize - i;
2148 
2149  //
2150  // And make it point back to us
2151  //
2152  FragmentEntry->PreviousSize = i;
2153 
2154  //
2155  // Now get the block that follows the new fragment and check
2156  // if it's still on the same page as us (and not at the end)
2157  //
2158  NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
2159  if (PAGE_ALIGN(NextEntry) != NextEntry)
2160  {
2161  //
2162  // Adjust this next block to point to our newly created
2163  // fragment block
2164  //
2165  NextEntry->PreviousSize = FragmentEntry->BlockSize;
2166  }
2167  }
2168  else
2169  {
2170  //
2171  // There is a free entry before us, which we know is smaller
2172  // so we'll make this entry the fragment instead
2173  //
2174  FragmentEntry = Entry;
2175 
2176  //
2177  // And then we'll remove from it the actual size required.
2178  // Now the entry is a leftover free fragment
2179  //
2180  Entry->BlockSize -= i;
2181 
2182  //
2183  // Now let's go to the next entry after the fragment (which
2184  // used to point to our original free entry) and make it
2185  // reference the new fragment entry instead.
2186  //
2187  // This is the entry that will actually end up holding the
2188  // allocation!
2189  //
2191  Entry->PreviousSize = FragmentEntry->BlockSize;
2192 
2193  //
2194  // And now let's go to the entry after that one and check if
2195  // it's still on the same page, and not at the end
2196  //
2197  NextEntry = POOL_BLOCK(Entry, i);
2198  if (PAGE_ALIGN(NextEntry) != NextEntry)
2199  {
2200  //
2201  // Make it reference the allocation entry
2202  //
2203  NextEntry->PreviousSize = i;
2204  }
2205  }
2206 
2207  //
2208  // Now our (allocation) entry is the right size
2209  //
2210  Entry->BlockSize = i;
2211 
2212  //
2213  // And the next entry is now the free fragment which contains
2214  // the remaining difference between how big the original entry
2215  // was, and the actual size the caller needs/requested.
2216  //
2217  FragmentEntry->PoolType = 0;
2218  BlockSize = FragmentEntry->BlockSize;
2219 
2220  //
2221  // Now check if enough free bytes remained for us to have a
2222  // "full" entry, which contains enough bytes for a linked list
2223  // and thus can be used for allocations (up to 8 bytes...)
2224  //
2225  ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2226  if (BlockSize != 1)
2227  {
2228  //
2229  // Insert the free entry into the free list for this size
2230  //
2231  ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2232  POOL_FREE_BLOCK(FragmentEntry));
2233  ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2234  }
2235  }
2236 
2237  //
2238  // We have found an entry for this allocation, so set the pool type
2239  // and release the lock since we're done
2240  //
2241  Entry->PoolType = OriginalType + 1;
2243  ExUnlockPool(PoolDesc, OldIrql);
2244 
2245  //
2246  // Increment required counters
2247  //
2248  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2250 
2251  //
2252  // Track this allocation
2253  //
2255  Entry->BlockSize * POOL_BLOCK_SIZE,
2256  OriginalType);
2257 
2258  //
2259  // Return the pool allocation
2260  //
2261  Entry->PoolTag = Tag;
2262  (POOL_FREE_BLOCK(Entry))->Flink = NULL;
2263  (POOL_FREE_BLOCK(Entry))->Blink = NULL;
2264  return POOL_FREE_BLOCK(Entry);
2265  }
2266  } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);
2267 
2268  //
2269  // There were no free entries left, so we have to allocate a new fresh page
2270  //
2271  Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
2272  if (!Entry)
2273  {
2274 #if DBG
2275  //
2276  // Out of memory, display current consumption
2277  // Let's consider that if the caller wanted more
2278  // than a hundred pages, that's a bogus caller
2279  // and we are not out of memory
2280  //
2281  if (NumberOfBytes < 100 * PAGE_SIZE)
2282  {
2283  MiDumpPoolConsumers(FALSE, 0, 0, 0);
2284  }
2285 #endif
2286 
2287  //
2288  // Must succeed pool is deprecated, but still supported. These allocation
2289  // failures must cause an immediate bugcheck
2290  //
2291  if (OriginalType & MUST_SUCCEED_POOL_MASK)
2292  {
2293  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
2294  PAGE_SIZE,
2297  0);
2298  }
2299 
2300  //
2301  // Internal debugging
2302  //
2303  ExPoolFailures++;
2304 
2305  //
2306  // This flag requests printing failures, and can also further specify
2307  // breaking on failures
2308  //
2310  {
2311  DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
2312  NumberOfBytes,
2313  OriginalType);
2315  }
2316 
2317  //
2318  // Finally, this flag requests an exception, which we are more than
2319  // happy to raise!
2320  //
2321  if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
2322  {
2324  }
2325 
2326  //
2327  // Return NULL to the caller in all other cases
2328  //
2329  return NULL;
2330  }
2331 
2332  //
2333  // Setup the entry data
2334  //
2335  Entry->Ulong1 = 0;
2336  Entry->BlockSize = i;
2337  Entry->PoolType = OriginalType + 1;
2338 
2339  //
2340  // This page will have two entries -- one for the allocation (which we just
2341  // created above), and one for the remaining free bytes, which we're about
2342  // to create now. The free bytes are the whole page minus what was allocated
2343  // and then converted into units of block headers.
2344  //
2345  BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
2346  FragmentEntry = POOL_BLOCK(Entry, i);
2347  FragmentEntry->Ulong1 = 0;
2348  FragmentEntry->BlockSize = BlockSize;
2349  FragmentEntry->PreviousSize = i;
2350 
2351  //
2352  // Increment required counters
2353  //
2354  InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
2355  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, Entry->BlockSize * POOL_BLOCK_SIZE);
2356 
2357  //
2358  // Now check if enough free bytes remained for us to have a "full" entry,
2359  // which contains enough bytes for a linked list and thus can be used for
2360  // allocations (up to 8 bytes...)
2361  //
2362  if (FragmentEntry->BlockSize != 1)
2363  {
2364  //
2365  // Excellent -- acquire the pool lock
2366  //
2367  OldIrql = ExLockPool(PoolDesc);
2368 
2369  //
2370  // And insert the free entry into the free list for this block size
2371  //
2372  ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
2373  ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
2374  POOL_FREE_BLOCK(FragmentEntry));
2375  ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
2376 
2377  //
2378  // Release the pool lock
2379  //
2381  ExUnlockPool(PoolDesc, OldIrql);
2382  }
2383  else
2384  {
2385  //
2386  // Simply do a sanity check
2387  //
2389  }
2390 
2391  //
2392  // Increment performance counters and track this allocation
2393  //
2396  Entry->BlockSize * POOL_BLOCK_SIZE,
2397  OriginalType);
2398 
2399  //
2400  // And return the pool allocation
2401  //
2403  Entry->PoolTag = Tag;
2404  return POOL_FREE_BLOCK(Entry);
2405 }
2406 
2407 /*
2408  * @implemented
2409  */
/*
 * ExAllocatePool (the name/parameter lines were dropped by the extraction).
 * Untagged allocation entry point: forwards to the tagged allocator with the
 * default TAG_NONE tag (the forwarding return statement is on a dropped line
 * — TODO confirm against upstream). The disabled DBG-only section would
 * instead derive a four-character tag from the calling driver's image name.
 */
2410 PVOID
2411 NTAPI
2414 {
2415  ULONG Tag = TAG_NONE;
2416 #if 0 && DBG
2417  PLDR_DATA_TABLE_ENTRY LdrEntry;
2418 
2419  /* Use the first four letters of the driver name, or "None" if unavailable */
2420  LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
2422  : NULL;
2423  if (LdrEntry)
2424  {
2425  ULONG i;
2426  Tag = 0;
2427  for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
2428  Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
2429  for (; i < 4; i++)
2430  Tag = Tag >> 8 | ' ' << 24;
2431  }
2432 #endif
2434 }
2435 
2436 /*
2437  * @implemented
2438  */
2439 VOID
2440 NTAPI
2442  IN ULONG TagToFree)
2443 {
2444  PPOOL_HEADER Entry, NextEntry;
2445  USHORT BlockSize;
2446  KIRQL OldIrql;
2448  PPOOL_DESCRIPTOR PoolDesc;
2449  ULONG Tag;
2450  BOOLEAN Combined = FALSE;
2451  PFN_NUMBER PageCount, RealPageCount;
2452  PKPRCB Prcb = KeGetCurrentPrcb();
2455 
2456  //
2457  // Check if any of the debug flags are enabled
2458  //
2465  {
2466  //
2467  // Check if special pool is enabled
2468  //
2470  {
2471  //
2472  // Check if it was allocated from a special pool
2473  //
2475  {
2476  //
2477  // Was deadlock verification also enabled? We can do some extra
2478  // checks at this point
2479  //
2481  {
2482  DPRINT1("Verifier not yet supported\n");
2483  }
2484 
2485  //
2486  // It is, so handle it via special pool free routine
2487  //
2489  return;
2490  }
2491  }
2492 
2493  //
2494  // For non-big page allocations, we'll do a bunch of checks in here
2495  //
2496  if (PAGE_ALIGN(P) != P)
2497  {
2498  //
2499  // Get the entry for this pool allocation
2500  // The pointer math here may look wrong or confusing, but it is quite right
2501  //
2502  Entry = P;
2503  Entry--;
2504 
2505  //
2506  // Get the pool type
2507  //
2508  PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2509 
2510  //
2511  // FIXME: Many other debugging checks go here
2512  //
2514  }
2515  }
2516 
2517  //
2518  // Check if this is a big page allocation
2519  //
2520  if (PAGE_ALIGN(P) == P)
2521  {
2522  //
2523  // We need to find the tag for it, so first we need to find out what
2524  // kind of allocation this was (paged or nonpaged), then we can go
2525  // ahead and try finding the tag for it. Remember to get rid of the
2526  // PROTECTED_POOL tag if it's found.
2527  //
2528  // Note that if at insertion time, we failed to add the tag for a big
2529  // pool allocation, we used a special tag called 'BIG' to identify the
2530  // allocation, and we may get this tag back. In this scenario, we must
2531  // manually get the size of the allocation by actually counting through
2532  // the PFN database.
2533  //
2536  Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
2537  if (!Tag)
2538  {
2539  DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
2540  ASSERT(Tag == ' GIB');
2541  PageCount = 1; // We are going to lie! This might screw up accounting?
2542  }
2543  else if (Tag & PROTECTED_POOL)
2544  {
2545  Tag &= ~PROTECTED_POOL;
2546  }
2547 
2548  //
2549  // Check block tag
2550  //
2551  if (TagToFree && TagToFree != Tag)
2552  {
2553  DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2554  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2555  }
2556 
2557  //
2558  // We have our tag and our page count, so we can go ahead and remove this
2559  // tracker now
2560  //
2561  ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);
2562 
2563  //
2564  // Check if any of the debug flags are enabled
2565  //
2570  {
2571  //
2572  // Was deadlock verification also enabled? We can do some extra
2573  // checks at this point
2574  //
2576  {
2577  DPRINT1("Verifier not yet supported\n");
2578  }
2579 
2580  //
2581  // FIXME: Many debugging checks go here
2582  //
2583  }
2584 
2585  //
2586  // Update counters
2587  //
2588  PoolDesc = PoolVector[PoolType];
2591  -(LONG_PTR)(PageCount << PAGE_SHIFT));
2592 
2593  //
2594  // Do the real free now and update the last counter with the big page count
2595  //
2596  RealPageCount = MiFreePoolPages(P);
2597  ASSERT(RealPageCount == PageCount);
2599  -(LONG)RealPageCount);
2600  return;
2601  }
2602 
2603  //
2604  // Get the entry for this pool allocation
2605  // The pointer math here may look wrong or confusing, but it is quite right
2606  //
2607  Entry = P;
2608  Entry--;
2610 
2611  //
2612  // Get the size of the entry, and it's pool type, then load the descriptor
2613  // for this pool type
2614  //
2615  BlockSize = Entry->BlockSize;
2616  PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
2617  PoolDesc = PoolVector[PoolType];
2618 
2619  //
2620  // Make sure that the IRQL makes sense
2621  //
2623 
2624  //
2625  // Get the pool tag and get rid of the PROTECTED_POOL flag
2626  //
2627  Tag = Entry->PoolTag;
2628  if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;
2629 
2630  //
2631  // Check block tag
2632  //
2633  if (TagToFree && TagToFree != Tag)
2634  {
2635  DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
2636  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
2637  }
2638 
2639  //
2640  // Track the removal of this allocation
2641  //
2643  BlockSize * POOL_BLOCK_SIZE,
2644  Entry->PoolType - 1);
2645 
2646  //
2647  // Release pool quota, if any
2648  //
2649  if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
2650  {
2651  Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
2652  if (Process)
2653  {
2654  if (Process->Pcb.Header.Type != ProcessObject)
2655  {
2656  DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
2657  Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
2658  KeBugCheckEx(BAD_POOL_CALLER,
2659  0x0D,
2660  (ULONG_PTR)P,
2661  Tag,
2662  (ULONG_PTR)Process);
2663  }
2666  }
2667  }
2668 
2669  //
2670  // Is this allocation small enough to have come from a lookaside list?
2671  //
2672  if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
2673  {
2674  //
2675  // Try pushing it into the per-CPU lookaside list
2676  //
2678  Prcb->PPPagedLookasideList[BlockSize - 1].P :
2679  Prcb->PPNPagedLookasideList[BlockSize - 1].P;
2680  LookasideList->TotalFrees++;
2681  if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2682  {
2683  LookasideList->FreeHits++;
2685  return;
2686  }
2687 
2688  //
2689  // We failed, try to push it into the global lookaside list
2690  //
2692  Prcb->PPPagedLookasideList[BlockSize - 1].L :
2693  Prcb->PPNPagedLookasideList[BlockSize - 1].L;
2694  LookasideList->TotalFrees++;
2695  if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
2696  {
2697  LookasideList->FreeHits++;
2699  return;
2700  }
2701  }
2702 
2703  //
2704  // Get the pointer to the next entry
2705  //
2706  NextEntry = POOL_BLOCK(Entry, BlockSize);
2707 
2708  //
2709  // Update performance counters
2710  //
2712  InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);
2713 
2714  //
2715  // Acquire the pool lock
2716  //
2717  OldIrql = ExLockPool(PoolDesc);
2718 
2719  //
2720  // Check if the next allocation is at the end of the page
2721  //
2723  if (PAGE_ALIGN(NextEntry) != NextEntry)
2724  {
2725  //
2726  // We may be able to combine the block if it's free
2727  //
2728  if (NextEntry->PoolType == 0)
2729  {
2730  //
2731  // The next block is free, so we'll do a combine
2732  //
2733  Combined = TRUE;
2734 
2735  //
2736  // Make sure there's actual data in the block -- anything smaller
2737  // than this means we only have the header, so there's no linked list
2738  // for us to remove
2739  //
2740  if ((NextEntry->BlockSize != 1))
2741  {
2742  //
2743  // The block is at least big enough to have a linked list, so go
2744  // ahead and remove it
2745  //
2746  ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2748  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2749  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2750  }
2751 
2752  //
2753  // Our entry is now combined with the next entry
2754  //
2755  Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
2756  }
2757  }
2758 
2759  //
2760  // Now check if there was a previous entry on the same page as us
2761  //
2762  if (Entry->PreviousSize)
2763  {
2764  //
2765  // Great, grab that entry and check if it's free
2766  //
2767  NextEntry = POOL_PREV_BLOCK(Entry);
2768  if (NextEntry->PoolType == 0)
2769  {
2770  //
2771  // It is, so we can do a combine
2772  //
2773  Combined = TRUE;
2774 
2775  //
2776  // Make sure there's actual data in the block -- anything smaller
2777  // than this means we only have the header so there's no linked list
2778  // for us to remove
2779  //
2780  if ((NextEntry->BlockSize != 1))
2781  {
2782  //
2783  // The block is at least big enough to have a linked list, so go
2784  // ahead and remove it
2785  //
2786  ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
2788  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
2789  ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
2790  }
2791 
2792  //
2793  // Combine our original block (which might've already been combined
2794  // with the next block), into the previous block
2795  //
2796  NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;
2797 
2798  //
2799  // And now we'll work with the previous block instead
2800  //
2801  Entry = NextEntry;
2802  }
2803  }
2804 
2805  //
2806  // By now, it may have been possible for our combined blocks to actually
2807  // have made up a full page (if there were only 2-3 allocations on the
2808  // page, they could've all been combined).
2809  //
2810  if ((PAGE_ALIGN(Entry) == Entry) &&
2812  {
2813  //
2814  // In this case, release the pool lock, update the performance counter,
2815  // and free the page
2816  //
2817  ExUnlockPool(PoolDesc, OldIrql);
2818  InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
2820  return;
2821  }
2822 
2823  //
2824  // Otherwise, we now have a free block (or a combination of 2 or 3)
2825  //
2826  Entry->PoolType = 0;
2827  BlockSize = Entry->BlockSize;
2828  ASSERT(BlockSize != 1);
2829 
2830  //
2831  // Check if we actually did combine it with anyone
2832  //
2833  if (Combined)
2834  {
2835  //
2836  // Get the first combined block (either our original to begin with, or
2837  // the one after the original, depending if we combined with the previous)
2838  //
2839  NextEntry = POOL_NEXT_BLOCK(Entry);
2840 
2841  //
2842  // As long as the next block isn't on a page boundary, have it point
2843  // back to us
2844  //
2845  if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
2846  }
2847 
2848  //
2849  // Insert this new free block, and release the pool lock
2850  //
2851  ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
2853  ExUnlockPool(PoolDesc, OldIrql);
2854 }
2855 
2856 /*
2857  * @implemented
2858  */
2859 VOID
2860 NTAPI
2862 {
2863  //
2864  // Just free without checking for the tag
2865  //
2866  ExFreePoolWithTag(P, 0);
2867 }
2868 
2869 /*
2870  * @unimplemented
2871  */
2872 SIZE_T
2873 NTAPI
2876 {
2877  //
2878  // Not implemented
2879  //
2880  UNIMPLEMENTED;
2881  return FALSE;
2882 }
2883 
2884 /*
2885  * @implemented
2886  */
2887 
2888 PVOID
2889 NTAPI
2892 {
2893  //
2894  // Allocate the pool
2895  //
2897 }
2898 
2899 /*
2900  * @implemented
2901  */
2902 PVOID
2903 NTAPI
2906  IN ULONG Tag,
2908 {
2909  PVOID Buffer;
2910 
2911  //
2912  // Allocate the pool
2913  //
2915  if (Buffer == NULL)
2916  {
2917  UNIMPLEMENTED;
2918  }
2919 
2920  return Buffer;
2921 }
2922 
2923 /*
2924  * @implemented
2925  */
2926 PVOID
2927 NTAPI
2930  IN ULONG Tag)
2931 {
2932  BOOLEAN Raise = TRUE;
2933  PVOID Buffer;
2935  NTSTATUS Status;
2937 
2938  //
2939  // Check if we should fail instead of raising an exception
2940  //
2942  {
2943  Raise = FALSE;
2945  }
2946 
2947  //
2948  // Inject the pool quota mask
2949  //
2951 
2952  //
2953  // Check if we have enough space to add the quota owner process, as long as
2954  // this isn't the system process, which never gets charged quota
2955  //
2956  ASSERT(NumberOfBytes != 0);
2957  if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2959  {
2960  //
2961  // Add space for our EPROCESS pointer
2962  //
2963  NumberOfBytes += sizeof(PEPROCESS);
2964  }
2965  else
2966  {
2967  //
2968  // We won't be able to store the pointer, so don't use quota for this
2969  //
2971  }
2972 
2973  //
2974  // Allocate the pool buffer now
2975  //
2977 
2978  //
2979  // If the buffer is page-aligned, this is a large page allocation and we
2980  // won't touch it
2981  //
2982  if (PAGE_ALIGN(Buffer) != Buffer)
2983  {
2984  //
2985  // Also if special pool is enabled, and this was allocated from there,
2986  // we won't touch it either
2987  //
2990  {
2991  return Buffer;
2992  }
2993 
2994  //
2995  // If it wasn't actually allocated with quota charges, ignore it too
2996  //
2997  if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2998 
2999  //
3000  // If this is the system process, we don't charge quota, so ignore
3001  //
3002  if (Process == PsInitialSystemProcess) return Buffer;
3003 
3004  //
3005  // Actually go and charge quota for the process now
3006  //
3007  Entry = POOL_ENTRY(Buffer);
3010  Entry->BlockSize * POOL_BLOCK_SIZE);
3011  if (!NT_SUCCESS(Status))
3012  {
3013  //
3014  // Quota failed, back out the allocation, clear the owner, and fail
3015  //
3016  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
3018  if (Raise) RtlRaiseStatus(Status);
3019  return NULL;
3020  }
3021 
3022  //
3023  // Quota worked, write the owner and then reference it before returning
3024  //
3025  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
3027  }
3028  else if (!(Buffer) && (Raise))
3029  {
3030  //
3031  // The allocation failed, raise an error if we are in raise mode
3032  //
3034  }
3035 
3036  //
3037  // Return the allocated buffer
3038  //
3039  return Buffer;
3040 }
3041 
3042 /* EOF */
_IRQL_requires_(DISPATCH_LEVEL)
Definition: expool.c:1455
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
PVOID NTAPI ExAllocatePoolWithTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:1857
static int Hash(const char *)
Definition: reader.c:2258
INIT_FUNCTION VOID NTAPI InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:1016
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:39
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define IN
Definition: typedefs.h:38
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:625
#define max(a, b)
Definition: svc.c:63
ASMGENDATA Table[]
Definition: genincdata.c:61
#define TRUE
Definition: types.h:120
NTSYSAPI VOID NTAPI RtlCopyMemory(VOID UNALIGNED *Destination, CONST VOID UNALIGNED *Source, ULONG Length)
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:90
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:130
ULONG PagedAllocs
Definition: extypes.h:1129
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:112
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
SIZE_T PoolTrackTableMask
Definition: expool.c:38
#define SESSION_POOL_MASK
Definition: mm.h:102
VOID MiDumpPoolConsumers(BOOLEAN CalledFromDbg, ULONG Tag, ULONG Mask, ULONG Flags)
IN PLARGE_INTEGER IN PLARGE_INTEGER PEPROCESS ULONG Key
Definition: fatprocs.h:2697
PVOID NTAPI ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:2928
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
NTSTATUS NTAPI PsChargeProcessPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:219
LIST_ENTRY ExPoolLookasideListHead
Definition: lookas.c:26
#define POOL_FLAG_VERIFIER
Definition: miarm.h:260
struct _Entry Entry
Definition: kefuncs.h:640
SIZE_T PoolTrackTableSize
Definition: expool.c:28
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:287
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
BOOL Verbose
Definition: chkdsk.c:72
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:323
__wchar_t WCHAR
Definition: xmlstorage.h:180
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:262
struct _LIST_ENTRY * Blink
Definition: typedefs.h:120
#define ExReleaseSpinLock(Lock, OldIrql)
char CHAR
Definition: xmlstorage.h:175
LONG NTSTATUS
Definition: precomp.h:26
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1062
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:83
_In_ KPRIORITY Priority
Definition: kefuncs.h:516
SIZE_T PoolTrackTableSize
Definition: expool.c:38
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3377
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:264
#define ExRaiseStatus
Definition: ntoskrnl.h:95
#define MAXULONG_PTR
Definition: basetsd.h:103
#define POOL_PREV_BLOCK(x)
Definition: expool.c:56
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:119
SIZE_T TotalBytes
Definition: miarm.h:300
LONG_PTR SSIZE_T
Definition: basetsd.h:183
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1805
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:169
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:284
VOID NTAPI ObDereferenceObject(IN PVOID Object)
Definition: obref.c:375
ULONG NonPagedFrees
Definition: extypes.h:1133
#define NT_VERIFY(exp)
Definition: rtlfuncs.h:3289
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:37
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:105
void DbgBreakPoint()
Definition: mach.c:558
#define TAG_NONE
Definition: tag.h:127
ULONG RunningDeAllocs
Definition: miarm.h:293
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:302
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
#define ExAcquireSpinLock(Lock, OldIrql)
_Must_inspect_result_ FORCEINLINE BOOLEAN IsListEmpty(_In_ const LIST_ENTRY *ListHead)
Definition: rtlfuncs.h:57
ULONG PoolHitTag
Definition: expool.c:44
static int Link(const char **args)
Definition: vfdcmd.c:2414
PSLIST_ENTRY WINAPI InterlockedPopEntrySList(PSLIST_HEADER ListHead)
Definition: interlocked.c:55
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1546
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1352
uint32_t ULONG_PTR
Definition: typedefs.h:63
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE GENERAL_LOOKASIDE
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:46
FORCEINLINE ULONG KeGetCurrentProcessorNumber(VOID)
Definition: ke.h:325
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:257
UCHAR KIRQL
Definition: env_spec_w32.h:591
#define POOL_FREE_BLOCK(x)
Definition: expool.c:53
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
GLenum GLclampf GLint i
Definition: glfuncs.h:14
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
ULONG PFN_NUMBER
Definition: ke.h:8
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:30
NTSTATUS(* NTAPI)(IN PFILE_FULL_EA_INFORMATION EaBuffer, IN ULONG EaLength, OUT PULONG ErrorOffset)
Definition: IoEaTest.cpp:117
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:36
long LONG
Definition: pedump.c:60
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:55
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
SIZE_T PoolBigPageTableSize
Definition: expool.c:39
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
VOID NTAPI PsReturnPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:236
#define POOL_BLOCK(x, i)
Definition: expool.c:54
#define PsGetCurrentProcess
Definition: psfuncs.h:17
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:29
FORCEINLINE VOID KeInitializeSpinLock(_Out_ PKSPIN_LOCK SpinLock)
Definition: kefuncs.h:251
_Must_inspect_result_ _In_ LPCGUID ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _In_ ULONG PoolTag
Definition: fltkernel.h:2520
unsigned char BOOLEAN
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:184
smooth NULL
Definition: ftsmooth.c:416
#define POOL_ENTRY(x)
Definition: expool.c:52
ULONG ExpNumberOfPagedPools
Definition: expool.c:33
#define INIT_FUNCTION
Definition: dfs.h:10
#define FORCEINLINE
Definition: ntbasedef.h:221
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:426
IN PSCSI_REQUEST_BLOCK IN OUT NTSTATUS IN OUT BOOLEAN * Retry
Definition: class2.h:49
void DPRINT(...)
Definition: polytest.cpp:61
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:47
SIZE_T NonPagedBytes
Definition: miarm.h:351
Definition: bufpool.h:45
ULONG NonPagedAllocs
Definition: extypes.h:1132
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1647
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
void * PVOID
Definition: retypes.h:9
struct _POOL_HEADER * PPOOL_HEADER
#define InterlockedExchangeAdd
Definition: interlocked.h:181
PFLT_MESSAGE_WAITER_QUEUE CONTAINING_RECORD(Csq, DEVICE_EXTENSION, IrpQueue)) -> WaiterQ.mLock) _IRQL_raises_(DISPATCH_LEVEL) VOID NTAPI FltpAcquireMessageWaiterLock(_In_ PIO_CSQ Csq, _Out_ PKIRQL Irql)
Definition: Messaging.c:560
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:283
PVOID NTAPI ExAllocatePool(POOL_TYPE PoolType, SIZE_T NumberOfBytes)
Definition: expool.c:2412
SIZE_T NonPagedUsed
Definition: extypes.h:1134
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:367
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:326
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:675
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:76
struct _LIST_ENTRY * Flink
Definition: typedefs.h:119
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
_In_ LARGE_INTEGER _In_opt_ PKDPC Dpc
Definition: kefuncs.h:524
BOOLEAN ExStopBadTags
Definition: expool.c:45
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:406
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:755
_Inout_ PVOID Lookaside
Definition: fltkernel.h:2532
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:945
ULONG RunningAllocs
Definition: miarm.h:292
if(!(yy_init))
Definition: macro.lex.yy.c:717
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:41
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:24
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:35
INT POOL_TYPE
Definition: typedefs.h:76
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1317
uint64_t ULONGLONG
Definition: typedefs.h:65
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:34
VOID KdbpPrint(IN PCHAR Format, IN ... OPTIONAL)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:2472
ULONG ExpBigTableExpansionFailed
Definition: expool.c:40
SIZE_T PagedUsed
Definition: extypes.h:1131
static const UCHAR Index[8]
Definition: usbohci.c:18
#define PROTECTED_POOL
Definition: extypes.h:294
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
#define PAGE_ALIGN(Va)
LONG NonPagedFrees
Definition: miarm.h:350
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:27
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:99
SIZE_T PagedBytes
Definition: miarm.h:354
ULONG TotalPages
Definition: miarm.h:294
ASSERT((InvokeOnSuccess||InvokeOnError||InvokeOnCancel) ?(CompletionRoutine !=NULL) :TRUE)
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:420
#define BYTES_TO_PAGES(Size)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:905
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:154
char * PBOOLEAN
Definition: retypes.h:11
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1269
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:803
Definition: ketypes.h:687
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:407
Definition: btrfs_drv.h:1801
#define InterlockedDecrementUL(Addend)
Definition: ex.h:1496
ULONG PagedFrees
Definition: extypes.h:1130
_Must_inspect_result_ _In_ USHORT NewSize
Definition: fltkernel.h:975
#define PAGE_SIZE
Definition: env_spec_w32.h:49
Definition: typedefs.h:117
NTKERNELAPI PSLIST_ENTRY FASTCALL InterlockedPushEntrySList(IN PSLIST_HEADER ListHead, IN PSLIST_ENTRY ListEntry)
Definition: interlocked.c:82
Definition: copy.c:32
IN PVOID IN PVOID IN USHORT IN USHORT Size
Definition: pci.h:359
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2890
UCHAR KeNumberNodes
Definition: krnlinit.c:40
Status
Definition: gdiplustypes.h:24
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:675
INIT_FUNCTION VOID NTAPI ExpSeedHotTags(VOID)
Definition: expool.c:635
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
ULONG TotalBigPages
Definition: miarm.h:295
#define _In_
Definition: no_sal2.h:204
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:261
ULONG_PTR SIZE_T
Definition: typedefs.h:78
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define InterlockedIncrement
Definition: armddk.h:53
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:1012
USHORT PreviousSize
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:286
#define ROUND_TO_PAGES(Size)
unsigned short USHORT
Definition: pedump.c:61
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:983
#define InterlockedIncrementUL(Addend)
Definition: ex.h:1499
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2874
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:258
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:143
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:254
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2904
ULONG ExPoolFailures
Definition: expool.c:49
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:846
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
unsigned int * PULONG
Definition: retypes.h:1
#define min(a, b)
Definition: monoChain.cc:55
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:432
INIT_FUNCTION VOID NTAPI ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:965
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:263
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:448
#define DPRINT1
Definition: precomp.h:8
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:3946
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:250
IN ULONG IN ULONG Tag
Definition: evtlib.h:159
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:42
SIZE_T PoolBigPageTableHash
Definition: expool.c:39
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:219
void * _ReturnAddress(void)
LONG NonPagedAllocs
Definition: miarm.h:349
#define OUT
Definition: typedefs.h:39
#define ObReferenceObject
Definition: obfuncs.h:204
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:3970
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1293
ULONG ExpPoolFlags
Definition: expool.c:48
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _Inout_ PLARGE_INTEGER NumberOfBytes
Definition: iotypes.h:997
struct tagContext Context
Definition: acpixf.h:1020
unsigned int ULONG
Definition: retypes.h:1
VOID NTAPI ExFreePool(PVOID P)
Definition: expool.c:2861
#define UNIMPLEMENTED
Definition: debug.h:114
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:261
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
#define ULONG_PTR
Definition: config.h:101
uint32_t * PULONG_PTR
Definition: typedefs.h:63
#define ALIGN_UP_BY(size, align)
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
ULONG TagUlong
Definition: extypes.h:1127
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
#define POOL_MAX_ALLOC
Definition: miarm.h:252
#define POOL_RAISE_IF_ALLOCATION_FAILURE
#define POOL_BLOCK_SIZE
Definition: miarm.h:248
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:142
IN BOOLEAN OUT PSTR Buffer
Definition: progress.h:34
VOID NTAPI ExFreePoolWithTag(IN PVOID P, IN ULONG TagToFree)
Definition: expool.c:2441
return STATUS_SUCCESS
Definition: btrfs.c:2725
_Must_inspect_result_ _In_ FLT_CONTEXT_TYPE _In_ SIZE_T _In_ POOL_TYPE PoolType
Definition: fltkernel.h:1444
signed int * PLONG
Definition: retypes.h:5
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
#define APC_LEVEL
Definition: env_spec_w32.h:695
base of all file and directory entries
Definition: entries.h:82
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:107
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1721
#define _IRQL_restores_
Definition: no_sal2.h:653
#define P(row, col)
Definition: m_matrix.c:147
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2551
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1025
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:259
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:626
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:43
_In_ PSTORAGE_PROPERTY_ID _Outptr_ PSTORAGE_DESCRIPTOR_HEADER * Descriptor
Definition: classpnp.h:966
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:675