/*
 * ReactOS r73918 — ntoskrnl/mm/ARM3/expool.c
 * (Doxygen-generated listing; see file header below for project details.)
 */
1 /*
2  * PROJECT: ReactOS Kernel
3  * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE: ntoskrnl/mm/ARM3/expool.c
5  * PURPOSE: ARM Memory Manager Executive Pool Manager
6  * PROGRAMMERS: ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 #undef ExAllocatePoolWithQuota
19 #undef ExAllocatePoolWithQuotaTag
20 
21 /* GLOBALS ********************************************************************/
22 
23 #define POOL_BIG_TABLE_ENTRY_FREE 0x1
24 
25 typedef struct _POOL_DPC_CONTEXT
26 {
32 
49 
/* Pool block/header/list access macros */

/* Header that precedes a user allocation pointer */
#define POOL_ENTRY(x)      (PPOOL_HEADER)((ULONG_PTR)(x) - sizeof(POOL_HEADER))
/* Free-list LIST_ENTRY stored immediately after a pool header */
#define POOL_FREE_BLOCK(x) (PLIST_ENTRY)((ULONG_PTR)(x) + sizeof(POOL_HEADER))
/* Header located i pool blocks away from x (i may be negative) */
#define POOL_BLOCK(x, i)   (PPOOL_HEADER)((ULONG_PTR)(x) + ((i) * POOL_BLOCK_SIZE))
/* Neighbouring headers, derived from the size fields in the header itself */
#define POOL_NEXT_BLOCK(x) POOL_BLOCK((x), (x)->BlockSize)
#define POOL_PREV_BLOCK(x) POOL_BLOCK((x), -((x)->PreviousSize))
56 
57 /*
58  * Pool list access debug macros, similar to Arthur's pfnlist.c work.
59  * Microsoft actually implements similar checks in the Windows Server 2003 SP1
60  * pool code, but only for checked builds.
61  *
62  * As of Vista, however, an MSDN Blog entry by a Security Team Manager indicates
63  * that these checks are done even on retail builds, due to the increasing
64  * number of kernel-mode attacks which depend on dangling list pointers and other
65  * kinds of list-based attacks.
66  *
67  * For now, I will leave these checks on all the time, but later they are likely
68  * to be DBG-only, at least until there are enough kernel-mode security attacks
69  * against ReactOS to warrant the performance hit.
70  *
71  * For now, these are not made inline, so we can get good stack traces.
72  */
74 NTAPI
76 {
77  return (PLIST_ENTRY)((ULONG_PTR)Link & ~1);
78 }
79 
81 NTAPI
83 {
84  return (PLIST_ENTRY)((ULONG_PTR)Link | 1);
85 }
86 
87 VOID
88 NTAPI
90 {
91  if ((ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Flink)->Blink) != ListHead) ||
92  (ExpDecodePoolLink(ExpDecodePoolLink(ListHead->Blink)->Flink) != ListHead))
93  {
94  KeBugCheckEx(BAD_POOL_HEADER,
95  3,
96  (ULONG_PTR)ListHead,
99  }
100 }
101 
102 VOID
103 NTAPI
105 {
106  ListHead->Flink = ListHead->Blink = ExpEncodePoolLink(ListHead);
107 }
108 
109 BOOLEAN
110 NTAPI
112 {
113  return (ExpDecodePoolLink(ListHead->Flink) == ListHead);
114 }
115 
116 VOID
117 NTAPI
119 {
120  PLIST_ENTRY Blink, Flink;
121  Flink = ExpDecodePoolLink(Entry->Flink);
122  Blink = ExpDecodePoolLink(Entry->Blink);
123  Flink->Blink = ExpEncodePoolLink(Blink);
124  Blink->Flink = ExpEncodePoolLink(Flink);
125 }
126 
128 NTAPI
130 {
131  PLIST_ENTRY Entry, Flink;
132  Entry = ExpDecodePoolLink(ListHead->Flink);
133  Flink = ExpDecodePoolLink(Entry->Flink);
134  ListHead->Flink = ExpEncodePoolLink(Flink);
135  Flink->Blink = ExpEncodePoolLink(ListHead);
136  return Entry;
137 }
138 
140 NTAPI
142 {
143  PLIST_ENTRY Entry, Blink;
144  Entry = ExpDecodePoolLink(ListHead->Blink);
145  Blink = ExpDecodePoolLink(Entry->Blink);
146  ListHead->Blink = ExpEncodePoolLink(Blink);
147  Blink->Flink = ExpEncodePoolLink(ListHead);
148  return Entry;
149 }
150 
151 VOID
152 NTAPI
155 {
156  PLIST_ENTRY Blink;
157  ExpCheckPoolLinks(ListHead);
158  Blink = ExpDecodePoolLink(ListHead->Blink);
159  Entry->Flink = ExpEncodePoolLink(ListHead);
160  Entry->Blink = ExpEncodePoolLink(Blink);
161  Blink->Flink = ExpEncodePoolLink(Entry);
162  ListHead->Blink = ExpEncodePoolLink(Entry);
163  ExpCheckPoolLinks(ListHead);
164 }
165 
166 VOID
167 NTAPI
170 {
171  PLIST_ENTRY Flink;
172  ExpCheckPoolLinks(ListHead);
173  Flink = ExpDecodePoolLink(ListHead->Flink);
174  Entry->Flink = ExpEncodePoolLink(Flink);
175  Entry->Blink = ExpEncodePoolLink(ListHead);
176  Flink->Blink = ExpEncodePoolLink(Entry);
177  ListHead->Flink = ExpEncodePoolLink(Entry);
178  ExpCheckPoolLinks(ListHead);
179 }
180 
181 VOID
182 NTAPI
184 {
185  PPOOL_HEADER PreviousEntry, NextEntry;
186 
187  /* Is there a block before this one? */
188  if (Entry->PreviousSize)
189  {
190  /* Get it */
191  PreviousEntry = POOL_PREV_BLOCK(Entry);
192 
193  /* The two blocks must be on the same page! */
194  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(PreviousEntry))
195  {
196  /* Something is awry */
197  KeBugCheckEx(BAD_POOL_HEADER,
198  6,
199  (ULONG_PTR)PreviousEntry,
200  __LINE__,
201  (ULONG_PTR)Entry);
202  }
203 
204  /* This block should also indicate that it's as large as we think it is */
205  if (PreviousEntry->BlockSize != Entry->PreviousSize)
206  {
207  /* Otherwise, someone corrupted one of the sizes */
208  DPRINT1("PreviousEntry BlockSize %lu, tag %.4s. Entry PreviousSize %lu, tag %.4s\n",
209  PreviousEntry->BlockSize, (char *)&PreviousEntry->PoolTag,
210  Entry->PreviousSize, (char *)&Entry->PoolTag);
211  KeBugCheckEx(BAD_POOL_HEADER,
212  5,
213  (ULONG_PTR)PreviousEntry,
214  __LINE__,
215  (ULONG_PTR)Entry);
216  }
217  }
218  else if (PAGE_ALIGN(Entry) != Entry)
219  {
220  /* If there's no block before us, we are the first block, so we should be on a page boundary */
221  KeBugCheckEx(BAD_POOL_HEADER,
222  7,
223  0,
224  __LINE__,
225  (ULONG_PTR)Entry);
226  }
227 
228  /* This block must have a size */
229  if (!Entry->BlockSize)
230  {
231  /* Someone must've corrupted this field */
232  if (Entry->PreviousSize)
233  {
234  PreviousEntry = POOL_PREV_BLOCK(Entry);
235  DPRINT1("PreviousEntry tag %.4s. Entry tag %.4s\n",
236  (char *)&PreviousEntry->PoolTag,
237  (char *)&Entry->PoolTag);
238  }
239  else
240  {
241  DPRINT1("Entry tag %.4s\n",
242  (char *)&Entry->PoolTag);
243  }
244  KeBugCheckEx(BAD_POOL_HEADER,
245  8,
246  0,
247  __LINE__,
248  (ULONG_PTR)Entry);
249  }
250 
251  /* Okay, now get the next block */
252  NextEntry = POOL_NEXT_BLOCK(Entry);
253 
254  /* If this is the last block, then we'll be page-aligned, otherwise, check this block */
255  if (PAGE_ALIGN(NextEntry) != NextEntry)
256  {
257  /* The two blocks must be on the same page! */
258  if (PAGE_ALIGN(Entry) != PAGE_ALIGN(NextEntry))
259  {
260  /* Something is messed up */
261  KeBugCheckEx(BAD_POOL_HEADER,
262  9,
263  (ULONG_PTR)NextEntry,
264  __LINE__,
265  (ULONG_PTR)Entry);
266  }
267 
268  /* And this block should think we are as large as we truly are */
269  if (NextEntry->PreviousSize != Entry->BlockSize)
270  {
271  /* Otherwise, someone corrupted the field */
272  DPRINT1("Entry BlockSize %lu, tag %.4s. NextEntry PreviousSize %lu, tag %.4s\n",
273  Entry->BlockSize, (char *)&Entry->PoolTag,
274  NextEntry->PreviousSize, (char *)&NextEntry->PoolTag);
275  KeBugCheckEx(BAD_POOL_HEADER,
276  5,
277  (ULONG_PTR)NextEntry,
278  __LINE__,
279  (ULONG_PTR)Entry);
280  }
281  }
282 }
283 
284 VOID
285 NTAPI
287  PVOID P,
289  ULONG Tag)
290 {
292  ULONG i;
293  KIRQL OldIrql;
294  POOL_TYPE RealPoolType;
295 
296  /* Get the pool header */
297  Entry = ((PPOOL_HEADER)P) - 1;
298 
299  /* Check if this is a large allocation */
300  if (PAGE_ALIGN(P) == P)
301  {
302  /* Lock the pool table */
304 
305  /* Find the pool tag */
306  for (i = 0; i < PoolBigPageTableSize; i++)
307  {
308  /* Check if this is our allocation */
309  if (PoolBigPageTable[i].Va == P)
310  {
311  /* Make sure the tag is ok */
312  if (PoolBigPageTable[i].Key != Tag)
313  {
314  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, PoolBigPageTable[i].Key, Tag);
315  }
316 
317  break;
318  }
319  }
320 
321  /* Release the lock */
323 
324  if (i == PoolBigPageTableSize)
325  {
326  /* Did not find the allocation */
327  //ASSERT(FALSE);
328  }
329 
330  /* Get Pool type by address */
331  RealPoolType = MmDeterminePoolType(P);
332  }
333  else
334  {
335  /* Verify the tag */
336  if (Entry->PoolTag != Tag)
337  {
338  DPRINT1("Allocation has wrong pool tag! Expected '%.4s', got '%.4s' (0x%08lx)\n",
339  &Tag, &Entry->PoolTag, Entry->PoolTag);
340  KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Entry->PoolTag, Tag);
341  }
342 
343  /* Check the rest of the header */
344  ExpCheckPoolHeader(Entry);
345 
346  /* Get Pool type from entry */
347  RealPoolType = (Entry->PoolType - 1);
348  }
349 
350  /* Should we check the pool type? */
351  if (PoolType != -1)
352  {
353  /* Verify the pool type */
354  if (RealPoolType != PoolType)
355  {
356  DPRINT1("Wrong pool type! Expected %s, got %s\n",
357  PoolType & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool",
358  (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK ? "PagedPool" : "NonPagedPool");
359  KeBugCheckEx(BAD_POOL_CALLER, 0xCC, (ULONG_PTR)P, Entry->PoolTag, Tag);
360  }
361  }
362 }
363 
364 VOID
365 NTAPI
367 {
368  BOOLEAN FoundBlock = FALSE;
369  SIZE_T Size = 0;
371 
372  /* Get the first entry for this page, make sure it really is the first */
373  Entry = PAGE_ALIGN(Block);
374  ASSERT(Entry->PreviousSize == 0);
375 
376  /* Now scan each entry */
377  while (TRUE)
378  {
379  /* When we actually found our block, remember this */
380  if (Entry == Block) FoundBlock = TRUE;
381 
382  /* Now validate this block header */
383  ExpCheckPoolHeader(Entry);
384 
385  /* And go to the next one, keeping track of our size */
386  Size += Entry->BlockSize;
387  Entry = POOL_NEXT_BLOCK(Entry);
388 
389  /* If we hit the last block, stop */
390  if (Size >= (PAGE_SIZE / POOL_BLOCK_SIZE)) break;
391 
392  /* If we hit the end of the page, stop */
393  if (PAGE_ALIGN(Entry) == Entry) break;
394  }
395 
396  /* We must've found our block, and we must have hit the end of the page */
397  if ((PAGE_ALIGN(Entry) != Entry) || !(FoundBlock))
398  {
399  /* Otherwise, the blocks are messed up */
400  KeBugCheckEx(BAD_POOL_HEADER, 10, (ULONG_PTR)Block, __LINE__, (ULONG_PTR)Entry);
401  }
402 }
403 
405 VOID
408  IN PVOID Entry)
409 {
410  //
411  // Validate IRQL: It must be APC_LEVEL or lower for Paged Pool, and it must
412  // be DISPATCH_LEVEL or lower for Non Paged Pool
413  //
414  if (((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) ?
417  {
418  //
419  // Take the system down
420  //
421  KeBugCheckEx(BAD_POOL_CALLER,
424  PoolType,
425  !Entry ? NumberOfBytes : (ULONG_PTR)Entry);
426  }
427 }
428 
430 ULONG
432  IN SIZE_T BucketMask)
433 {
434  //
435  // Compute the hash by multiplying with a large prime number and then XORing
436  // with the HIDWORD of the result.
437  //
438  // Finally, AND with the bucket mask to generate a valid index/bucket into
439  // the table
440  //
441  ULONGLONG Result = (ULONGLONG)40543 * Tag;
442  return (ULONG)BucketMask & ((ULONG)Result ^ (Result >> 32));
443 }
444 
446 ULONG
448 {
449  ULONG Result;
450  //
451  // Compute the hash by converting the address into a page number, and then
452  // XORing each nibble with the next one.
453  //
454  // We do *NOT* AND with the bucket mask at this point because big table expansion
455  // might happen. Therefore, the final step of the hash must be performed
456  // while holding the expansion pushlock, and this is why we call this a
457  // "partial" hash only.
458  //
459  Result = (ULONG)((ULONG_PTR)BaseAddress >> PAGE_SHIFT);
460  return (Result >> 24) ^ (Result >> 16) ^ (Result >> 8) ^ Result;
461 }
462 
463 /* PRIVATE FUNCTIONS **********************************************************/
464 
465 VOID
466 NTAPI
469 {
470  ULONG i, Key, Hash, Index;
471  PPOOL_TRACKER_TABLE TrackTable = PoolTrackTable;
472  ULONG TagList[] =
473  {
474  ' oI',
475  ' laH',
476  'PldM',
477  'LooP',
478  'tSbO',
479  ' prI',
480  'bdDN',
481  'LprI',
482  'pOoI',
483  ' ldM',
484  'eliF',
485  'aVMC',
486  'dSeS',
487  'CFtN',
488  'looP',
489  'rPCT',
490  'bNMC',
491  'dTeS',
492  'sFtN',
493  'TPCT',
494  'CPCT',
495  ' yeK',
496  'qSbO',
497  'mNoI',
498  'aEoI',
499  'cPCT',
500  'aFtN',
501  '0ftN',
502  'tceS',
503  'SprI',
504  'ekoT',
505  ' eS',
506  'lCbO',
507  'cScC',
508  'lFtN',
509  'cAeS',
510  'mfSF',
511  'kWcC',
512  'miSF',
513  'CdfA',
514  'EdfA',
515  'orSF',
516  'nftN',
517  'PRIU',
518  'rFpN',
519  'RFpN',
520  'aPeS',
521  'sUeS',
522  'FpcA',
523  'MpcA',
524  'cSeS',
525  'mNbO',
526  'sFpN',
527  'uLeS',
528  'DPcS',
529  'nevE',
530  'vrqR',
531  'ldaV',
532  ' pP',
533  'SdaV',
534  ' daV',
535  'LdaV',
536  'FdaV',
537  ' GIB',
538  };
539 
540  //
541  // Loop all 64 hot tags
542  //
543  ASSERT((sizeof(TagList) / sizeof(ULONG)) == 64);
544  for (i = 0; i < sizeof(TagList) / sizeof(ULONG); i++)
545  {
546  //
547  // Get the current tag, and compute its hash in the tracker table
548  //
549  Key = TagList[i];
551 
552  //
553  // Loop all the hashes in this index/bucket
554  //
555  Index = Hash;
556  while (TRUE)
557  {
558  //
559  // Find an empty entry, and make sure this isn't the last hash that
560  // can fit.
561  //
562  // On checked builds, also make sure this is the first time we are
563  // seeding this tag.
564  //
565  ASSERT(TrackTable[Hash].Key != Key);
566  if (!(TrackTable[Hash].Key) && (Hash != PoolTrackTableSize - 1))
567  {
568  //
569  // It has been seeded, move on to the next tag
570  //
571  TrackTable[Hash].Key = Key;
572  break;
573  }
574 
575  //
576  // This entry was already taken, compute the next possible hash while
577  // making sure we're not back at our initial index.
578  //
579  ASSERT(TrackTable[Hash].Key != Key);
580  Hash = (Hash + 1) & PoolTrackTableMask;
581  if (Hash == Index) break;
582  }
583  }
584 }
585 
586 VOID
587 NTAPI
591 {
592  ULONG Hash, Index;
594  SIZE_T TableMask, TableSize;
595 
596  //
597  // Remove the PROTECTED_POOL flag which is not part of the tag
598  //
599  Key &= ~PROTECTED_POOL;
600 
601  //
602  // With WinDBG you can set a tag you want to break on when an allocation is
603  // attempted
604  //
605  if (Key == PoolHitTag) DbgBreakPoint();
606 
607  //
608  // Why the double indirection? Because normally this function is also used
609  // when doing session pool allocations, which has another set of tables,
610  // sizes, and masks that live in session pool. Now we don't support session
611  // pool so we only ever use the regular tables, but I'm keeping the code this
612  // way so that the day we DO support session pool, it won't require that
613  // many changes
614  //
615  Table = PoolTrackTable;
616  TableMask = PoolTrackTableMask;
617  TableSize = PoolTrackTableSize;
619 
620  //
621  // Compute the hash for this key, and loop all the possible buckets
622  //
623  Hash = ExpComputeHashForTag(Key, TableMask);
624  Index = Hash;
625  while (TRUE)
626  {
627  //
628  // Have we found the entry for this tag? */
629  //
630  TableEntry = &Table[Hash];
631  if (TableEntry->Key == Key)
632  {
633  //
634  // Decrement the counters depending on if this was paged or nonpaged
635  // pool
636  //
637  if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
638  {
639  InterlockedIncrement(&TableEntry->NonPagedFrees);
641  -(SSIZE_T)NumberOfBytes);
642  return;
643  }
644  InterlockedIncrement(&TableEntry->PagedFrees);
646  -(SSIZE_T)NumberOfBytes);
647  return;
648  }
649 
650  //
651  // We should have only ended up with an empty entry if we've reached
652  // the last bucket
653  //
654  if (!TableEntry->Key)
655  {
656  DPRINT1("Empty item reached in tracker table. Hash=0x%lx, TableMask=0x%lx, Tag=0x%08lx, NumberOfBytes=%lu, PoolType=%d\n",
657  Hash, TableMask, Key, (ULONG)NumberOfBytes, PoolType);
658  ASSERT(Hash == TableMask);
659  }
660 
661  //
662  // This path is hit when we don't have an entry, and the current bucket
663  // is full, so we simply try the next one
664  //
665  Hash = (Hash + 1) & TableMask;
666  if (Hash == Index) break;
667  }
668 
669  //
670  // And finally this path is hit when all the buckets are full, and we need
671  // some expansion. This path is not yet supported in ReactOS and so we'll
672  // ignore the tag
673  //
674  DPRINT1("Out of pool tag space, ignoring...\n");
675 }
676 
677 VOID
678 NTAPI
682 {
683  ULONG Hash, Index;
684  KIRQL OldIrql;
686  SIZE_T TableMask, TableSize;
687 
688  //
689  // Remove the PROTECTED_POOL flag which is not part of the tag
690  //
691  Key &= ~PROTECTED_POOL;
692 
693  //
694  // With WinDBG you can set a tag you want to break on when an allocation is
695  // attempted
696  //
697  if (Key == PoolHitTag) DbgBreakPoint();
698 
699  //
700  // There is also an internal flag you can set to break on malformed tags
701  //
702  if (ExStopBadTags) ASSERT(Key & 0xFFFFFF00);
703 
704  //
705  // ASSERT on ReactOS features not yet supported
706  //
707  ASSERT(!(PoolType & SESSION_POOL_MASK));
709 
710  //
711  // Why the double indirection? Because normally this function is also used
712  // when doing session pool allocations, which has another set of tables,
713  // sizes, and masks that live in session pool. Now we don't support session
714  // pool so we only ever use the regular tables, but I'm keeping the code this
715  // way so that the day we DO support session pool, it won't require that
716  // many changes
717  //
718  Table = PoolTrackTable;
719  TableMask = PoolTrackTableMask;
720  TableSize = PoolTrackTableSize;
722 
723  //
724  // Compute the hash for this key, and loop all the possible buckets
725  //
726  Hash = ExpComputeHashForTag(Key, TableMask);
727  Index = Hash;
728  while (TRUE)
729  {
730  //
731  // Do we already have an entry for this tag? */
732  //
733  TableEntry = &Table[Hash];
734  if (TableEntry->Key == Key)
735  {
736  //
737  // Increment the counters depending on if this was paged or nonpaged
738  // pool
739  //
740  if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
741  {
742  InterlockedIncrement(&TableEntry->NonPagedAllocs);
743  InterlockedExchangeAddSizeT(&TableEntry->NonPagedBytes, NumberOfBytes);
744  return;
745  }
746  InterlockedIncrement(&TableEntry->PagedAllocs);
747  InterlockedExchangeAddSizeT(&TableEntry->PagedBytes, NumberOfBytes);
748  return;
749  }
750 
751  //
752  // We don't have an entry yet, but we've found a free bucket for it
753  //
754  if (!(TableEntry->Key) && (Hash != PoolTrackTableSize - 1))
755  {
756  //
757  // We need to hold the lock while creating a new entry, since other
758  // processors might be in this code path as well
759  //
761  if (!PoolTrackTable[Hash].Key)
762  {
763  //
764  // We've won the race, so now create this entry in the bucket
765  //
766  ASSERT(Table[Hash].Key == 0);
767  PoolTrackTable[Hash].Key = Key;
768  TableEntry->Key = Key;
769  }
771 
772  //
773  // Now we force the loop to run again, and we should now end up in
774  // the code path above which does the interlocked increments...
775  //
776  continue;
777  }
778 
779  //
780  // This path is hit when we don't have an entry, and the current bucket
781  // is full, so we simply try the next one
782  //
783  Hash = (Hash + 1) & TableMask;
784  if (Hash == Index) break;
785  }
786 
787  //
788  // And finally this path is hit when all the buckets are full, and we need
789  // some expansion. This path is not yet supported in ReactOS and so we'll
790  // ignore the tag
791  //
792  DPRINT1("Out of pool tag space, ignoring...\n");
793 }
794 
795 VOID
796 NTAPI
800  IN ULONG PoolIndex,
801  IN ULONG Threshold,
802  IN PVOID PoolLock)
803 {
804  PLIST_ENTRY NextEntry, LastEntry;
805 
806  //
807  // Setup the descriptor based on the caller's request
808  //
809  PoolDescriptor->PoolType = PoolType;
810  PoolDescriptor->PoolIndex = PoolIndex;
811  PoolDescriptor->Threshold = Threshold;
812  PoolDescriptor->LockAddress = PoolLock;
813 
814  //
815  // Initialize accounting data
816  //
817  PoolDescriptor->RunningAllocs = 0;
818  PoolDescriptor->RunningDeAllocs = 0;
819  PoolDescriptor->TotalPages = 0;
820  PoolDescriptor->TotalBytes = 0;
821  PoolDescriptor->TotalBigPages = 0;
822 
823  //
824  // Nothing pending for now
825  //
826  PoolDescriptor->PendingFrees = NULL;
827  PoolDescriptor->PendingFreeDepth = 0;
828 
829  //
830  // Loop all the descriptor's allocation lists and initialize them
831  //
832  NextEntry = PoolDescriptor->ListHeads;
833  LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
834  while (NextEntry < LastEntry)
835  {
836  ExpInitializePoolListHead(NextEntry);
837  NextEntry++;
838  }
839 
840  //
841  // Note that ReactOS does not support Session Pool Yet
842  //
843  ASSERT(PoolType != PagedPoolSession);
844 }
845 
846 VOID
847 NTAPI
850  IN ULONG Threshold)
851 {
854  ULONG i;
855 
856  //
857  // Check what kind of pool this is
858  //
859  if (PoolType == NonPagedPool)
860  {
861  //
862  // Compute the track table size and convert it from a power of two to an
863  // actual byte size
864  //
865  // NOTE: On checked builds, we'll assert if the registry table size was
866  // invalid, while on retail builds we'll just break out of the loop at
867  // that point.
868  //
870  for (i = 0; i < 32; i++)
871  {
872  if (TableSize & 1)
873  {
874  ASSERT((TableSize & ~1) == 0);
875  if (!(TableSize & ~1)) break;
876  }
877  TableSize >>= 1;
878  }
879 
880  //
881  // If we hit bit 32, than no size was defined in the registry, so
882  // we'll use the default size of 2048 entries.
883  //
884  // Otherwise, use the size from the registry, as long as it's not
885  // smaller than 64 entries.
886  //
887  if (i == 32)
888  {
889  PoolTrackTableSize = 2048;
890  }
891  else
892  {
893  PoolTrackTableSize = max(1 << i, 64);
894  }
895 
896  //
897  // Loop trying with the biggest specified size first, and cut it down
898  // by a power of two each iteration in case not enough memory exist
899  //
900  while (TRUE)
901  {
902  //
903  // Do not allow overflow
904  //
905  if ((PoolTrackTableSize + 1) > (MAXULONG_PTR / sizeof(POOL_TRACKER_TABLE)))
906  {
907  PoolTrackTableSize >>= 1;
908  continue;
909  }
910 
911  //
912  // Allocate the tracker table and exit the loop if this worked
913  //
914  PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
915  (PoolTrackTableSize + 1) *
916  sizeof(POOL_TRACKER_TABLE));
917  if (PoolTrackTable) break;
918 
919  //
920  // Otherwise, as long as we're not down to the last bit, keep
921  // iterating
922  //
923  if (PoolTrackTableSize == 1)
924  {
925  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
926  TableSize,
927  0xFFFFFFFF,
928  0xFFFFFFFF,
929  0xFFFFFFFF);
930  }
931  PoolTrackTableSize >>= 1;
932  }
933 
934  //
935  // Add one entry, compute the hash, and zero the table
936  //
939 
940  RtlZeroMemory(PoolTrackTable,
942 
943  //
944  // Finally, add the most used tags to speed up those allocations
945  //
946  ExpSeedHotTags();
947 
948  //
949  // We now do the exact same thing with the tracker table for big pages
950  //
952  for (i = 0; i < 32; i++)
953  {
954  if (TableSize & 1)
955  {
956  ASSERT((TableSize & ~1) == 0);
957  if (!(TableSize & ~1)) break;
958  }
959  TableSize >>= 1;
960  }
961 
962  //
963  // For big pages, the default tracker table is 4096 entries, while the
964  // minimum is still 64
965  //
966  if (i == 32)
967  {
968  PoolBigPageTableSize = 4096;
969  }
970  else
971  {
972  PoolBigPageTableSize = max(1 << i, 64);
973  }
974 
975  //
976  // Again, run the exact same loop we ran earlier, but this time for the
977  // big pool tracker instead
978  //
979  while (TRUE)
980  {
982  {
983  PoolBigPageTableSize >>= 1;
984  continue;
985  }
986 
987  PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
989  sizeof(POOL_TRACKER_BIG_PAGES));
990  if (PoolBigPageTable) break;
991 
992  if (PoolBigPageTableSize == 1)
993  {
994  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
995  TableSize,
996  0xFFFFFFFF,
997  0xFFFFFFFF,
998  0xFFFFFFFF);
999  }
1000 
1001  PoolBigPageTableSize >>= 1;
1002  }
1003 
1004  //
1005  // An extra entry is not needed for for the big pool tracker, so just
1006  // compute the hash and zero it
1007  //
1009  RtlZeroMemory(PoolBigPageTable,
1011  for (i = 0; i < PoolBigPageTableSize; i++) PoolBigPageTable[i].Va = (PVOID)1;
1012 
1013  //
1014  // During development, print this out so we can see what's happening
1015  //
1016  DPRINT("EXPOOL: Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1017  PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1018  DPRINT("EXPOOL: Big Pool Tracker Table at: 0x%p with 0x%lx bytes\n",
1019  PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
1020 
1021  //
1022  // Insert the generic tracker for all of big pool
1023  //
1024  ExpInsertPoolTracker('looP',
1025  ROUND_TO_PAGES(PoolBigPageTableSize *
1026  sizeof(POOL_TRACKER_BIG_PAGES)),
1027  NonPagedPool);
1028 
1029  //
1030  // No support for NUMA systems at this time
1031  //
1032  ASSERT(KeNumberNodes == 1);
1033 
1034  //
1035  // Initialize the tag spinlock
1036  //
1038 
1039  //
1040  // Initialize the nonpaged pool descriptor
1041  //
1042  PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
1044  NonPagedPool,
1045  0,
1046  Threshold,
1047  NULL);
1048  }
1049  else
1050  {
1051  //
1052  // No support for NUMA systems at this time
1053  //
1054  ASSERT(KeNumberNodes == 1);
1055 
1056  //
1057  // Allocate the pool descriptor
1058  //
1059  Descriptor = ExAllocatePoolWithTag(NonPagedPool,
1060  sizeof(KGUARDED_MUTEX) +
1061  sizeof(POOL_DESCRIPTOR),
1062  'looP');
1063  if (!Descriptor)
1064  {
1065  //
1066  // This is really bad...
1067  //
1068  KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
1069  0,
1070  -1,
1071  -1,
1072  -1);
1073  }
1074 
1075  //
1076  // Setup the vector and guarded mutex for paged pool
1077  //
1078  PoolVector[PagedPool] = Descriptor;
1079  ExpPagedPoolMutex = (PKGUARDED_MUTEX)(Descriptor + 1);
1080  ExpPagedPoolDescriptor[0] = Descriptor;
1081  KeInitializeGuardedMutex(ExpPagedPoolMutex);
1082  ExInitializePoolDescriptor(Descriptor,
1083  PagedPool,
1084  0,
1085  Threshold,
1086  ExpPagedPoolMutex);
1087 
1088  //
1089  // Insert the generic tracker for all of nonpaged pool
1090  //
1091  ExpInsertPoolTracker('looP',
1093  NonPagedPool);
1094  }
1095 }
1096 
1098 KIRQL
1100 {
1101  //
1102  // Check if this is nonpaged pool
1103  //
1104  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1105  {
1106  //
1107  // Use the queued spin lock
1108  //
1110  }
1111  else
1112  {
1113  //
1114  // Use the guarded mutex
1115  //
1116  KeAcquireGuardedMutex(Descriptor->LockAddress);
1117  return APC_LEVEL;
1118  }
1119 }
1120 
1122 VOID
1124  IN KIRQL OldIrql)
1125 {
1126  //
1127  // Check if this is nonpaged pool
1128  //
1129  if ((Descriptor->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool)
1130  {
1131  //
1132  // Use the queued spin lock
1133  //
1135  }
1136  else
1137  {
1138  //
1139  // Use the guarded mutex
1140  //
1141  KeReleaseGuardedMutex(Descriptor->LockAddress);
1142  }
1143 }
1144 
1145 VOID
1146 NTAPI
1151 {
1152  PPOOL_DPC_CONTEXT Context = DeferredContext;
1155 
1156  //
1157  // Make sure we win the race, and if we did, copy the data atomically
1158  //
1159  if (KeSignalCallDpcSynchronize(SystemArgument2))
1160  {
1161  RtlCopyMemory(Context->PoolTrackTable,
1162  PoolTrackTable,
1163  Context->PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));
1164 
1165  //
1166  // This is here because ReactOS does not yet support expansion
1167  //
1168  ASSERT(Context->PoolTrackTableSizeExpansion == 0);
1169  }
1170 
1171  //
1172  // Regardless of whether we won or not, we must now synchronize and then
1173  // decrement the barrier since this is one more processor that has completed
1174  // the callback.
1175  //
1176  KeSignalCallDpcSynchronize(SystemArgument2);
1177  KeSignalCallDpcDone(SystemArgument1);
1178 }
1179 
1180 NTSTATUS
1181 NTAPI
1183  IN ULONG SystemInformationLength,
1185 {
1186  ULONG TableSize, CurrentLength;
1187  ULONG EntryCount;
1189  PSYSTEM_POOLTAG TagEntry;
1190  PPOOL_TRACKER_TABLE Buffer, TrackerEntry;
1193 
1194  //
1195  // Keep track of how much data the caller's buffer must hold
1196  //
1197  CurrentLength = FIELD_OFFSET(SYSTEM_POOLTAG_INFORMATION, TagInfo);
1198 
1199  //
1200  // Initialize the caller's buffer
1201  //
1202  TagEntry = &SystemInformation->TagInfo[0];
1203  SystemInformation->Count = 0;
1204 
1205  //
1206  // Capture the number of entries, and the total size needed to make a copy
1207  // of the table
1208  //
1209  EntryCount = (ULONG)PoolTrackTableSize;
1210  TableSize = EntryCount * sizeof(POOL_TRACKER_TABLE);
1211 
1212  //
1213  // Allocate the "Generic DPC" temporary buffer
1214  //
1215  Buffer = ExAllocatePoolWithTag(NonPagedPool, TableSize, 'ofnI');
1216  if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;
1217 
1218  //
1219  // Do a "Generic DPC" to atomically retrieve the tag and allocation data
1220  //
1221  Context.PoolTrackTable = Buffer;
1223  Context.PoolTrackTableExpansion = NULL;
1224  Context.PoolTrackTableSizeExpansion = 0;
1226 
1227  //
1228  // Now parse the results
1229  //
1230  for (TrackerEntry = Buffer; TrackerEntry < (Buffer + EntryCount); TrackerEntry++)
1231  {
1232  //
1233  // If the entry is empty, skip it
1234  //
1235  if (!TrackerEntry->Key) continue;
1236 
1237  //
1238  // Otherwise, add one more entry to the caller's buffer, and ensure that
1239  // enough space has been allocated in it
1240  //
1241  SystemInformation->Count++;
1242  CurrentLength += sizeof(*TagEntry);
1243  if (SystemInformationLength < CurrentLength)
1244  {
1245  //
1246  // The caller's buffer is too small, so set a failure code. The
1247  // caller will know the count, as well as how much space is needed.
1248  //
1249  // We do NOT break out of the loop, because we want to keep incrementing
1250  // the Count as well as CurrentLength so that the caller can know the
1251  // final numbers
1252  //
1253  Status = STATUS_INFO_LENGTH_MISMATCH;
1254  }
1255  else
1256  {
1257  //
1258  // Small sanity check that our accounting is working correctly
1259  //
1260  ASSERT(TrackerEntry->PagedAllocs >= TrackerEntry->PagedFrees);
1261  ASSERT(TrackerEntry->NonPagedAllocs >= TrackerEntry->NonPagedFrees);
1262 
1263  //
1264  // Return the data into the caller's buffer
1265  //
1266  TagEntry->TagUlong = TrackerEntry->Key;
1267  TagEntry->PagedAllocs = TrackerEntry->PagedAllocs;
1268  TagEntry->PagedFrees = TrackerEntry->PagedFrees;
1269  TagEntry->PagedUsed = TrackerEntry->PagedBytes;
1270  TagEntry->NonPagedAllocs = TrackerEntry->NonPagedAllocs;
1271  TagEntry->NonPagedFrees = TrackerEntry->NonPagedFrees;
1272  TagEntry->NonPagedUsed = TrackerEntry->NonPagedBytes;
1273  TagEntry++;
1274  }
1275  }
1276 
1277  //
1278  // Free the "Generic DPC" temporary buffer, return the buffer length and status
1279  //
1280  ExFreePoolWithTag(Buffer, 'ofnI');
1281  if (ReturnLength) *ReturnLength = CurrentLength;
1282  return Status;
1283 }
1284 
/**
 * @brief Records a big (whole-page) pool allocation in the global big-page
 *        tracking table, so that its tag and page count can be retrieved
 *        when the allocation is later freed.
 *
 * Open-addresses from the hash bucket, wrapping at the table end; a slot is
 * free when the POOL_BIG_TABLE_ENTRY_FREE low bit of its Va is set, and is
 * claimed atomically with an interlocked compare-exchange.
 *
 * @return TRUE if a table entry was claimed, FALSE if the table was full
 *         (expansion is not implemented).
 *
 * NOTE(review): this extract is missing several original lines: the line
 * carrying the function name and the Va/PoolType parameters (the call site
 * in ExAllocatePoolWithTag shows ExpAddTagForBigPages(Va, Key, Pages, Type)),
 * the hash computation feeding "Hash &=", the table spinlock
 * acquire/release, and the in-use counter increment. Confirm against the
 * complete file.
 */
BOOLEAN
NTAPI
    IN ULONG Key,
    IN ULONG NumberOfPages,
{
    ULONG Hash, i = 0;
    PVOID OldVa;
    KIRQL OldIrql;
    SIZE_T TableSize;
    PPOOL_TRACKER_BIG_PAGES Entry, EntryEnd, EntryStart;
    // Session pool is not supported in ReactOS
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    //
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // We loop from the current hash bucket to the end of the table, and then
    // rollover to hash bucket 0 and keep going from there. If we return back
    // to the beginning, then we attempt expansion at the bottom of the loop
    //
    EntryStart = Entry = &PoolBigPageTable[Hash];
    EntryEnd = &PoolBigPageTable[TableSize];
    do
    {
        //
        // Make sure that this is a free entry (low bit of Va set) and attempt
        // to atomically make the entry busy now
        //
        OldVa = Entry->Va;
        if (((ULONG_PTR)OldVa & POOL_BIG_TABLE_ENTRY_FREE) &&
            (InterlockedCompareExchangePointer(&Entry->Va, Va, OldVa) == OldVa))
        {
            //
            // We now own this entry, write down the size and the pool tag
            //
            Entry->Key = Key;
            Entry->NumberOfPages = NumberOfPages;

            //
            // Add one more entry to the count, and see if we're getting within
            // 25% of the table size, at which point we'll do an expansion now
            // to avoid blocking too hard later on.
            //
            // Note that we only do this if it's also been the 16th time that we
            // keep losing the race or that we are not finding a free entry anymore,
            // which implies a massive number of concurrent big pool allocations.
            //
            if ((i >= 16) && (ExpPoolBigEntriesInUse > (TableSize / 4)))
            {
                // NOTE(review): the DPRINT argument line is missing in this extract
                DPRINT("Should attempt expansion since we now have %lu entries\n",
            }

            //
            // We have our entry, return
            //
            return TRUE;
        }

        //
        // We don't have our entry yet, so keep trying, making the entry list
        // circular if we reach the last entry. We'll eventually break out of
        // the loop once we've rolled over and returned back to our original
        // hash bucket
        //
        i++;
        if (++Entry >= EntryEnd) Entry = &PoolBigPageTable[0];
    } while (Entry != EntryStart);

    //
    // This means there's no free hash buckets whatsoever, so we would now have
    // to attempt expanding the table
    //
    DPRINT1("Big pool expansion needed, not implemented!\n");
    return FALSE;
}
1373 
/**
 * @brief Looks up the big-page table entry for a whole-page allocation,
 *        captures its page count and tag, marks the entry free again, and
 *        returns the tag.
 *
 * Probes linearly from the hash bucket; on reaching the table end once, it
 * restarts from index 0. If the Va is still not found on the second pass,
 * the allocation was never inserted (the allocator fell back to the generic
 * 'BIG' tag), so ' GIB' is returned with *BigPages = 0 and the caller must
 * ask Mm for the real page count.
 *
 * NOTE(review): missing from this extract: the line with the function name
 * and the Va/PoolType parameters (the caller in ExFreePoolWithTag shows
 * ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType)), the declaration of
 * Entry, the hash computation, and the table spinlock acquire/release.
 */
ULONG
NTAPI
    OUT PULONG_PTR BigPages,
{
    BOOLEAN FirstTry = TRUE;
    SIZE_T TableSize;
    KIRQL OldIrql;
    ULONG PoolTag, Hash;
    // Session pool is not supported in ReactOS
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // As the table is expandable, these values must only be read after acquiring
    // the lock to avoid a teared access during an expansion
    //
    Hash &= PoolBigPageTableHash;
    TableSize = PoolBigPageTableSize;

    //
    // Loop while trying to find this big page allocation
    //
    while (PoolBigPageTable[Hash].Va != Va)
    {
        //
        // Increment the size until we go past the end of the table
        //
        if (++Hash >= TableSize)
        {
            //
            // Is this the second time we've tried?
            //
            if (!FirstTry)
            {
                //
                // This means it was never inserted into the pool table and it
                // received the special "BIG" tag -- return that and return 0
                // so that the code can ask Mm for the page count instead
                //
                *BigPages = 0;
                return ' GIB';
            }

            //
            // The first time this happens, reset the hash index and try again
            //
            Hash = 0;
            FirstTry = FALSE;
        }
    }

    //
    // Now capture all the information we need from the entry, since after we
    // release the lock, the data can change
    //
    Entry = &PoolBigPageTable[Hash];
    *BigPages = Entry->NumberOfPages;
    PoolTag = Entry->Key;

    //
    // Set the free bit, and decrement the number of allocations. Finally, release
    // the lock and return the tag that was located
    //
    // Incrementing Va sets its POOL_BIG_TABLE_ENTRY_FREE low bit (page-aligned
    // addresses have it clear), which marks the slot as reusable
    InterlockedIncrement((PLONG)&Entry->Va);
    return PoolTag;
}
1447 
1448 VOID
1449 NTAPI
1450 ExQueryPoolUsage(OUT PULONG PagedPoolPages,
1451  OUT PULONG NonPagedPoolPages,
1452  OUT PULONG PagedPoolAllocs,
1453  OUT PULONG PagedPoolFrees,
1454  OUT PULONG PagedPoolLookasideHits,
1455  OUT PULONG NonPagedPoolAllocs,
1456  OUT PULONG NonPagedPoolFrees,
1457  OUT PULONG NonPagedPoolLookasideHits)
1458 {
1459  ULONG i;
1460  PPOOL_DESCRIPTOR PoolDesc;
1461 
1462  //
1463  // Assume all failures
1464  //
1465  *PagedPoolPages = 0;
1466  *PagedPoolAllocs = 0;
1467  *PagedPoolFrees = 0;
1468 
1469  //
1470  // Tally up the totals for all the apged pool
1471  //
1472  for (i = 0; i < ExpNumberOfPagedPools + 1; i++)
1473  {
1474  PoolDesc = ExpPagedPoolDescriptor[i];
1475  *PagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1476  *PagedPoolAllocs += PoolDesc->RunningAllocs;
1477  *PagedPoolFrees += PoolDesc->RunningDeAllocs;
1478  }
1479 
1480  //
1481  // The first non-paged pool has a hardcoded well-known descriptor name
1482  //
1483  PoolDesc = &NonPagedPoolDescriptor;
1484  *NonPagedPoolPages = PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1485  *NonPagedPoolAllocs = PoolDesc->RunningAllocs;
1486  *NonPagedPoolFrees = PoolDesc->RunningDeAllocs;
1487 
1488  //
1489  // If the system has more than one non-paged pool, copy the other descriptor
1490  // totals as well
1491  //
1492 #if 0
1493  if (ExpNumberOfNonPagedPools > 1)
1494  {
1495  for (i = 0; i < ExpNumberOfNonPagedPools; i++)
1496  {
1497  PoolDesc = ExpNonPagedPoolDescriptor[i];
1498  *NonPagedPoolPages += PoolDesc->TotalPages + PoolDesc->TotalBigPages;
1499  *NonPagedPoolAllocs += PoolDesc->RunningAllocs;
1500  *NonPagedPoolFrees += PoolDesc->RunningDeAllocs;
1501  }
1502  }
1503 #endif
1504 
1505  //
1506  // FIXME: Not yet supported
1507  //
1508  *NonPagedPoolLookasideHits += 0;
1509  *PagedPoolLookasideHits += 0;
1510 }
1511 
/**
 * @brief Returns previously charged pool quota for a quota-tagged allocation.
 *
 * For a block allocated with the QUOTA_POOL_MASK pool type, the owning
 * EPROCESS pointer is stashed in the last PVOID of the block; this routine
 * validates that pointer, returns the quota charge via PsReturnPoolQuota,
 * and drops the process reference taken at allocation time.
 *
 * NOTE(review): missing from this extract: the line carrying the function
 * name and the P parameter, the declarations of Entry, Process and PoolType,
 * and the condition of the early-return guard below. Confirm against the
 * complete file.
 */
VOID
NTAPI
{
    USHORT BlockSize;

    // NOTE(review): the if-condition for this early-out is missing in this extract
    {
        return;
    }

    // The pool header sits immediately before the caller's pointer
    Entry = P;
    Entry--;
    // Pool blocks are POOL_BLOCK_SIZE aligned
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    // PoolType is stored biased by +1 in the header
    PoolType = Entry->PoolType - 1;
    BlockSize = Entry->BlockSize;

    if (PoolType & QUOTA_POOL_MASK)
    {
        // The owning process pointer occupies the last PVOID of the block
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        ASSERT(Process != NULL);
        if (Process)
        {
            // A corrupted/bogus process pointer is a caller bug: bugcheck
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Entry->PoolTag,
                             (ULONG_PTR)Process);
            }
            // Clear the stashed pointer, return the quota, drop the reference
            ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }
}
1558 
1559 /* PUBLIC FUNCTIONS ***********************************************************/
1560 
1561 /*
1562  * @implemented
1563  */
/**
 * @brief Main executive pool allocator (ExAllocatePoolWithTag).
 *
 * Allocation strategy, in order:
 *   1. Special pool / verifier hooks, when enabled.
 *   2. Requests larger than POOL_MAX_ALLOC: whole pages via
 *      MiAllocatePoolPages, tracked in the big-page table (falling back to
 *      the generic ' GIB' tag if tracking fails).
 *   3. Small requests: per-CPU then global lookaside lists.
 *   4. The pool descriptor's free lists, splitting an oversized free block
 *      into the allocation plus a leftover fragment.
 *   5. A fresh page from MiAllocatePoolPages, split the same way.
 *
 * @return Pointer to the allocation, or NULL on failure (unless the pool
 *         type requests a bugcheck or an exception on failure).
 *
 * NOTE(review): this extract lost a number of original lines: the line with
 * the function name and the PoolType/NumberOfBytes parameters, the
 * LookasideList declaration, several if-conditions (pool debug flag tests),
 * the ExRaiseStatus call, and a few counter/tracker statement openers.
 * Gaps are marked inline; confirm against the complete file.
 */
PVOID
NTAPI
    IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    USHORT BlockSize, i;
    ULONG OriginalType;
    PKPRCB Prcb = KeGetCurrentPrcb();
    // NOTE(review): the LookasideList declaration line is missing in this extract

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);
    ExpCheckPoolIrqlLevel(PoolType, NumberOfBytes, NULL);

    //
    // Not supported in ReactOS
    //
    ASSERT(!(PoolType & SESSION_POOL_MASK));

    //
    // Check if verifier or special pool is enabled
    //
    // NOTE(review): the if-condition line is missing in this extract
    {
        //
        // For verifier, we should call the verification routine
        //
        // NOTE(review): the if-condition line is missing in this extract
        {
            DPRINT1("Driver Verifier is not yet supported\n");
        }

        //
        // For special pool, we check if this is a suitable allocation and do
        // the special allocation if needed
        //
        // NOTE(review): the if-condition line is missing in this extract
        {
            //
            // Check if this is a special pool allocation
            //
            if (MmUseSpecialPool(NumberOfBytes, Tag))
            {
                //
                // Try to allocate using special pool
                //
                Entry = MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, 2);
                if (Entry) return Entry;
            }
        }
    }

    //
    // Get the pool type and its corresponding vector for this request
    //
    OriginalType = PoolType;
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Allocate pages for it
        //
        Entry = MiAllocatePoolPages(OriginalType, NumberOfBytes);
        if (!Entry)
        {
            //
            // Must succeed pool is deprecated, but still supported. These allocation
            // failures must cause an immediate bugcheck
            //
            if (OriginalType & MUST_SUCCEED_POOL_MASK)
            {
                KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                             NumberOfBytes,
                             NonPagedPoolDescriptor.TotalPages,
                             NonPagedPoolDescriptor.TotalBigPages,
                             0);
            }

            //
            // Internal debugging
            //
            ExPoolFailures++;

            //
            // This flag requests printing failures, and can also further specify
            // breaking on failures
            //
            // NOTE(review): the if-condition line is missing in this extract
            {
                DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                        NumberOfBytes,
                        OriginalType);
            }

            //
            // Finally, this flag requests an exception, which we are more than
            // happy to raise!
            //
            if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
            {
                // NOTE(review): the ExRaiseStatus call line is missing in this extract
            }

            return NULL;
        }

        //
        // Increment required counters
        //
        // NOTE(review): the opening counter-update line is missing in this extract
                                    (LONG)BYTES_TO_PAGES(NumberOfBytes));
        InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, NumberOfBytes);

        //
        // Add a tag for the big page allocation and switch to the generic "BIG"
        // tag if we failed to do so, then insert a tracker for this allocation.
        //
        if (!ExpAddTagForBigPages(Entry,
                                  Tag,
                                  (ULONG)BYTES_TO_PAGES(NumberOfBytes),
                                  OriginalType))
        {
            Tag = ' GIB';
        }
        ExpInsertPoolTracker(Tag, ROUND_TO_PAGES(NumberOfBytes), OriginalType);
        return Entry;
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (USHORT)((NumberOfBytes + sizeof(POOL_HEADER) + (POOL_BLOCK_SIZE - 1))
                 / POOL_BLOCK_SIZE);

    //
    // Handle lookaside list optimization for both paged and nonpaged pool
    //
    if (i <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try popping it from the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[i - 1].P :
                        Prcb->PPNPagedLookasideList[i - 1].P;
        LookasideList->TotalAllocates++;
        Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        if (!Entry)
        {
            //
            // We failed, try popping it from the global list
            //
            LookasideList = (PoolType == PagedPool) ?
                            Prcb->PPPagedLookasideList[i - 1].L :
                            Prcb->PPNPagedLookasideList[i - 1].L;
            LookasideList->TotalAllocates++;
            Entry = (PPOOL_HEADER)InterlockedPopEntrySList(&LookasideList->ListHead);
        }

        //
        // If we were able to pop it, update the accounting and return the block
        //
        if (Entry)
        {
            LookasideList->AllocateHits++;

            //
            // Get the real entry, write down its pool type, and track it
            //
            Entry--;
            Entry->PoolType = OriginalType + 1;
            // NOTE(review): the ExpInsertPoolTracker( opening line is missing here
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    }

    //
    // Loop in the free lists looking for a block if this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!ExpIsPoolListEmpty(ListHead))
        {
            //
            // Acquire the pool lock now
            //
            OldIrql = ExLockPool(PoolDesc);

            //
            // And make sure the list still has entries
            //
            if (ExpIsPoolListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                ExUnlockPool(PoolDesc, OldIrql);
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
            //
            ExpCheckPoolLinks(ListHead);
            Entry = POOL_ENTRY(ExpRemovePoolHeadList(ListHead));
            ExpCheckPoolLinks(ListHead);
            ExpCheckPoolBlocks(Entry);
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger that what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = POOL_BLOCK(Entry, i);
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = POOL_NEXT_BLOCK(FragmentEntry);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry = POOL_NEXT_BLOCK(Entry);
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = POOL_BLOCK(Entry, i);
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                          POOL_FREE_BLOCK(FragmentEntry));
                    ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = OriginalType + 1;
            ExpCheckPoolBlocks(Entry);
            ExUnlockPool(PoolDesc, OldIrql);

            //
            // Increment required counters
            //
            // NOTE(review): counter-update statements are missing in this extract

            //
            // Track this allocation
            //
            // NOTE(review): the ExpInsertPoolTracker( opening line is missing here
                                 Entry->BlockSize * POOL_BLOCK_SIZE,
                                 OriginalType);

            //
            // Return the pool allocation
            //
            Entry->PoolTag = Tag;
            (POOL_FREE_BLOCK(Entry))->Flink = NULL;
            (POOL_FREE_BLOCK(Entry))->Blink = NULL;
            return POOL_FREE_BLOCK(Entry);
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(OriginalType, PAGE_SIZE);
    if (!Entry)
    {
        //
        // Must succeed pool is deprecated, but still supported. These allocation
        // failures must cause an immediate bugcheck
        //
        if (OriginalType & MUST_SUCCEED_POOL_MASK)
        {
            KeBugCheckEx(MUST_SUCCEED_POOL_EMPTY,
                         PAGE_SIZE,
                         NonPagedPoolDescriptor.TotalPages,
                         NonPagedPoolDescriptor.TotalBigPages,
                         0);
        }

        //
        // Internal debugging
        //
        ExPoolFailures++;

        //
        // This flag requests printing failures, and can also further specify
        // breaking on failures
        //
        // NOTE(review): the if-condition line is missing in this extract
        {
            DPRINT1("EX: ExAllocatePool (%lu, 0x%x) returning NULL\n",
                    NumberOfBytes,
                    OriginalType);
        }

        //
        // Finally, this flag requests an exception, which we are more than
        // happy to raise!
        //
        if (OriginalType & POOL_RAISE_IF_ALLOCATION_FAILURE)
        {
            // NOTE(review): the ExRaiseStatus call line is missing in this extract
        }

        //
        // Return NULL to the caller in all other cases
        //
        return NULL;
    }

    //
    // Setup the entry data
    //
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = OriginalType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / POOL_BLOCK_SIZE) - i;
    FragmentEntry = POOL_BLOCK(Entry, i);
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;

    //
    // Increment required counters
    //
    InterlockedIncrement((PLONG)&PoolDesc->TotalPages);
    // NOTE(review): an additional counter-update line is missing in this extract

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the pool lock
        //
        OldIrql = ExLockPool(PoolDesc);

        //
        // And insert the free entry into the free list for this block size
        //
        ExpCheckPoolLinks(&PoolDesc->ListHeads[BlockSize - 1]);
        ExpInsertPoolTailList(&PoolDesc->ListHeads[BlockSize - 1],
                              POOL_FREE_BLOCK(FragmentEntry));
        ExpCheckPoolLinks(POOL_FREE_BLOCK(FragmentEntry));

        //
        // Release the pool lock
        //
        ExpCheckPoolBlocks(Entry);
        ExUnlockPool(PoolDesc, OldIrql);
    }
    else
    {
        //
        // Simply do a sanity check
        //
        ExpCheckPoolBlocks(Entry);
    }

    //
    // Increment performance counters and track this allocation
    //
    // NOTE(review): counter updates and the ExpInsertPoolTracker( opening line
    // are missing in this extract
                         Entry->BlockSize * POOL_BLOCK_SIZE,
                         OriginalType);

    //
    // And return the pool allocation
    //
    ExpCheckPoolBlocks(Entry);
    Entry->PoolTag = Tag;
    return POOL_FREE_BLOCK(Entry);
}
2089 
2090 /*
2091  * @implemented
2092  */
/**
 * @brief Untagged pool allocation (ExAllocatePool); forwards to
 *        ExAllocatePoolWithTag with the default TAG_NONE tag.
 *
 * The disabled (#if 0 && DBG) diagnostic code would instead derive a tag
 * from the first four letters of the calling driver's DLL name.
 *
 * NOTE(review): missing from this extract: the line with the function name
 * and the PoolType/NumberOfBytes parameters, and the true-branch line of the
 * conditional expression below (presumably the driver-entry lookup).
 */
PVOID
NTAPI
{
    ULONG Tag = TAG_NONE;
#if 0 && DBG
    PLDR_DATA_TABLE_ENTRY LdrEntry;

    /* Use the first four letters of the driver name, or "None" if unavailable */
    LdrEntry = KeGetCurrentIrql() <= APC_LEVEL
        : NULL;
    if (LdrEntry)
    {
        ULONG i;
        Tag = 0;
        for (i = 0; i < min(4, LdrEntry->BaseDllName.Length / sizeof(WCHAR)); i++)
            Tag = Tag >> 8 | (LdrEntry->BaseDllName.Buffer[i] & 0xff) << 24;
        for (; i < 4; i++)
            Tag = Tag >> 8 | ' ' << 24;
    }
#endif
    return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
}
2118 
2119 /*
2120  * @implemented
2121  */
/**
 * @brief Main executive pool free routine (ExFreePoolWithTag).
 *
 * Handles, in order:
 *   - special pool frees,
 *   - whole-page ("big") allocations (looked up in the big-page table,
 *     released via MiFreePoolPages),
 *   - small allocations: validates the tag, returns quota if charged,
 *     tries the per-CPU then global lookaside lists, and otherwise
 *     coalesces the block with free neighbors under the pool lock,
 *     releasing the whole page back to Mm when the page becomes empty.
 *
 * A mismatch between TagToFree (when non-zero) and the stored tag bugchecks
 * with BAD_POOL_CALLER.
 *
 * NOTE(review): this extract lost several original lines: the line with the
 * function name and the P parameter, the declarations of PoolType,
 * LookasideList and Process, several if-conditions (pool debug flag tests),
 * and a few counter/tracker/list statement openers. Gaps are marked inline;
 * confirm against the complete file.
 */
VOID
NTAPI
    IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    USHORT BlockSize;
    KIRQL OldIrql;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Tag;
    BOOLEAN Combined = FALSE;
    PFN_NUMBER PageCount, RealPageCount;
    PKPRCB Prcb = KeGetCurrentPrcb();
    // NOTE(review): declarations for PoolType, LookasideList and Process are
    // missing in this extract

    //
    // Check if any of the debug flags are enabled
    //
    // NOTE(review): the if-condition lines are missing in this extract
    {
        //
        // Check if special pool is enabled
        //
        // NOTE(review): the if-condition line is missing in this extract
        {
            //
            // Check if it was allocated from a special pool
            //
            if (MmIsSpecialPoolAddress(P))
            {
                //
                // Was deadlock verification also enabled? We can do some extra
                // checks at this point
                //
                // NOTE(review): the if-condition line is missing in this extract
                {
                    DPRINT1("Verifier not yet supported\n");
                }

                //
                // It is, so handle it via special pool free routine
                //
                MmFreeSpecialPool(P);
                return;
            }
        }

        //
        // For non-big page allocations, we'll do a bunch of checks in here
        //
        if (PAGE_ALIGN(P) != P)
        {
            //
            // Get the entry for this pool allocation
            // The pointer math here may look wrong or confusing, but it is quite right
            //
            Entry = P;
            Entry--;

            //
            // Get the pool type
            //
            PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;

            //
            // FIXME: Many other debugging checks go here
            //
            ExpCheckPoolIrqlLevel(PoolType, 0, P);
        }
    }

    //
    // Check if this is a big page allocation
    //
    if (PAGE_ALIGN(P) == P)
    {
        //
        // We need to find the tag for it, so first we need to find out what
        // kind of allocation this was (paged or nonpaged), then we can go
        // ahead and try finding the tag for it. Remember to get rid of the
        // PROTECTED_POOL tag if it's found.
        //
        // Note that if at insertion time, we failed to add the tag for a big
        // pool allocation, we used a special tag called 'BIG' to identify the
        // allocation, and we may get this tag back. In this scenario, we must
        // manually get the size of the allocation by actually counting through
        // the PFN database.
        //
        PoolType = MmDeterminePoolType(P);
        ExpCheckPoolIrqlLevel(PoolType, 0, P);
        Tag = ExpFindAndRemoveTagBigPages(P, &PageCount, PoolType);
        if (!Tag)
        {
            DPRINT1("We do not know the size of this allocation. This is not yet supported\n");
            ASSERT(Tag == ' GIB');
            PageCount = 1; // We are going to lie! This might screw up accounting?
        }
        else if (Tag & PROTECTED_POOL)
        {
            Tag &= ~PROTECTED_POOL;
        }

        //
        // Check block tag
        //
        if (TagToFree && TagToFree != Tag)
        {
            DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
            KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
        }

        //
        // We have our tag and our page count, so we can go ahead and remove this
        // tracker now
        //
        ExpRemovePoolTracker(Tag, PageCount << PAGE_SHIFT, PoolType);

        //
        // Check if any of the debug flags are enabled
        //
        // NOTE(review): the if-condition lines are missing in this extract
        {
            //
            // Was deadlock verification also enabled? We can do some extra
            // checks at this point
            //
            // NOTE(review): the if-condition line is missing in this extract
            {
                DPRINT1("Verifier not yet supported\n");
            }

            //
            // FIXME: Many debugging checks go here
            //
        }

        //
        // Update counters
        //
        PoolDesc = PoolVector[PoolType];
        // NOTE(review): the opening counter-update lines are missing here
                                    -(LONG_PTR)(PageCount << PAGE_SHIFT));

        //
        // Do the real free now and update the last counter with the big page count
        //
        RealPageCount = MiFreePoolPages(P);
        ASSERT(RealPageCount == PageCount);
        // NOTE(review): the opening counter-update line is missing here
                               -(LONG)RealPageCount);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;
    ASSERT((ULONG_PTR)Entry % POOL_BLOCK_SIZE == 0);

    //
    // Get the size of the entry, and it's pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType - 1) & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];

    //
    // Make sure that the IRQL makes sense
    //
    ExpCheckPoolIrqlLevel(PoolType, 0, P);

    //
    // Get the pool tag and get rid of the PROTECTED_POOL flag
    //
    Tag = Entry->PoolTag;
    if (Tag & PROTECTED_POOL) Tag &= ~PROTECTED_POOL;

    //
    // Check block tag
    //
    if (TagToFree && TagToFree != Tag)
    {
        DPRINT1("Freeing pool - invalid tag specified: %.4s != %.4s\n", (char*)&TagToFree, (char*)&Tag);
        KeBugCheckEx(BAD_POOL_CALLER, 0x0A, (ULONG_PTR)P, Tag, TagToFree);
    }

    //
    // Track the removal of this allocation
    //
    // NOTE(review): the ExpRemovePoolTracker( opening line is missing here
                         BlockSize * POOL_BLOCK_SIZE,
                         Entry->PoolType - 1);

    //
    // Release pool quota, if any
    //
    if ((Entry->PoolType - 1) & QUOTA_POOL_MASK)
    {
        // The owning process pointer occupies the last PVOID of the block
        Process = ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1];
        if (Process)
        {
            if (Process->Pcb.Header.Type != ProcessObject)
            {
                DPRINT1("Object %p is not a process. Type %u, pool type 0x%x, block size %u\n",
                        Process, Process->Pcb.Header.Type, Entry->PoolType, BlockSize);
                KeBugCheckEx(BAD_POOL_CALLER,
                             0x0D,
                             (ULONG_PTR)P,
                             Tag,
                             (ULONG_PTR)Process);
            }
            PsReturnPoolQuota(Process, PoolType, BlockSize * POOL_BLOCK_SIZE);
            ObDereferenceObject(Process);
        }
    }

    //
    // Is this allocation small enough to have come from a lookaside list?
    //
    if (BlockSize <= NUMBER_POOL_LOOKASIDE_LISTS)
    {
        //
        // Try pushing it into the per-CPU lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].P :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].P;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }

        //
        // We failed, try to push it into the global lookaside list
        //
        LookasideList = (PoolType == PagedPool) ?
                        Prcb->PPPagedLookasideList[BlockSize - 1].L :
                        Prcb->PPNPagedLookasideList[BlockSize - 1].L;
        LookasideList->TotalFrees++;
        if (ExQueryDepthSList(&LookasideList->ListHead) < LookasideList->Depth)
        {
            LookasideList->FreeHits++;
            InterlockedPushEntrySList(&LookasideList->ListHead, P);
            return;
        }
    }

    //
    // Get the pointer to the next entry
    //
    NextEntry = POOL_BLOCK(Entry, BlockSize);

    //
    // Update performance counters
    //
    // NOTE(review): an additional counter-update line is missing here
    InterlockedExchangeAddSizeT(&PoolDesc->TotalBytes, -BlockSize * POOL_BLOCK_SIZE);

    //
    // Acquire the pool lock
    //
    OldIrql = ExLockPool(PoolDesc);

    //
    // Check if the next allocation is at the end of the page
    //
    ExpCheckPoolBlocks(Entry);
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                // NOTE(review): the list-removal call line is missing in this extract
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = POOL_PREV_BLOCK(Entry);
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if ((NextEntry->BlockSize != 1))
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                ExpCheckPoolLinks(POOL_FREE_BLOCK(NextEntry));
                // NOTE(review): the list-removal call line is missing in this extract
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Flink));
                ExpCheckPoolLinks(ExpDecodePoolLink((POOL_FREE_BLOCK(NextEntry))->Blink));
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(POOL_NEXT_BLOCK(Entry)) == POOL_NEXT_BLOCK(Entry)))
    {
        //
        // In this case, release the pool lock, update the performance counter,
        // and free the page
        //
        ExUnlockPool(PoolDesc, OldIrql);
        InterlockedExchangeAdd((PLONG)&PoolDesc->TotalPages, -1);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = POOL_NEXT_BLOCK(Entry);

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the pool lock
    //
    ExpInsertPoolHeadList(&PoolDesc->ListHeads[BlockSize - 1], POOL_FREE_BLOCK(Entry));
    // NOTE(review): an ExpCheckPoolLinks call line is missing in this extract
    ExUnlockPool(PoolDesc, OldIrql);
}
2538 
2539 /*
2540  * @implemented
2541  */
2542 VOID
2543 NTAPI
2545 {
2546  //
2547  // Just free without checking for the tag
2548  //
2549  ExFreePoolWithTag(P, 0);
2550 }
2551 
2552 /*
2553  * @unimplemented
2554  */
2555 SIZE_T
2556 NTAPI
2559 {
2560  //
2561  // Not implemented
2562  //
2563  UNIMPLEMENTED;
2564  return FALSE;
2565 }
2566 
2567 /*
2568  * @implemented
2569  */
2570 
2571 PVOID
2572 NTAPI
2575 {
2576  //
2577  // Allocate the pool
2578  //
2579  return ExAllocatePoolWithQuotaTag(PoolType, NumberOfBytes, TAG_NONE);
2580 }
2581 
2582 /*
2583  * @implemented
2584  */
2585 PVOID
2586 NTAPI
2589  IN ULONG Tag,
2591 {
2592  //
2593  // Allocate the pool
2594  //
2595  UNIMPLEMENTED;
2596  return ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2597 }
2598 
2599 /*
2600  * @implemented
2601  */
2602 PVOID
2603 NTAPI
2606  IN ULONG Tag)
2607 {
2608  BOOLEAN Raise = TRUE;
2609  PVOID Buffer;
2611  NTSTATUS Status;
2613 
2614  //
2615  // Check if we should fail instead of raising an exception
2616  //
2617  if (PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE)
2618  {
2619  Raise = FALSE;
2620  PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
2621  }
2622 
2623  //
2624  // Inject the pool quota mask
2625  //
2626  PoolType += QUOTA_POOL_MASK;
2627 
2628  //
2629  // Check if we have enough space to add the quota owner process, as long as
2630  // this isn't the system process, which never gets charged quota
2631  //
2632  ASSERT(NumberOfBytes != 0);
2633  if ((NumberOfBytes <= (PAGE_SIZE - POOL_BLOCK_SIZE - sizeof(PVOID))) &&
2634  (Process != PsInitialSystemProcess))
2635  {
2636  //
2637  // Add space for our EPROCESS pointer
2638  //
2639  NumberOfBytes += sizeof(PEPROCESS);
2640  }
2641  else
2642  {
2643  //
2644  // We won't be able to store the pointer, so don't use quota for this
2645  //
2646  PoolType -= QUOTA_POOL_MASK;
2647  }
2648 
2649  //
2650  // Allocate the pool buffer now
2651  //
2652  Buffer = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);
2653 
2654  //
2655  // If the buffer is page-aligned, this is a large page allocation and we
2656  // won't touch it
2657  //
2658  if (PAGE_ALIGN(Buffer) != Buffer)
2659  {
2660  //
2661  // Also if special pool is enabled, and this was allocated from there,
2662  // we won't touch it either
2663  //
2665  (MmIsSpecialPoolAddress(Buffer)))
2666  {
2667  return Buffer;
2668  }
2669 
2670  //
2671  // If it wasn't actually allocated with quota charges, ignore it too
2672  //
2673  if (!(PoolType & QUOTA_POOL_MASK)) return Buffer;
2674 
2675  //
2676  // If this is the system process, we don't charge quota, so ignore
2677  //
2678  if (Process == PsInitialSystemProcess) return Buffer;
2679 
2680  //
2681  // Actually go and charge quota for the process now
2682  //
2683  Entry = POOL_ENTRY(Buffer);
2684  Status = PsChargeProcessPoolQuota(Process,
2685  PoolType & BASE_POOL_TYPE_MASK,
2686  Entry->BlockSize * POOL_BLOCK_SIZE);
2687  if (!NT_SUCCESS(Status))
2688  {
2689  //
2690  // Quota failed, back out the allocation, clear the owner, and fail
2691  //
2692  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = NULL;
2693  ExFreePoolWithTag(Buffer, Tag);
2694  if (Raise) RtlRaiseStatus(Status);
2695  return NULL;
2696  }
2697 
2698  //
2699  // Quota worked, write the owner and then reference it before returning
2700  //
2701  ((PVOID *)POOL_NEXT_BLOCK(Entry))[-1] = Process;
2702  ObReferenceObject(Process);
2703  }
2704  else if (!(Buffer) && (Raise))
2705  {
2706  //
2707  // The allocation failed, raise an error if we are in raise mode
2708  //
2710  }
2711 
2712  //
2713  // Return the allocated buffer
2714  //
2715  return Buffer;
2716 }
2717 
#if DBG && defined(KDBG)

/*
 * KDBG "!pool" extension: dumps the pool page containing the given address.
 * Argv[1] = hex address inside a pool page; Argv[2] = optional hex flags
 * (bit 0: dump only the entry containing the address plus its first 32
 * bytes of data; bit 31: suppress type/tag columns).
 */
BOOLEAN
ExpKdbgExtPool(
    ULONG Argc,
    PCHAR Argv[])
{
    ULONG_PTR Address = 0, Flags = 0;
    PVOID PoolPage;
    PPOOL_HEADER Entry;
    BOOLEAN ThisOne;
    PULONG Data;

    if (Argc > 1)
    {
        /* Get address */
        if (!KdbpGetHexNumber(Argv[1], &Address))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[1]);
            return TRUE;
        }
    }

    if (Argc > 2)
    {
        /* Get flags (was parsed from Argv[1] by mistake before) */
        if (!KdbpGetHexNumber(Argv[2], &Flags))
        {
            KdbpPrint("Invalid parameter: %s\n", Argv[2]);
            return TRUE;
        }
    }

    /* Check if we got an address */
    if (Address != 0)
    {
        /* Get the base page */
        PoolPage = PAGE_ALIGN(Address);
    }
    else
    {
        KdbpPrint("Heap is unimplemented\n");
        return TRUE;
    }

    /* No paging support! */
    if (!MmIsAddressValid(PoolPage))
    {
        KdbpPrint("Address not accessible!\n");
        return TRUE;
    }

    /* Get pool type */
    if ((Address >= (ULONG_PTR)MmPagedPoolStart) && (Address <= (ULONG_PTR)MmPagedPoolEnd))
        KdbpPrint("Allocation is from PagedPool region\n");
    else if ((Address >= (ULONG_PTR)MmNonPagedPoolStart) && (Address <= (ULONG_PTR)MmNonPagedPoolEnd))
        KdbpPrint("Allocation is from NonPagedPool region\n");
    else
    {
        KdbpPrint("Address 0x%p is not within any pool!\n", (PVOID)Address);
        return TRUE;
    }

    /* Loop all entries of that page */
    Entry = PoolPage;
    do
    {
        /* Check if the address is within that entry; pointer arithmetic on
         * PPOOL_HEADER scales by POOL_BLOCK_SIZE, matching BlockSize units */
        ThisOne = ((Address >= (ULONG_PTR)Entry) &&
                   (Address < (ULONG_PTR)(Entry + Entry->BlockSize)));

        if (!(Flags & 1) || ThisOne)
        {
            /* Print the line */
            KdbpPrint("%c%p size: %4d previous size: %4d  %s  %.4s\n",
                      ThisOne ? '*' : ' ', Entry, Entry->BlockSize, Entry->PreviousSize,
                      (Flags & 0x80000000) ? "" : (Entry->PoolType ? "(Allocated)" : "(Free)     "),
                      (Flags & 0x80000000) ? "" : (PCHAR)&Entry->PoolTag);
        }

        if (Flags & 1)
        {
            /* Dump the first 32 bytes of the entry's payload */
            Data = (PULONG)(Entry + 1);
            KdbpPrint("        %p  %08lx %08lx %08lx %08lx\n"
                      "        %p  %08lx %08lx %08lx %08lx\n",
                      &Data[0], Data[0], Data[1], Data[2], Data[3],
                      &Data[4], Data[4], Data[5], Data[6], Data[7]);
        }

        /* Go to next entry */
        Entry = POOL_BLOCK(Entry, Entry->BlockSize);
    }
    while ((Entry->BlockSize != 0) && ((ULONG_PTR)Entry < (ULONG_PTR)PoolPage + PAGE_SIZE));

    return TRUE;
}

#endif // DBG && KDBG
2816 
2817 /* EOF */
DWORD *typedef PVOID
Definition: winlogon.h:52
#define KeGetCurrentIrql()
Definition: env_spec_w32.h:706
PVOID NTAPI ExAllocatePoolWithTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:1566
#define STATUS_SUCCESS
Definition: contextmenu.cpp:55
static int Hash(const char *)
Definition: reader.c:2247
IN CINT OUT PVOID IN ULONG OUT PULONG ReturnLength
Definition: dumpinfo.c:39
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define IN
Definition: typedefs.h:39
DECLSPEC_NORETURN NTSYSAPI VOID NTAPI RtlRaiseStatus(_In_ NTSTATUS Status)
GENERAL_LOOKASIDE_POOL PPNPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:625
NTSYSAPI VOID NTAPI RtlCopyMemory(VOID UNALIGNED *Destination, CONST VOID UNALIGNED *Source, ULONG Length)
VOID NTAPI ExpCheckPoolLinks(IN PLIST_ENTRY ListHead)
Definition: expool.c:89
PLIST_ENTRY NTAPI ExpRemovePoolHeadList(IN PLIST_ENTRY ListHead)
Definition: expool.c:129
PVOID ULONG Address
Definition: oprghdlr.h:14
ULONG PagedAllocs
Definition: extypes.h:1129
BOOLEAN NTAPI ExpIsPoolListEmpty(IN PLIST_ENTRY ListHead)
Definition: expool.c:111
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
SIZE_T PoolTrackTableMask
Definition: expool.c:38
#define SESSION_POOL_MASK
Definition: mm.h:100
PVOID NTAPI ExAllocatePoolWithQuotaTag(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag)
Definition: expool.c:2604
#define STATUS_INFO_LENGTH_MISMATCH
Definition: udferr_usr.h:133
NTSTATUS NTAPI PsChargeProcessPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:220
#define POOL_FLAG_VERIFIER
Definition: miarm.h:258
struct _Entry Entry
Definition: kefuncs.h:640
SIZE_T PoolTrackTableSize
Definition: expool.c:28
struct _KGUARDED_MUTEX * PKGUARDED_MUTEX
VOID NTAPI ExpCheckPoolAllocation(PVOID P, POOL_TYPE PoolType, ULONG Tag)
Definition: expool.c:286
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
ASSERT((InvokeOnSuccess||InvokeOnError||InvokeOnCancel)?(CompletionRoutine!=NULL):TRUE)
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:315
__wchar_t WCHAR
Definition: xmlstorage.h:180
#define POOL_FLAG_SPECIAL_POOL
Definition: miarm.h:260
struct _LIST_ENTRY * Blink
Definition: typedefs.h:121
#define ExReleaseSpinLock(Lock, OldIrql)
FORCEINLINE struct _KPRCB * KeGetCurrentPrcb(VOID)
Definition: ketypes.h:1054
struct _EPROCESS * PEPROCESS
Definition: nt_native.h:30
PLIST_ENTRY NTAPI ExpEncodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:82
_In_ KPRIORITY Priority
Definition: kefuncs.h:516
SIZE_T PoolTrackTableSize
Definition: expool.c:38
#define TRUE
Definition: numbers.c:17
PLDR_DATA_TABLE_ENTRY NTAPI MiLookupDataTableEntry(IN PVOID Address)
Definition: sysldr.c:3365
#define POOL_FLAG_CRASH_ON_FAILURE
Definition: miarm.h:262
#define ExRaiseStatus
Definition: ntoskrnl.h:94
#define MAXULONG_PTR
Definition: basetsd.h:102
#define POOL_PREV_BLOCK(x)
Definition: expool.c:55
PVOID MmPagedPoolEnd
Definition: init.c:26
VOID NTAPI ExpRemovePoolEntryList(IN PLIST_ENTRY Entry)
Definition: expool.c:118
BOOLEAN ExpKdbgExtPool(ULONG Argc, PCHAR Argv[])
SIZE_T TotalBytes
Definition: miarm.h:298
LONG_PTR SSIZE_T
Definition: basetsd.h:182
PVOID ULONG ULONG PULONG Data
Definition: oprghdlr.h:14
VOID NTAPI ExReturnPoolQuota(IN PVOID P)
Definition: expool.c:1514
VOID NTAPI ExpInsertPoolHeadList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:168
#define POOL_FREE_IRQL_INVALID
Definition: miarm.h:282
VOID NTAPI ObDereferenceObject(IN PVOID Object)
Definition: obref.c:267
ULONG NonPagedFrees
Definition: extypes.h:1133
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:426
PKGUARDED_MUTEX ExpPagedPoolMutex
Definition: expool.c:37
_Must_inspect_result_ _In_ ULONG Index
Definition: fltkernel.h:1824
VOID NTAPI ExpInitializePoolListHead(IN PLIST_ENTRY ListHead)
Definition: expool.c:104
void DbgBreakPoint()
Definition: mach.c:558
#define TAG_NONE
Definition: tag.h:127
ULONG RunningDeAllocs
Definition: miarm.h:291
LIST_ENTRY ListHeads[POOL_LISTS_PER_PAGE]
Definition: miarm.h:300
PVOID NTAPI MmAllocateSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialType)
#define ExAcquireSpinLock(Lock, OldIrql)
BOOLEAN NTAPI KdbpGetHexNumber(IN PCHAR pszNum, OUT ULONG_PTR *pulValue)
Definition: kdb_cli.c:413
ULONG PoolHitTag
Definition: expool.c:43
PSLIST_ENTRY WINAPI InterlockedPopEntrySList(PSLIST_HEADER ListHead)
Definition: interlocked.c:55
BOOLEAN NTAPI ExpAddTagForBigPages(IN PVOID Va, IN ULONG Key, IN ULONG NumberOfPages, IN POOL_TYPE PoolType)
Definition: expool.c:1287
NTSTATUS NTAPI ExGetPoolTagInfo(IN PSYSTEM_POOLTAG_INFORMATION SystemInformation, IN ULONG SystemInformationLength, IN OUT PULONG ReturnLength OPTIONAL)
Definition: expool.c:1182
PVOID MmNonPagedPoolEnd
Definition: mminit.c:99
uint32_t ULONG_PTR
Definition: typedefs.h:64
KSPIN_LOCK ExpLargePoolTableLock
Definition: expool.c:45
FORCEINLINE ULONG KeGetCurrentProcessorNumber(VOID)
Definition: ke.h:325
#define POOL_FLAG_CHECK_TIMERS
Definition: miarm.h:255
UCHAR KIRQL
Definition: env_spec_w32.h:591
#define POOL_FREE_BLOCK(x)
Definition: expool.c:52
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
GLenum GLclampf GLint i
Definition: glfuncs.h:14
struct _POOL_DPC_CONTEXT POOL_DPC_CONTEXT
ULONG PFN_NUMBER
Definition: ke.h:8
SIZE_T PoolTrackTableSizeExpansion
Definition: expool.c:30
NTSTATUS(* NTAPI)(IN PFILE_FULL_EA_INFORMATION EaBuffer, IN ULONG EaLength, OUT PULONG ErrorOffset)
Definition: IoEaTest.cpp:117
PEPROCESS PsInitialSystemProcess
Definition: psmgr.c:50
PPOOL_DESCRIPTOR PoolVector[2]
Definition: expool.c:36
long LONG
Definition: pedump.c:60
#define POOL_NEXT_BLOCK(x)
Definition: expool.c:54
struct LOOKASIDE_ALIGN _GENERAL_LOOKASIDE * PGENERAL_LOOKASIDE
enum _EX_POOL_PRIORITY EX_POOL_PRIORITY
#define NULL
Definition: mystdio.h:57
SIZE_T PoolBigPageTableSize
Definition: expool.c:39
#define InterlockedCompareExchangePointer
Definition: interlocked.h:129
VOID NTAPI PsReturnPoolQuota(IN PEPROCESS Process, IN POOL_TYPE PoolType, IN SIZE_T Amount)
Definition: quota.c:237
#define POOL_BLOCK(x, i)
Definition: expool.c:53
#define PsGetCurrentProcess
Definition: psfuncs.h:17
PPOOL_TRACKER_TABLE PoolTrackTableExpansion
Definition: expool.c:29
FORCEINLINE VOID KeInitializeSpinLock(_Out_ PKSPIN_LOCK SpinLock)
Definition: kefuncs.h:251
_Must_inspect_result_ _In_ LPCGUID ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _In_ ULONG PoolTag
Definition: fltkernel.h:2520
struct _POOL_DPC_CONTEXT * PPOOL_DPC_CONTEXT
VOID NTAPI ExpCheckPoolHeader(IN PPOOL_HEADER Entry)
Definition: expool.c:183
VOID * Table
Definition: acefiex.h:630
#define POOL_ENTRY(x)
Definition: expool.c:51
ULONG ExpNumberOfPagedPools
Definition: expool.c:33
#define FORCEINLINE
Definition: ntbasedef.h:213
void DPRINT(...)
Definition: polytest.cpp:61
ULONG ExpPoolBigEntriesInUse
Definition: expool.c:46
SIZE_T NonPagedBytes
Definition: miarm.h:349
ULONG NonPagedAllocs
Definition: extypes.h:1132
ULONG NTAPI ExpFindAndRemoveTagBigPages(IN PVOID Va, OUT PULONG_PTR BigPages, IN POOL_TYPE PoolType)
Definition: expool.c:1376
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
struct _POOL_TRACKER_TABLE POOL_TRACKER_TABLE
struct _POOL_HEADER * PPOOL_HEADER
#define InterlockedExchangeAdd
Definition: interlocked.h:181
#define POOL_ALLOC_IRQL_INVALID
Definition: miarm.h:281
UINTN Size
Definition: acefiex.h:550
PVOID NTAPI ExAllocatePool(POOL_TYPE PoolType, SIZE_T NumberOfBytes)
Definition: expool.c:2095
SIZE_T NonPagedUsed
Definition: extypes.h:1134
VOID NTAPI ExpCheckPoolBlocks(IN PVOID Block)
Definition: expool.c:366
#define DBG_UNREFERENCED_LOCAL_VARIABLE(L)
Definition: ntbasedef.h:318
_In_opt_ PVOID _In_opt_ PVOID SystemArgument1
Definition: ketypes.h:660
PLIST_ENTRY NTAPI ExpDecodePoolLink(IN PLIST_ENTRY Link)
Definition: expool.c:75
struct _LIST_ENTRY * Flink
Definition: typedefs.h:120
unsigned char BOOLEAN
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
_In_ LARGE_INTEGER _In_opt_ PKDPC Dpc
Definition: kefuncs.h:524
BOOLEAN ExStopBadTags
Definition: expool.c:44
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID VirtualAddress)
Definition: pool.c:406
VOID NTAPI ExpRemovePoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:588
_Out_ PBOOLEAN QuotaCharged
Definition: exfuncs.h:945
ULONG RunningAllocs
Definition: miarm.h:290
if(!(yy_init))
Definition: macro.lex.yy.c:704
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:40
VOID NTAPI INIT_SECTION InitializePool(IN POOL_TYPE PoolType, IN ULONG Threshold)
Definition: expool.c:849
PPOOL_DESCRIPTOR ExpPagedPoolDescriptor[16+1]
Definition: expool.c:35
INT POOL_TYPE
Definition: typedefs.h:77
VOID NTAPI ExpGetPoolTagInfoTarget(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
Definition: expool.c:1147
uint64_t ULONGLONG
Definition: typedefs.h:66
VOID NTAPI INIT_SECTION ExpSeedHotTags(VOID)
Definition: expool.c:468
#define KeAcquireSpinLock(sl, irql)
Definition: env_spec_w32.h:609
POOL_DESCRIPTOR NonPagedPoolDescriptor
Definition: expool.c:34
SIZE_T PagedUsed
Definition: extypes.h:1131
#define PROTECTED_POOL
Definition: extypes.h:294
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
#define PAGE_ALIGN(Va)
LONG NonPagedFrees
Definition: miarm.h:348
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
PPOOL_TRACKER_TABLE PoolTrackTable
Definition: expool.c:27
#define MUST_SUCCEED_POOL_MASK
Definition: mm.h:97
UINTN VOID * Buffer
Definition: acefiex.h:370
SIZE_T PagedBytes
Definition: miarm.h:352
BOOLEAN NTAPI MmIsAddressValid(IN PVOID VirtualAddress)
Definition: mmsup.c:174
ULONG TotalPages
Definition: miarm.h:292
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:420
#define BYTES_TO_PAGES(Size)
ULONG NTAPI MiFreePoolPages(IN PVOID StartingAddress)
Definition: pool.c:903
VOID NTAPI ExpInsertPoolTailList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry)
Definition: expool.c:153
char * PBOOLEAN
Definition: retypes.h:11
#define InterlockedExchangeAddSizeT(a, b)
Definition: interlocked.h:196
FORCEINLINE KIRQL ExLockPool(IN PPOOL_DESCRIPTOR Descriptor)
Definition: expool.c:1099
VOID KdbpPrint(IN PCHAR Format, IN...OPTIONAL)
Prints the given string with printf-like formatting.
Definition: kdb_cli.c:2462
IN REFCLSID IN PUNKNOWN IN POOL_TYPE PoolType
Definition: unknown.h:68
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:803
Definition: ketypes.h:672
IN SIZE_T NumberOfBytes
Definition: ndis.h:3914
FORCEINLINE VOID ExpCheckPoolIrqlLevel(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN PVOID Entry)
Definition: expool.c:406
Definition: ntddk_ex.h:202
#define InterlockedDecrementUL(Addend)
Definition: ex.h:1452
ULONG PagedFrees
Definition: extypes.h:1130
#define PAGE_SIZE
Definition: env_spec_w32.h:49
Definition: typedefs.h:118
KPROCESS Pcb
Definition: pstypes.h:1139
NTKERNELAPI PSLIST_ENTRY FASTCALL InterlockedPushEntrySList(IN PSLIST_HEADER ListHead, IN PSLIST_ENTRY ListEntry)
Definition: interlocked.c:82
#define PASSIVE_LEVEL
Definition: env_spec_w32.h:693
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
PVOID NTAPI ExAllocatePoolWithQuota(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes)
Definition: expool.c:2573
UCHAR KeNumberNodes
Definition: krnlinit.c:40
Status
Definition: gdiplustypes.h:24
_In_opt_ PVOID _In_opt_ PVOID _In_opt_ PVOID SystemArgument2
Definition: ketypes.h:660
PVOID MmPagedPoolStart
Definition: miarm.h:552
PVOID MmNonPagedPoolStart
Definition: init.c:24
#define DISPATCH_LEVEL
Definition: env_spec_w32.h:696
ULONG TotalBigPages
Definition: miarm.h:293
#define POOL_FLAG_CHECK_DEADLOCK
Definition: miarm.h:259
ULONG_PTR SIZE_T
Definition: typedefs.h:79
#define NT_SUCCESS(StatCode)
Definition: cmd.c:149
VOID NTAPI INIT_SECTION ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:798
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
LONG NTSTATUS
Definition: DriverTester.h:11
#define QUOTA_POOL_MASK
Definition: ExPools.c:16
#define InterlockedIncrement
Definition: armddk.h:53
VOID NTAPI KeSignalCallDpcDone(IN PVOID SystemArgument1)
Definition: dpc.c:996
USHORT PreviousSize
#define NUMBER_POOL_LOOKASIDE_LISTS
Definition: ketypes.h:286
#define ROUND_TO_PAGES(Size)
unsigned short USHORT
Definition: pedump.c:61
VOID NTAPI KeGenericCallDpc(IN PKDEFERRED_ROUTINE Routine, IN PVOID Context)
Definition: dpc.c:967
#define InterlockedIncrementUL(Addend)
Definition: ex.h:1455
SIZE_T NTAPI ExQueryPoolBlockSize(IN PVOID PoolBlock, OUT PBOOLEAN QuotaCharged)
Definition: expool.c:2557
#define POOL_FLAG_CHECK_WORKERS
Definition: miarm.h:256
ULONG KSPIN_LOCK
Definition: env_spec_w32.h:72
UNICODE_STRING BaseDllName
Definition: ldrtypes.h:143
#define FIELD_OFFSET(t, f)
Definition: typedefs.h:255
PVOID NTAPI ExAllocatePoolWithTagPriority(IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN ULONG Tag, IN EX_POOL_PRIORITY Priority)
Definition: expool.c:2587
ULONG ExPoolFailures
Definition: expool.c:48
VOID NTAPI ExpInsertPoolTracker(IN ULONG Key, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
Definition: expool.c:679
__int3264 LONG_PTR
Definition: mstsclib_h.h:276
unsigned int * PULONG
Definition: retypes.h:1
#define min(a, b)
Definition: monoChain.cc:55
#define KeReleaseSpinLock(sl, irql)
Definition: env_spec_w32.h:627
FORCEINLINE ULONG ExpComputeHashForTag(IN ULONG Tag, IN SIZE_T BucketMask)
Definition: expool.c:431
#define POOL_FLAG_DBGPRINT_ON_FAILURE
Definition: miarm.h:261
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
#define INIT_SECTION
Definition: cdfs.h:11
FORCEINLINE ULONG ExpComputePartialHashForAddress(IN PVOID BaseAddress)
Definition: expool.c:447
#define DPRINT1
Definition: precomp.h:8
_Must_inspect_result_ typedef _In_ ULONG TableEntry
Definition: iotypes.h:3913
#define POOL_LISTS_PER_PAGE
Definition: miarm.h:248
IN ULONG IN ULONG Tag
Definition: evtlib.h:153
PPOOL_TRACKER_BIG_PAGES PoolBigPageTable
Definition: expool.c:41
SIZE_T PoolBigPageTableHash
Definition: expool.c:39
#define POOL_BIG_TABLE_ENTRY_FREE
Definition: expool.c:23
_Must_inspect_result_ _In_ PLARGE_INTEGER _In_ PLARGE_INTEGER _In_ ULONG _In_ PFILE_OBJECT _In_ PVOID Process
Definition: fsrtlfuncs.h:219
void * _ReturnAddress(void)
signed char * PCHAR
Definition: retypes.h:7
LONG NonPagedAllocs
Definition: miarm.h:347
#define OUT
Definition: typedefs.h:40
#define ObReferenceObject
Definition: obfuncs.h:204
#define FALSE
Definition: numbers.c:16
_Must_inspect_result_ typedef _Out_ PULONG TableSize
Definition: iotypes.h:3937
FORCEINLINE VOID ExUnlockPool(IN PPOOL_DESCRIPTOR Descriptor, IN KIRQL OldIrql)
Definition: expool.c:1123
ULONG ExpPoolFlags
Definition: expool.c:47
struct tagContext Context
Definition: acpixf.h:1013
unsigned int ULONG
Definition: retypes.h:1
VOID NTAPI ExFreePool(PVOID P)
Definition: expool.c:2544
#define UNIMPLEMENTED
Definition: debug.h:114
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
VOID NTAPI MmFreeSpecialPool(IN PVOID P)
#define ULONG_PTR
Definition: config.h:101
uint32_t * PULONG_PTR
Definition: typedefs.h:64
BOOLEAN NTAPI MmUseSpecialPool(IN SIZE_T NumberOfBytes, IN ULONG Tag)
ULONG TagUlong
Definition: extypes.h:1127
#define POOL_QUOTA_FAIL_INSTEAD_OF_RAISE
#define POOL_MAX_ALLOC
Definition: miarm.h:250
#define POOL_RAISE_IF_ALLOCATION_FAILURE
#define POOL_BLOCK_SIZE
Definition: miarm.h:246
#define max(a, b)
Definition: slicer.cc:81
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
PLIST_ENTRY NTAPI ExpRemovePoolTailList(IN PLIST_ENTRY ListHead)
Definition: expool.c:141
VOID NTAPI ExFreePoolWithTag(IN PVOID P, IN ULONG TagToFree)
Definition: expool.c:2124
signed int * PLONG
Definition: retypes.h:5
BOOLEAN NTAPI MmIsSpecialPoolAddress(IN PVOID P)
#define APC_LEVEL
Definition: env_spec_w32.h:695
IN HDEVINFO IN PSP_DEVINFO_DATA DeviceInfoData OPTIONAL
Definition: devinst.c:44
ACPI_EFI_GUID ACPI_EFI_OPEN_PROTOCOL_INFORMATION_ENTRY UINTN * EntryCount
Definition: acefiex.h:668
base of all file and directory entries
Definition: entries.h:82
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:90
ACPI_EFI_INPUT_KEY * Key
Definition: acefiex.h:303
VOID NTAPI ExQueryPoolUsage(OUT PULONG PagedPoolPages, OUT PULONG NonPagedPoolPages, OUT PULONG PagedPoolAllocs, OUT PULONG PagedPoolFrees, OUT PULONG PagedPoolLookasideHits, OUT PULONG NonPagedPoolAllocs, OUT PULONG NonPagedPoolFrees, OUT PULONG NonPagedPoolLookasideHits)
Definition: expool.c:1450
DISPATCHER_HEADER Header
Definition: ketypes.h:1380
_Must_inspect_result_ _In_ LPCGUID _In_ ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _Inout_ PVOID LookasideList
Definition: fltkernel.h:2551
BOOLEAN NTAPI KeSignalCallDpcSynchronize(IN PVOID SystemArgument2)
Definition: dpc.c:1009
#define POOL_FLAG_CHECK_RESOURCES
Definition: miarm.h:257
#define P(a, b, c, d, e, x)
GENERAL_LOOKASIDE_POOL PPPagedLookasideList[NUMBER_POOL_LOOKASIDE_LISTS]
Definition: ketypes.h:626
KSPIN_LOCK ExpTaggedPoolLock
Definition: expool.c:42
_In_ PSTORAGE_PROPERTY_ID _Outptr_ PSTORAGE_DESCRIPTOR_HEADER * Descriptor
Definition: classpnp.h:966
_In_opt_ PVOID DeferredContext
Definition: ketypes.h:660