ReactOS  0.4.10-dev-244-gb941574
pool.c
Go to the documentation of this file.
1 /*
2  * PROJECT: ReactOS Kernel
3  * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE: ntoskrnl/mm/ARM3/pool.c
5  * PURPOSE: ARM Memory Manager Pool Allocator
6  * PROGRAMMERS: ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 /* GLOBALS ********************************************************************/
19 
34 
35 /* PRIVATE FUNCTIONS **********************************************************/
36 
/*
 * MiProtectFreeNonPagedPool (name per ARM3 convention — TODO confirm; the
 * doxygen extraction dropped the signature line 39).
 * Makes PageCount pages of freed nonpaged pool inaccessible by invalidating
 * their PTEs: Valid=0 plus Prototype=1 is the marker this module uses to
 * recognize "protected free pool" (see the matching unprotect routine below).
 * No-op when the address is physically mapped (no PTEs to edit).
 * NOTE(review): line 63 (presumably the actual TLB-flush call) is missing
 * from this extraction — do not assume the flush is absent in real source.
 */
37 VOID
38 NTAPI
40  IN ULONG PageCount)
41 {
42  PMMPTE PointerPte, LastPte;
43  MMPTE TempPte;
44 
45  /* If pool is physical, can't protect PTEs */
46  if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;
47 
48  /* Get PTE pointers and loop */
49  PointerPte = MiAddressToPte(VirtualAddress);
50  LastPte = PointerPte + PageCount;
51  do
52  {
53  /* Capture the PTE for safety */
54  TempPte = *PointerPte;
55 
56  /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
57  TempPte.u.Hard.Valid = 0;
58  TempPte.u.Soft.Prototype = 1;
59  MI_WRITE_INVALID_PTE(PointerPte, TempPte);
60  } while (++PointerPte < LastPte);
61 
62  /* Flush the TLB */
64 }
65 
/*
 * MiUnProtectFreeNonPagedPool (name confirmed by the doxygen index:
 * pool.c:68; the signature line itself was dropped by the extraction).
 * Re-validates PTEs previously invalidated by the protect routine above
 * (Valid=0 && Prototype=1), stopping at the first PTE that is not in the
 * protected state or after PageCount pages. PageCount==0 means "until the
 * protected run ends" — the ++UnprotectedPages == PageCount check can then
 * only stop the loop via the while condition.
 * Returns TRUE if at least one page was made accessible, FALSE otherwise
 * (including the physical-address case, where there are no PTEs to edit).
 */
66 BOOLEAN
67 NTAPI
69  IN ULONG PageCount)
70 {
71  PMMPTE PointerPte;
72  MMPTE TempPte;
73  PFN_NUMBER UnprotectedPages = 0;
74 
75  /* If pool is physical, can't protect PTEs */
76  if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;
77 
78  /* Get, and capture the PTE */
79  PointerPte = MiAddressToPte(VirtualAddress);
80  TempPte = *PointerPte;
81 
82  /* Loop protected PTEs */
83  while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
84  {
85  /* Unprotect the PTE */
86  TempPte.u.Hard.Valid = 1;
87  TempPte.u.Soft.Prototype = 0;
88  MI_WRITE_VALID_PTE(PointerPte, TempPte);
89 
90  /* One more page */
91  if (++UnprotectedPages == PageCount) break;
92 
93  /* Capture next PTE */
94  TempPte = *(++PointerPte);
95  }
96 
97  /* Return if any pages were unprotected */
98  return UnprotectedPages ? TRUE : FALSE;
99 }
100 
/*
 * MiProtectedPoolUnProtectLinks (name taken from the call sites in this
 * file; the extraction dropped line 103 with the name and first parameter,
 * presumably IN PLIST_ENTRY Links).
 * Before a list insert/remove on protected free pool, the neighbor entries'
 * pages must be writable. This unprotects the page behind Links->Flink and,
 * when the list has more than the head, Links->Blink, returning the
 * successfully-unprotected VAs through PoolFlink/PoolBlink (NULL when not
 * unprotected) so the caller can reprotect exactly those pages afterwards.
 */
102 VOID
104  OUT PVOID* PoolFlink,
105  OUT PVOID* PoolBlink)
106 {
107  BOOLEAN Safe;
108  PVOID PoolVa;
109 
110  /* Initialize variables */
111  *PoolFlink = *PoolBlink = NULL;
112 
113  /* Check if the list has entries */
114  if (IsListEmpty(Links) == FALSE)
115  {
116  /* We are going to need the forward link to do an insert */
117  PoolVa = Links->Flink;
118 
119  /* So make it safe to access */
120  Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
121  if (Safe) *PoolFlink = PoolVa;
122  }
123 
124  /* Are we going to need a backward link too? */
125  if (Links != Links->Blink)
126  {
127  /* Get the head's backward link for the insert */
128  PoolVa = Links->Blink;
129 
130  /* Make it safe to access */
131  Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
132  if (Safe) *PoolBlink = PoolVa;
133  }
134 }
135 
/*
 * MiProtectedPoolProtectLinks (name from the call site above; the
 * extraction dropped line 138 with the name and the PoolFlink parameter).
 * Counterpart of MiProtectedPoolUnProtectLinks: reprotects the one-page
 * spans that were made accessible for a list operation. NULL arguments
 * mean "was never unprotected" and are skipped.
 */
137 VOID
139  IN PVOID PoolBlink)
140 {
141  /* Reprotect the pages, if they got unprotected earlier */
142  if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
143  if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
144 }
145 
/*
 * MiProtectedPoolInsertList (name from the call sites later in this file;
 * lines 148-149 — the name plus ListHead/Entry parameters — were dropped
 * by the extraction).
 * Inserts Entry into a free-pool list whose neighbor entries may live on
 * protected (invalid-PTE) pages: unprotect the link pages, insert at head
 * (Critical) or tail (!Critical), then reprotect exactly what was opened.
 */
146 VOID
147 NTAPI
150  IN BOOLEAN Critical)
151 {
152  PVOID PoolFlink, PoolBlink;
153 
154  /* Make the list accessible */
155  MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);
156 
157  /* Now insert in the right position */
158  Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);
159 
160  /* And reprotect the pages containing the free links */
161  MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
162 }
163 
/*
 * MiProtectedPoolRemoveEntryList (name from the call sites later in this
 * file; line 166 — the name and Entry parameter — was dropped).
 * Removes Entry from a protected free-pool list: unprotect the pages
 * holding Entry's neighbors, unlink, then reprotect whatever was opened.
 * Unlike the insert path this reprotects inline rather than calling
 * MiProtectedPoolProtectLinks — behavior is identical.
 */
164 VOID
165 NTAPI
167 {
168  PVOID PoolFlink, PoolBlink;
169 
170  /* Make the list accessible */
171  MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);
172 
173  /* Now remove */
174  RemoveEntryList(Entry);
175 
176  /* And reprotect the pages containing the free links */
177  if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
178  if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
179 }
180 
/*
 * Pool-threshold initializer (the extraction dropped lines 183-195,
 * including the function name — per the surviving comments it computes
 * default low/high free-pool thresholds: low = 8MB or one third of
 * nonpaged pool, high = 20MB or 50%). NOTE(review): the actual threshold
 * assignments are missing from this dump; consult the real source before
 * relying on the exact formulas.
 */
181 VOID
182 NTAPI
185 {
187 
188  /* Default low threshold of 8MB or one third of nonpaged pool */
191 
192  /* Default high threshold of 20MB or 50% */
196 }
197 
/*
 * Pool-event initializer (name line 201 dropped; presumably
 * MiInitializePoolEvents). Computes current free paged and nonpaged pool
 * in pages and sets/clears the corresponding high/low notification events
 * against MiHigh*/MiLow* thresholds, under the appropriate pool locks.
 * NOTE(review): the extraction dropped every KeSetEvent/KeClearEvent call
 * (lines 216, 221, 228, 233, 249, 254, 261, 266) as well as the nonpaged
 * pool lock acquire/release and the nonpaged free-page computation
 * (lines 240, 243, 270) — only the branch skeleton survives here.
 */
198 VOID
199 NTAPI
202 {
203  KIRQL OldIrql;
204  PFN_NUMBER FreePoolInPages;
205 
206  /* Lock paged pool */
207  KeAcquireGuardedMutex(&MmPagedPoolMutex);
208 
209  /* Total size of the paged pool minus the allocated size, is free */
210  FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;
211 
212  /* Check the initial high state */
213  if (FreePoolInPages >= MiHighPagedPoolThreshold)
214  {
215  /* We have plenty of pool */
217  }
218  else
219  {
220  /* We don't */
222  }
223 
224  /* Check the initial low state */
225  if (FreePoolInPages <= MiLowPagedPoolThreshold)
226  {
227  /* We're very low in free pool memory */
229  }
230  else
231  {
232  /* We're not */
234  }
235 
236  /* Release the paged pool lock */
237  KeReleaseGuardedMutex(&MmPagedPoolMutex);
238 
239  /* Now it's time for the nonpaged pool lock */
241 
242  /* Free pages are the maximum minus what's been allocated */
244 
245  /* Check if we have plenty */
246  if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
247  {
248  /* We do, set the event */
250  }
251  else
252  {
253  /* We don't, clear the event */
255  }
256 
257  /* Check if we have very little */
258  if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
259  {
260  /* We do, set the event */
262  }
263  else
264  {
265  /* We don't, clear it */
267  }
268 
269  /* We're done, release the nonpaged pool lock */
271 }
272 
/*
 * Nonpaged pool initializer (name lines 275-276 dropped; presumably
 * MiInitializeNonPagedPool, PAGED_CODE at init time). Sets up:
 *  - the paged/nonpaged single-page S-LISTs and their depth maximums
 *    (scaled by physical memory; disabled under pool debugging — the
 *    assignments themselves are missing from this extraction);
 *  - the MI_MAX_FREE_PAGE_LISTS free-page list heads;
 *  - one MMFREE_POOL_ENTRY covering the whole initial pool, inserted in
 *    the last (largest) list, with every page linked back to the owner;
 *  - bookkeeping of first/last initial pool frames and the expansion
 *    PTE space (guard pages on both ends).
 * NOTE(review): lines 293-294, 298-299, 306-309, 326, 362, 367-368, 375,
 * 380, 386-387, 392, 400-401 are missing (S-LIST maximums, pool-page
 * count, MiStartOfInitialPoolFrame/MiEndOfInitialPoolFrame assignments,
 * expansion size math, and MiInitializeSystemPtes arguments).
 */
273 VOID
274 NTAPI
277 {
278  ULONG i;
279  PFN_COUNT PoolPages;
280  PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
281  PMMPTE PointerPte;
282  PAGED_CODE();
283 
284  //
285  // Initialize the pool S-LISTs as well as their maximum count. In general,
286  // we'll allow 8 times the default on a 2GB system, and two times the default
287  // on a 1GB system.
288  //
289  InitializeSListHead(&MiPagedPoolSListHead);
290  InitializeSListHead(&MiNonPagedPoolSListHead);
291  if (MmNumberOfPhysicalPages >= ((2 * _1GB) /PAGE_SIZE))
292  {
295  }
296  else if (MmNumberOfPhysicalPages >= (_1GB /PAGE_SIZE))
297  {
300  }
301 
302  //
303  // However if debugging options for the pool are enabled, turn off the S-LIST
304  // to reduce the risk of messing things up even more
305  //
307  {
310  }
311 
312  //
313  // We keep 4 lists of free pages (4 lists help avoid contention)
314  //
315  for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
316  {
317  //
318  // Initialize each of them
319  //
320  InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
321  }
322 
323  //
324  // Calculate how many pages the initial nonpaged pool has
325  //
327  MmNumberOfFreeNonPagedPool = PoolPages;
328 
329  //
330  // Initialize the first free entry
331  //
332  FreeEntry = MmNonPagedPoolStart;
333  FirstEntry = FreeEntry;
334  FreeEntry->Size = PoolPages;
335  FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
336  FreeEntry->Owner = FirstEntry;
337 
338  //
339  // Insert it into the last list
340  //
341  InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
342  &FreeEntry->List);
343 
344  //
345  // Now create free entries for every single other page
346  //
347  while (PoolPages-- > 1)
348  {
349  //
350  // Link them all back to the original entry
351  //
352  FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
353  FreeEntry->Owner = FirstEntry;
354  FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
355  }
356 
357  //
358  // Validate and remember first allocated pool page
359  //
360  PointerPte = MiAddressToPte(MmNonPagedPoolStart);
361  ASSERT(PointerPte->u.Hard.Valid == 1);
363 
364  //
365  // Keep track of where initial nonpaged pool ends
366  //
369 
370  //
371  // Validate and remember last allocated pool page
372  //
373  PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
374  ASSERT(PointerPte->u.Hard.Valid == 1);
376 
377  //
378  // Validate the first nonpaged pool expansion page (which is a guard page)
379  //
381  ASSERT(PointerPte->u.Hard.Valid == 0);
382 
383  //
384  // Calculate the size of the expansion region alone
385  //
388 
389  //
390  // Remove 2 pages, since there's a guard page on top and on the bottom
391  //
393 
394  //
395  // Now initialize the nonpaged pool expansion PTE space. Remember there's a
396  // guard page on top so make sure to skip it. The bottom guard page will be
397  // guaranteed by the fact our size is off by one.
398  //
399  MiInitializeSystemPtes(PointerPte + 1,
402 }
403 
/*
 * MmDeterminePoolType (per the doxygen index: pool.c:406 with signature
 * IN PVOID PoolAddress; the signature line itself was dropped here).
 * Classifies a pool address by simple range comparison against the paged
 * and nonpaged pool bounds. An address in neither range is a caller bug:
 * bugchecks with BAD_POOL_CALLER, subcode 0x42, passing the bad address.
 */
404 POOL_TYPE
405 NTAPI
407 {
408  //
409  // Use a simple bounds check
410  //
411  if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
412  return PagedPool;
413  else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
414  return NonPagedPool;
415  KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
416 }
417 
/*
 * Page-level pool allocator (name line 420 dropped; presumably
 * MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)).
 * Returns the VA of SizeInBytes rounded up to whole pages, or NULL.
 * Paged pool path: single pages may come from the S-LIST; otherwise a bitmap
 * scan under MmPagedPoolMutex, expanding the pool by mapping new page tables
 * when the scan fails; PTEs are written demand-zero. Nonpaged path: S-LIST,
 * then the per-size free lists (with protected-pool unprotect/reprotect
 * dances), and finally expansion via system PTEs with pages charged in the
 * PFN database; start/end of the allocation are marked in the PFN entries.
 * NOTE(review): the extraction dropped many lines, including the bitmap
 * scan calls (475, 604), lock acquire/release (691, 800, 822, 841, 846,
 * 890-891), protected-pool condition lines (701, 726-727, 741, 747, 810),
 * the bucket clamp (680), TLB flush (643), the demand-zero PTE setup (646),
 * MI_SET_USAGE lines (547, 855), PteFrame assignment (864), and the
 * MiInitializeSystemPtes-era expansion details. The skeleton below is
 * incomplete; consult real source before reasoning about lock pairing.
 */
418 PVOID
419 NTAPI
421  IN SIZE_T SizeInBytes)
422 {
423  PFN_NUMBER PageFrameNumber;
424  PFN_COUNT SizeInPages, PageTableCount;
425  ULONG i;
426  KIRQL OldIrql;
427  PLIST_ENTRY NextEntry, NextHead, LastHead;
428  PMMPTE PointerPte, StartPte;
429  PMMPDE PointerPde;
430  ULONG EndAllocation;
431  MMPTE TempPte;
432  MMPDE TempPde;
433  PMMPFN Pfn1;
434  PVOID BaseVa, BaseVaStart;
435  PMMFREE_POOL_ENTRY FreeEntry;
436 
437  //
438  // Figure out how big the allocation is in pages
439  //
440  SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);
441 
442  //
443  // Check for overflow
444  //
445  if (SizeInPages == 0)
446  {
447  //
448  // Fail
449  //
450  return NULL;
451  }
452 
453  //
454  // Handle paged pool
455  //
456  if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
457  {
458  //
459  // If only one page is being requested, try to grab it from the S-LIST
460  //
461  if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
462  {
463  BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
464  if (BaseVa) return BaseVa;
465  }
466 
467  //
468  // Lock the paged pool mutex
469  //
470  KeAcquireGuardedMutex(&MmPagedPoolMutex);
471 
472  //
473  // Find some empty allocation space
474  //
476  SizeInPages,
477  MmPagedPoolInfo.PagedPoolHint);
478  if (i == 0xFFFFFFFF)
479  {
480  //
481  // Get the page bit count
482  //
483  i = ((SizeInPages - 1) / PTE_COUNT) + 1;
484  DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);
485 
486  //
487  // Check if there is enough paged pool expansion space left
488  //
489  if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
490  (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
491  {
492  //
493  // Out of memory!
494  //
495  DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
496  KeReleaseGuardedMutex(&MmPagedPoolMutex);
497  return NULL;
498  }
499 
500  //
501  // Check if we'll have to expand past the last PTE we have available
502  //
503  if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
504  (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
505  {
506  //
507  // We can only support this much then
508  //
509  PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
510  PageTableCount = (PFN_COUNT)(PointerPde + 1 -
511  MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
512  ASSERT(PageTableCount < i);
513  i = PageTableCount;
514  }
515  else
516  {
517  //
518  // Otherwise, there is plenty of space left for this expansion
519  //
520  PageTableCount = i;
521  }
522 
523  //
524  // Get the template PDE we'll use to expand
525  //
526  TempPde = ValidKernelPde;
527 
528  //
529  // Get the first PTE in expansion space
530  //
531  PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
532  BaseVa = MiPdeToPte(PointerPde);
533  BaseVaStart = BaseVa;
534 
535  //
536  // Lock the PFN database and loop pages
537  //
538  OldIrql = MiAcquirePfnLock();
539  do
540  {
541  //
542  // It should not already be valid
543  //
544  ASSERT(PointerPde->u.Hard.Valid == 0);
545 
546  /* Request a page */
548  MI_SET_PROCESS2("Kernel");
549  PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
550  TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
551 #if (_MI_PAGING_LEVELS >= 3)
552  /* On PAE/x64 systems, there's no double-buffering */
553  ASSERT(FALSE);
554 #else
555  //
556  // Save it into our double-buffered system page directory
557  //
558  MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;
559 
560  /* Initialize the PFN */
561  MiInitializePfnForOtherProcess(PageFrameNumber,
562  (PMMPTE)PointerPde,
564 
565  /* Write the actual PDE now */
566 // MI_WRITE_VALID_PDE(PointerPde, TempPde);
567 #endif
568  //
569  // Move on to the next expansion address
570  //
571  PointerPde++;
572  BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
573  i--;
574  } while (i > 0);
575 
576  //
577  // Release the PFN database lock
578  //
579  MiReleasePfnLock(OldIrql);
580 
581  //
582  // These pages are now available, clear their availability bits
583  //
584  EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
585  (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
586  PTE_COUNT;
587  RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
588  EndAllocation,
589  PageTableCount * PTE_COUNT);
590 
591  //
592  // Update the next expansion location
593  //
594  MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;
595 
596  //
597  // Zero out the newly available memory
598  //
599  RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);
600 
601  //
602  // Now try consuming the pages again
603  //
605  SizeInPages,
606  0);
607  if (i == 0xFFFFFFFF)
608  {
609  //
610  // Out of memory!
611  //
612  DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
613  KeReleaseGuardedMutex(&MmPagedPoolMutex);
614  return NULL;
615  }
616  }
617 
618  //
619  // Update the pool hint if the request was just one page
620  //
621  if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;
622 
623  //
624  // Update the end bitmap so we know the bounds of this allocation when
625  // the time comes to free it
626  //
627  EndAllocation = i + SizeInPages - 1;
628  RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);
629 
630  //
631  // Now we can release the lock (it mainly protects the bitmap)
632  //
633  KeReleaseGuardedMutex(&MmPagedPoolMutex);
634 
635  //
636  // Now figure out where this allocation starts
637  //
638  BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));
639 
640  //
641  // Flush the TLB
642  //
644 
645  /* Setup a demand-zero writable PTE */
647 
648  //
649  // Find the first and last PTE, then loop them all
650  //
651  PointerPte = MiAddressToPte(BaseVa);
652  StartPte = PointerPte + SizeInPages;
653  do
654  {
655  //
656  // Write the demand zero PTE and keep going
657  //
658  MI_WRITE_INVALID_PTE(PointerPte, TempPte);
659  } while (++PointerPte < StartPte);
660 
661  //
662  // Return the allocation address to the caller
663  //
664  return BaseVa;
665  }
666 
667  //
668  // If only one page is being requested, try to grab it from the S-LIST
669  //
670  if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
671  {
672  BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
673  if (BaseVa) return BaseVa;
674  }
675 
676  //
677  // Allocations of less than 4 pages go into their individual buckets
678  //
679  i = SizeInPages - 1;
681 
682  //
683  // Loop through all the free page lists based on the page index
684  //
685  NextHead = &MmNonPagedPoolFreeListHead[i];
686  LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
687 
688  //
689  // Acquire the nonpaged pool lock
690  //
692  do
693  {
694  //
695  // Now loop through all the free page entries in this given list
696  //
697  NextEntry = NextHead->Flink;
698  while (NextEntry != NextHead)
699  {
700  /* Is freed non paged pool enabled */
702  {
703  /* We need to be able to touch this page, unprotect it */
704  MiUnProtectFreeNonPagedPool(NextEntry, 0);
705  }
706 
707  //
708  // Grab the entry and see if it can handle our allocation
709  //
710  FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
711  ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
712  if (FreeEntry->Size >= SizeInPages)
713  {
714  //
715  // It does, so consume the pages from here
716  //
717  FreeEntry->Size -= SizeInPages;
718 
719  //
720  // The allocation will begin in this free page area
721  //
722  BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
723  (FreeEntry->Size << PAGE_SHIFT));
724 
725  /* Remove the item from the list, depending if pool is protected */
728  else
729  RemoveEntryList(&FreeEntry->List);
730 
731  //
732  // However, check if it's still got space left
733  //
734  if (FreeEntry->Size != 0)
735  {
736  /* Check which list to insert this entry into */
737  i = FreeEntry->Size - 1;
739 
740  /* Insert the entry into the free list head, check for prot. pool */
742  MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
743  else
744  InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
745 
746  /* Is freed non paged pool protected? */
748  {
749  /* Protect the freed pool! */
750  MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
751  }
752  }
753 
754  //
755  // Grab the PTE for this allocation
756  //
757  PointerPte = MiAddressToPte(BaseVa);
758  ASSERT(PointerPte->u.Hard.Valid == 1);
759 
760  //
761  // Grab the PFN NextEntry and index
762  //
763  Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
764 
765  //
766  // Now mark it as the beginning of an allocation
767  //
768  ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
769  Pfn1->u3.e1.StartOfAllocation = 1;
770 
771  /* Mark it as special pool if needed */
772  ASSERT(Pfn1->u4.VerifierAllocation == 0);
773  if (PoolType & VERIFIER_POOL_MASK)
774  {
775  Pfn1->u4.VerifierAllocation = 1;
776  }
777 
778  //
779  // Check if the allocation is larger than one page
780  //
781  if (SizeInPages != 1)
782  {
783  //
784  // Navigate to the last PFN entry and PTE
785  //
786  PointerPte += SizeInPages - 1;
787  ASSERT(PointerPte->u.Hard.Valid == 1);
788  Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
789  }
790 
791  //
792  // Mark this PFN as the last (might be the same as the first)
793  //
794  ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
795  Pfn1->u3.e1.EndOfAllocation = 1;
796 
797  //
798  // Release the nonpaged pool lock, and return the allocation
799  //
801  return BaseVa;
802  }
803 
804  //
805  // Try the next free page entry
806  //
807  NextEntry = FreeEntry->List.Flink;
808 
809  /* Is freed non paged pool protected? */
811  {
812  /* Protect the freed pool! */
813  MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
814  }
815  }
816  } while (++NextHead < LastHead);
817 
818  //
819  // If we got here, we're out of space.
820  // Start by releasing the lock
821  //
823 
824  //
825  // Allocate some system PTEs
826  //
827  StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
828  PointerPte = StartPte;
829  if (StartPte == NULL)
830  {
831  //
832  // Ran out of memory
833  //
834  DPRINT1("Out of NP Expansion Pool\n");
835  return NULL;
836  }
837 
838  //
839  // Acquire the pool lock now
840  //
842 
843  //
844  // Lock the PFN database too
845  //
847 
848  //
849  // Loop the pages
850  //
851  TempPte = ValidKernelPte;
852  do
853  {
854  /* Allocate a page */
856  MI_SET_PROCESS2("Kernel");
857  PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
858 
859  /* Get the PFN entry for it and fill it out */
860  Pfn1 = MiGetPfnEntry(PageFrameNumber);
861  Pfn1->u3.e2.ReferenceCount = 1;
862  Pfn1->u2.ShareCount = 1;
863  Pfn1->PteAddress = PointerPte;
865  Pfn1->u4.VerifierAllocation = 0;
866 
867  /* Write the PTE for it */
868  TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
869  MI_WRITE_VALID_PTE(PointerPte++, TempPte);
870  } while (--SizeInPages > 0);
871 
872  //
873  // This is the last page
874  //
875  Pfn1->u3.e1.EndOfAllocation = 1;
876 
877  //
878  // Get the first page and mark it as such
879  //
880  Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
881  Pfn1->u3.e1.StartOfAllocation = 1;
882 
883  /* Mark it as a verifier allocation if needed */
884  ASSERT(Pfn1->u4.VerifierAllocation == 0);
885  if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;
886 
887  //
888  // Release the PFN and nonpaged pool lock
889  //
892 
893  //
894  // Return the address
895  //
896  return MiPteToAddress(StartPte);
897 }
898 
/*
 * Page-level pool free routine (name line 901 dropped; presumably
 * MiFreePoolPages(IN PVOID StartingVa)). Returns the number of pages freed.
 * Paged pool: walks the end-of-allocation bitmap to size the allocation,
 * pushes single pages onto the S-LIST, otherwise deletes the pages and
 * clears the bitmaps under MmPagedPoolMutex. Nonpaged pool: walks PFN
 * entries to find the allocation end, then coalesces with the free block
 * after and/or before it (with protected-pool unprotect dances), rebuilds
 * the MMFREE_POOL_ENTRY ownership links for every freed page, and inserts
 * the resulting block into the size-appropriate free list.
 * NOTE(review): the extraction dropped several lines, including the local
 * Offset declaration (909), the nonpaged lock acquire/release (1008, 1255),
 * protected-pool condition lines (1029, 1042, 1045, 1088, 1102, 1117,
 * 1120, 1155, 1169, 1183, 1186, 1215, 1218, 1246) and the free-list index
 * clamps (1183, 1215). The skeleton below is incomplete.
 */
899 ULONG
900 NTAPI
902 {
903  PMMPTE PointerPte, StartPte;
904  PMMPFN Pfn1, StartPfn;
905  PFN_COUNT FreePages, NumberOfPages;
906  KIRQL OldIrql;
907  PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
908  ULONG i, End;
910 
911  //
912  // Handle paged pool
913  //
914  if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
915  {
916  //
917  // Calculate the offset from the beginning of paged pool, and convert it
918  // into pages
919  //
920  Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
921  i = (ULONG)(Offset >> PAGE_SHIFT);
922  End = i;
923 
924  //
925  // Now use the end bitmap to scan until we find a set bit, meaning that
926  // this allocation finishes here
927  //
928  while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;
929 
930  //
931  // Now calculate the total number of pages this allocation spans. If it's
932  // only one page, add it to the S-LIST instead of freeing it
933  //
934  NumberOfPages = End - i + 1;
935  if ((NumberOfPages == 1) &&
936  (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
937  {
938  InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
939  return 1;
940  }
941 
942  /* Delete the actual pages */
943  PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
944  FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
945  ASSERT(FreePages == NumberOfPages);
946 
947  //
948  // Acquire the paged pool lock
949  //
950  KeAcquireGuardedMutex(&MmPagedPoolMutex);
951 
952  //
953  // Clear the allocation and free bits
954  //
955  RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
956  RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);
957 
958  //
959  // Update the hint if we need to
960  //
961  if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;
962 
963  //
964  // Release the lock protecting the bitmaps
965  //
966  KeReleaseGuardedMutex(&MmPagedPoolMutex);
967 
968  //
969  // And finally return the number of pages freed
970  //
971  return NumberOfPages;
972  }
973 
974  //
975  // Get the first PTE and its corresponding PFN entry. If this is also the
976  // last PTE, meaning that this allocation was only for one page, push it into
977  // the S-LIST instead of freeing it
978  //
979  StartPte = PointerPte = MiAddressToPte(StartingVa);
980  StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
981  if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
982  (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
983  {
984  InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
985  return 1;
986  }
987 
988  //
989  // Loop until we find the last PTE
990  //
991  while (Pfn1->u3.e1.EndOfAllocation == 0)
992  {
993  //
994  // Keep going
995  //
996  PointerPte++;
997  Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
998  }
999 
1000  //
1001  // Now we know how many pages we have
1002  //
1003  NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);
1004 
1005  //
1006  // Acquire the nonpaged pool lock
1007  //
1009 
1010  //
1011  // Mark the first and last PTEs as not part of an allocation anymore
1012  //
1013  StartPfn->u3.e1.StartOfAllocation = 0;
1014  Pfn1->u3.e1.EndOfAllocation = 0;
1015 
1016  //
1017  // Assume we will free as many pages as the allocation was
1018  //
1019  FreePages = NumberOfPages;
1020 
1021  //
1022  // Peek one page past the end of the allocation
1023  //
1024  PointerPte++;
1025 
1026  //
1027  // Guard against going past initial nonpaged pool
1028  //
1030  {
1031  //
1032  // This page is on the outskirts of initial nonpaged pool, so ignore it
1033  //
1034  Pfn1 = NULL;
1035  }
1036  else
1037  {
1038  /* Sanity check */
1039  ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
1040 
1041  /* Check if protected pool is enabled */
1043  {
1044  /* The freed block will be merged, it must be made accessible */
1046  }
1047 
1048  //
1049  // Otherwise, our entire allocation must've fit within the initial non
1050  // paged pool, or the expansion nonpaged pool, so get the PFN entry of
1051  // the next allocation
1052  //
1053  if (PointerPte->u.Hard.Valid == 1)
1054  {
1055  //
1056  // It's either expansion or initial: get the PFN entry
1057  //
1058  Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1059  }
1060  else
1061  {
1062  //
1063  // This means we've reached the guard page that protects the end of
1064  // the expansion nonpaged pool
1065  //
1066  Pfn1 = NULL;
1067  }
1068 
1069  }
1070 
1071  //
1072  // Check if this allocation actually exists
1073  //
1074  if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
1075  {
1076  //
1077  // It doesn't, so we should actually locate a free entry descriptor
1078  //
1079  FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
1080  (NumberOfPages << PAGE_SHIFT));
1081  ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1082  ASSERT(FreeEntry->Owner == FreeEntry);
1083 
1084  /* Consume this entry's pages */
1085  FreePages += FreeEntry->Size;
1086 
1087  /* Remove the item from the list, depending if pool is protected */
1089  MiProtectedPoolRemoveEntryList(&FreeEntry->List);
1090  else
1091  RemoveEntryList(&FreeEntry->List);
1092  }
1093 
1094  //
1095  // Now get the official free entry we'll create for the caller's allocation
1096  //
1097  FreeEntry = StartingVa;
1098 
1099  //
1100  // Check if our allocation is the very first page
1101  //
1103  {
1104  //
1105  // Then we can't do anything or we'll risk underflowing
1106  //
1107  Pfn1 = NULL;
1108  }
1109  else
1110  {
1111  //
1112  // Otherwise, get the PTE for the page right before our allocation
1113  //
1114  PointerPte -= NumberOfPages + 1;
1115 
1116  /* Check if protected pool is enabled */
1118  {
1119  /* The freed block will be merged, it must be made accessible */
1121  }
1122 
1123  /* Check if this is valid pool, or a guard page */
1124  if (PointerPte->u.Hard.Valid == 1)
1125  {
1126  //
1127  // It's either expansion or initial nonpaged pool, get the PFN entry
1128  //
1129  Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
1130  }
1131  else
1132  {
1133  //
1134  // We must've reached the guard page, so don't risk touching it
1135  //
1136  Pfn1 = NULL;
1137  }
1138  }
1139 
1140  //
1141  // Check if there is a valid PFN entry for the page before the allocation
1142  // and then check if this page was actually the end of an allocation.
1143  // If it wasn't, then we know for sure it's a free page
1144  //
1145  if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
1146  {
1147  //
1148  // Get the free entry descriptor for that given page range
1149  //
1150  FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
1151  ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
1152  FreeEntry = FreeEntry->Owner;
1153 
1154  /* Check if protected pool is enabled */
1156  {
1157  /* The freed block will be merged, it must be made accessible */
1158  MiUnProtectFreeNonPagedPool(FreeEntry, 0);
1159  }
1160 
1161  //
1162  // Check if the entry is small enough to be indexed on a free list
1163  // If it is, we'll want to re-insert it, since we're about to
1164  // collapse our pages on top of it, which will change its count
1165  //
1166  if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
1167  {
1168  /* Remove the item from the list, depending if pool is protected */
1170  MiProtectedPoolRemoveEntryList(&FreeEntry->List);
1171  else
1172  RemoveEntryList(&FreeEntry->List);
1173 
1174  //
1175  // Update its size
1176  //
1177  FreeEntry->Size += FreePages;
1178 
1179  //
1180  // And now find the new appropriate list to place it in
1181  //
1182  i = (ULONG)(FreeEntry->Size - 1);
1184 
1185  /* Insert the entry into the free list head, check for prot. pool */
1187  MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
1188  else
1189  InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1190  }
1191  else
1192  {
1193  //
1194  // Otherwise, just combine our free pages into this entry
1195  //
1196  FreeEntry->Size += FreePages;
1197  }
1198  }
1199 
1200  //
1201  // Check if we were unable to do any compaction, and we'll stick with this
1202  //
1203  if (FreeEntry == StartingVa)
1204  {
1205  //
1206  // Well, now we are a free entry. At worst we just have our newly freed
1207  // pages, at best we have our pages plus whatever entry came after us
1208  //
1209  FreeEntry->Size = FreePages;
1210 
1211  //
1212  // Find the appropriate list we should be on
1213  //
1214  i = FreeEntry->Size - 1;
1216 
1217  /* Insert the entry into the free list head, check for prot. pool */
1219  MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
1220  else
1221  InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
1222  }
1223 
1224  //
1225  // Just a sanity check
1226  //
1227  ASSERT(FreePages != 0);
1228 
1229  //
1230  // Get all the pages between our allocation and its end. These will all now
1231  // become free page chunks.
1232  //
1233  NextEntry = StartingVa;
1234  LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
1235  do
1236  {
1237  //
1238  // Link back to the parent free entry, and keep going
1239  //
1240  NextEntry->Owner = FreeEntry;
1241  NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
1242  NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
1243  } while (NextEntry != LastEntry);
1244 
1245  /* Is freed non paged pool protected? */
1247  {
1248  /* Protect the freed pool! */
1249  MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
1250  }
1251 
1252  //
1253  // We're done, release the lock and let the caller know how much we freed
1254  //
1256  return NumberOfPages;
1257 }
1258 
1259 
/*
 * Pool-quota raise stub (name line 1262 dropped; presumably
 * MiRaisePoolQuota). Not implemented: unconditionally grants the raise by
 * returning the current maximum plus a fixed 64KB increment and TRUE.
 */
1260 BOOLEAN
1261 NTAPI
1263  IN ULONG CurrentMaxQuota,
1264  OUT PULONG NewMaxQuota)
1265 {
1266  //
1267  // Not implemented
1268  //
1269  UNIMPLEMENTED;
1270  *NewMaxQuota = CurrentMaxQuota + 65536;
1271  return TRUE;
1272 }
1273 
/*
 * Session paged-pool initializer (name line 1276 dropped; presumably
 * MiInitializeSessionPool, PAGED_CODE). Sets up the per-session pool:
 * descriptor + guarded mutex, pool start/end addresses, PDE/PTE bookkeeping,
 * the first page table (double-buffered into the session PageTables array
 * on non-AMD64), and two RTL bitmaps — allocation map (all set except the
 * first page table's worth) and end-of-allocation map (all clear).
 * Returns STATUS_SUCCESS.
 * NOTE(review): the extraction dropped lines 1295 (pool-type argument to
 * ExInitializePoolDescriptor), 1301-1302/1304 (pool start/end setup and
 * DPRINT1 arguments), 1313-1316 (PointerPde/PointerPte/LastPte/LastPde
 * computation), 1331 (containing frame argument), 1336 (Index seed),
 * 1344-1345 (counter bumps) and the ExAllocatePoolWithTag calls at
 * 1352/1365 — the bitmap allocations below are visibly incomplete.
 */
1274 NTSTATUS
1275 NTAPI
1277 {
1278  PMMPTE PointerPte, LastPte;
1279  PMMPDE PointerPde, LastPde;
1280  PFN_NUMBER PageFrameIndex, PdeCount;
1281  PPOOL_DESCRIPTOR PoolDescriptor;
1282  PMM_SESSION_SPACE SessionGlobal;
1283  PMM_PAGED_POOL_INFO PagedPoolInfo;
1284  NTSTATUS Status;
1285  ULONG Index, PoolSize, BitmapSize;
1286  PAGED_CODE();
1287 
1288  /* Lock session pool */
1289  SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
1290  KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);
1291 
1292  /* Setup a valid pool descriptor */
1293  PoolDescriptor = &MmSessionSpace->PagedPool;
1294  ExInitializePoolDescriptor(PoolDescriptor,
1296  0,
1297  0,
1298  &SessionGlobal->PagedPoolMutex);
1299 
1300  /* Setup the pool addresses */
1303  DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
1305 
1306  /* Reset all the counters */
1307  PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
1308  PagedPoolInfo->PagedPoolCommit = 0;
1309  PagedPoolInfo->PagedPoolHint = 0;
1310  PagedPoolInfo->AllocatedPagedPool = 0;
1311 
1312  /* Compute PDE and PTE addresses */
1317 
1318  /* Write them down */
1319  MmSessionSpace->PagedPoolBasePde = PointerPde;
1320  PagedPoolInfo->FirstPteForPagedPool = PointerPte;
1321  PagedPoolInfo->LastPteForPagedPool = LastPte;
1322  PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;
1323 
1324  /* Zero the PDEs */
1325  PdeCount = LastPde - PointerPde;
1326  RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));
1327 
1328  /* Initialize the PFN for the PDE */
1329  Status = MiInitializeAndChargePfn(&PageFrameIndex,
1330  PointerPde,
1332  TRUE);
1333  ASSERT(NT_SUCCESS(Status) == TRUE);
1334 
1335  /* Initialize the first page table */
1337  Index >>= 22;
1338 #ifndef _M_AMD64 // FIXME
1339  ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
1340  MmSessionSpace->PageTables[Index] = *PointerPde;
1341 #endif
1342 
1343  /* Bump up counters */
1346 
1347  /* Compute the size of the pool in pages, and of the bitmap for it */
1348  PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
1349  BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);
1350 
1351  /* Allocate and initialize the bitmap to track allocations */
1353  BitmapSize,
1354  TAG_MM);
1355  ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
1357  (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
1358  PoolSize);
1359 
1360  /* Set all bits, but clear the first page table's worth */
1361  RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
1362  RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);
1363 
1364  /* Allocate and initialize the bitmap to track free space */
1366  BitmapSize,
1367  TAG_MM);
1368  ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
1370  (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
1371  PoolSize);
1372 
1373  /* Clear all the bits and return success */
1374  RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
1375  return STATUS_SUCCESS;
1376 }
1377 
1378 /* PUBLIC FUNCTIONS ***********************************************************/
1379 
1380 /*
1381  * @unimplemented
1382  */
/*
 * Unimplemented public stub (name line 1385 dropped; matches
 * MmAllocateMappingAddress's IN ULONG PoolTag trailing parameter —
 * TODO confirm against real source). Logs UNIMPLEMENTED and returns
 * NULL, which callers must treat as allocation failure.
 */
1383 PVOID
1384 NTAPI
1386  IN ULONG PoolTag)
1387 {
1388  UNIMPLEMENTED;
1389  return NULL;
1390 }
1391 
1392 /*
1393  * @unimplemented
1394  */
/*
 * Unimplemented public stub (name line 1397 dropped; matches
 * MmFreeMappingAddress's IN ULONG PoolTag trailing parameter —
 * TODO confirm against real source). Logs UNIMPLEMENTED and does nothing.
 */
1395 VOID
1396 NTAPI
1398  IN ULONG PoolTag)
1399 {
1400  UNIMPLEMENTED;
1401 }
1402 
1403 /* EOF */
DWORD *typedef PVOID
Definition: winlogon.h:52
#define MI_MAKE_SOFTWARE_PTE(p, x)
Definition: miarm.h:158
PRTL_BITMAP PagedPoolAllocationMap
Definition: mm.h:412
SIZE_T MmAllocatedNonPagedPool
Definition: pool.c:26
POOL_TYPE NTAPI MmDeterminePoolType(IN PVOID PoolAddress)
Definition: pool.c:406
#define PAGE_SHIFT
Definition: env_spec_w32.h:45
#define IN
Definition: typedefs.h:38
NTSYSAPI void WINAPI RtlClearBits(PRTL_BITMAP, ULONG, ULONG)
#define TRUE
Definition: types.h:120
SIZE_T NonPageablePages
Definition: miarm.h:460
BOOLEAN NTAPI MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress, IN ULONG PageCount)
Definition: pool.c:68
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
union _MMPFN::@1665 u3
PVOID PagedPoolStart
Definition: miarm.h:462
PFN_NUMBER MiEndOfInitialPoolFrame
Definition: pool.c:23
struct _RTL_BITMAP RTL_BITMAP
PVOID MiSessionPoolStart
Definition: init.c:32
#define MiAddressToPde(x)
Definition: mmx86.c:20
PKEVENT MiHighNonPagedPoolEvent
Definition: mminit.c:297
VOID FASTCALL KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:42
PMMPDE PageTables
Definition: miarm.h:486
ULONG PFN_COUNT
Definition: mmtypes.h:102
ASSERT((InvokeOnSuccess||InvokeOnError||InvokeOnCancel)?(CompletionRoutine!=NULL):TRUE)
PMMPTE NTAPI MiReserveSystemPtes(IN ULONG NumberOfPtes, IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType)
Definition: syspte.c:246
FORCEINLINE VOID InsertHeadList(_Inout_ PLIST_ENTRY ListHead, _Inout_ __drv_aliasesMem PLIST_ENTRY Entry)
Definition: rtlfuncs.h:201
return STATUS_SUCCESS
Definition: btrfs.c:2690
ULONG Signature
Definition: mm.h:402
#define VERIFIER_POOL_MASK
Definition: mm.h:103
NTSTATUS NTAPI MiInitializeAndChargePfn(OUT PPFN_NUMBER PageFrameIndex, IN PMMPDE PointerPde, IN PFN_NUMBER ContainingPageFrame, IN BOOLEAN SessionAllocation)
Definition: pfnlist.c:1093
NTSYSAPI VOID NTAPI RtlSetBit(_In_ PRTL_BITMAP BitMapHeader, _In_range_(<, BitMapHeader->SizeOfBitMap) ULONG BitNumber)
Definition: bitmap.c:304
PVOID MmNonPagedPoolExpansionStart
Definition: init.c:25
#define MM_READWRITE
Definition: miarm.h:51
#define MI_GET_NEXT_COLOR()
Definition: miarm.h:211
HARDWARE_PDE_ARMV6 TempPde
Definition: winldr.c:77
FORCEINLINE KIRQL MiAcquirePfnLock(VOID)
Definition: mm.h:875
PMM_SESSION_SPACE MmSessionSpace
Definition: session.c:21
PVOID MmPagedPoolEnd
Definition: init.c:26
VOID NTAPI MiInitializePfnForOtherProcess(IN PFN_NUMBER PageFrameIndex, IN PVOID PteAddress, IN PFN_NUMBER PteFrame)
Definition: pfnlist.c:1280
NTSTATUS NTAPI MiInitializeSessionPool(VOID)
Definition: pool.c:1276
#define InsertTailList(ListHead, Entry)
LONG NTAPI KeSetEvent(IN PKEVENT Event, IN KPRIORITY Increment, IN BOOLEAN Wait)
Definition: eventobj.c:159
union _MMPTE::@2171 u
VOID NTAPI INIT_FUNCTION MiInitializeNonPagedPool(VOID)
Definition: pool.c:276
_Must_inspect_result_ FORCEINLINE BOOLEAN IsListEmpty(_In_ const LIST_ENTRY *ListHead)
Definition: rtlfuncs.h:57
FORCEINLINE VOID MiReleasePfnLock(_In_ KIRQL OldIrql)
Definition: mm.h:882
USHORT PageLocation
Definition: mm.h:295
#define PAGED_CODE()
Definition: video.h:57
PSLIST_ENTRY WINAPI InterlockedPopEntrySList(PSLIST_HEADER ListHead)
Definition: interlocked.c:55
ULONG MmSpecialPoolTag
Definition: pool.c:27
MM_PAGED_POOL_INFO PagedPoolInfo
Definition: miarm.h:478
FORCEINLINE VOID MiReleasePfnLockFromDpcLevel(VOID)
Definition: mm.h:901
SLIST_HEADER MiNonPagedPoolSListHead
Definition: pool.c:30
PVOID MmNonPagedPoolEnd
Definition: mminit.c:99
uint32_t ULONG_PTR
Definition: typedefs.h:63
FORCEINLINE VOID MiAcquirePfnLockAtDpcLevel(VOID)
Definition: mm.h:890
FORCEINLINE BOOLEAN RemoveEntryList(_In_ PLIST_ENTRY Entry)
Definition: rtlfuncs.h:105
PFN_COUNT MmNumberOfFreeNonPagedPool
Definition: pool.c:21
BOOLEAN NTAPI MiRaisePoolQuota(IN POOL_TYPE PoolType, IN ULONG CurrentMaxQuota, OUT PULONG NewMaxQuota)
Definition: pool.c:1262
UCHAR KIRQL
Definition: env_spec_w32.h:591
MMPFNENTRY e1
Definition: mm.h:327
PVOID MmSessionBase
Definition: init.c:33
#define PDE_COUNT
Definition: miarm.h:32
GLenum GLclampf GLint i
Definition: glfuncs.h:14
#define MiAddressToPte(x)
Definition: mmx86.c:19
ULONG PFN_NUMBER
Definition: ke.h:8
#define TAG_MM
Definition: tag.h:136
NTSTATUS(* NTAPI)(IN PFILE_FULL_EA_INFORMATION EaBuffer, IN ULONG EaLength, OUT PULONG ErrorOffset)
Definition: IoEaTest.cpp:117
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
PMMPTE LastPteForPagedPool
Definition: mm.h:415
#define FALSE
Definition: types.h:117
FORCEINLINE VOID MI_WRITE_VALID_PTE(IN PMMPTE PointerPte, IN MMPTE TempPte)
Definition: miarm.h:916
ULONG_PTR ShareCount
Definition: mm.h:320
#define InterlockedIncrementSizeT(a)
Definition: interlocked.h:220
VOID NTAPI MiInitializeSystemPtes(IN PMMPTE StartingPte, IN ULONG NumberOfPtes, IN MMSYSTEM_PTE_POOL_TYPE PoolType)
Definition: syspte.c:399
VOID NTAPI MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
Definition: pool.c:166
PKEVENT MiLowNonPagedPoolEvent
Definition: mminit.c:296
#define MI_SET_PROCESS2(x)
Definition: mm.h:252
PFN_NUMBER MiStartOfInitialPoolFrame
Definition: pool.c:23
LIST_ENTRY List
Definition: mm.h:400
_Must_inspect_result_ _In_ LPCGUID ULONG _In_ FSRTL_ALLOCATE_ECP_FLAGS _In_opt_ PFSRTL_EXTRA_CREATE_PARAMETER_CLEANUP_CALLBACK _In_ ULONG PoolTag
Definition: fltkernel.h:2520
POOL_DESCRIPTOR PagedPool
Definition: miarm.h:482
SIZE_T AllocatedPagedPool
Definition: mm.h:419
#define MI_MAX_FREE_PAGE_LISTS
Definition: mm.h:76
smooth NULL
Definition: ftsmooth.c:416
VOID NTAPI INIT_FUNCTION MiInitializeNonPagedPoolThresholds(VOID)
Definition: pool.c:184
PVOID FORCEINLINE MiPteToAddress(PMMPTE PointerPte)
Definition: mm.h:197
PVOID MmNonPagedPoolEnd0
Definition: pool.c:22
ULONG_PTR VerifierAllocation
Definition: mm.h:350
PFN_NUMBER MiHighPagedPoolThreshold
Definition: mminit.c:303
#define FORCEINLINE
Definition: ntbasedef.h:221
void DPRINT(...)
Definition: polytest.cpp:61
PFN_COUNT MiExpansionPoolPagesInitialCharge
Definition: pool.c:21
#define BASE_POOL_TYPE_MASK
Definition: ExPools.c:15
#define MI_SET_USAGE(x)
Definition: mm.h:251
SIZE_T PagedPoolCommit
Definition: mm.h:418
PFLT_MESSAGE_WAITER_QUEUE CONTAINING_RECORD(Csq, DEVICE_EXTENSION, IrpQueue)) -> WaiterQ.mLock) _IRQL_raises_(DISPATCH_LEVEL) VOID NTAPI FltpAcquireMessageWaiterLock(_In_ PIO_CSQ Csq, _Out_ PKIRQL Irql)
Definition: Messaging.c:560
VOID NTAPI KeFlushEntireTb(IN BOOLEAN Invalid, IN BOOLEAN AllProcessors)
Definition: cpu.c:413
UINTN Size
Definition: acefiex.h:555
ULONG MmConsumedPoolPercentage
Definition: pool.c:28
PMMPDE MmSystemPagePtes
Definition: init.c:41
ULONG NTAPI MiFreePoolPages(IN PVOID StartingVa)
Definition: pool.c:901
KGUARDED_MUTEX MmPagedPoolMutex
Definition: pool.c:24
PFN_COUNT Size
Definition: mm.h:401
NTSYSAPI void WINAPI RtlClearAllBits(PRTL_BITMAP)
struct _LIST_ENTRY * Flink
Definition: typedefs.h:119
unsigned char BOOLEAN
union _MMPFN::@1668 u4
_In_ HANDLE _Outptr_result_bytebuffer_ ViewSize PVOID * BaseAddress
Definition: mmfuncs.h:404
struct _MM_SESSION_SPACE * GlobalVirtualAddress
Definition: miarm.h:449
#define MM_FREE_POOL_SIGNATURE
Definition: mm.h:407
LONG NTSTATUS
Definition: precomp.h:26
LIST_ENTRY List
Definition: psmgr.c:57
SIZE_T CommittedPages
Definition: miarm.h:461
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:24
Definition: mm.h:398
INT POOL_TYPE
Definition: typedefs.h:76
ULONG64 Valid
Definition: mmtypes.h:150
PVOID NTAPI MiAllocatePoolPages(IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes)
Definition: pool.c:420
static const UCHAR Index[8]
Definition: usbohci.c:11
MM_PAGED_POOL_INFO MmPagedPoolInfo
Definition: pool.c:25
VOID NTAPI RtlClearBit(_In_ PRTL_BITMAP BitMapHeader, _In_ BITMAP_INDEX BitNumber)
Definition: bitmap.c:294
VOID FASTCALL KeReleaseQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber, IN KIRQL OldIrql)
Definition: spinlock.c:154
#define PTE_PER_PAGE
Definition: mm.h:19
HARDWARE_PTE_ARMV6 TempPte
Definition: winldr.c:75
FORCEINLINE USHORT ExQueryDepthSList(_In_ PSLIST_HEADER SListHead)
Definition: exfuncs.h:153
_Must_inspect_result_ NTSYSAPI BOOLEAN NTAPI RtlTestBit(_In_ PRTL_BITMAP BitMapHeader, _In_range_(<, BitMapHeader->SizeOfBitMap) ULONG BitNumber)
Definition: bitmap.c:434
PFN_NUMBER MiHighNonPagedPoolThreshold
Definition: mminit.c:305
MMPTE ValidKernelPte
Definition: init.c:31
#define PTE_COUNT
Definition: miarm.h:33
ULONG MiNonPagedPoolSListMaximum
Definition: pool.c:31
#define ExAllocatePoolWithTag(hernya, size, tag)
Definition: env_spec_w32.h:350
VOID NTAPI MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead, IN PLIST_ENTRY Entry, IN BOOLEAN Critical)
Definition: pool.c:148
struct _MMFREE_POOL_ENTRY * Owner
Definition: mm.h:403
#define BYTES_TO_PAGES(Size)
ULONG MiPagedPoolSListMaximum
Definition: pool.c:33
IN REFCLSID IN PUNKNOWN IN POOL_TYPE PoolType
Definition: unknown.h:68
PVOID PagedPoolEnd
Definition: miarm.h:463
KGUARDED_MUTEX PagedPoolMutex
Definition: miarm.h:477
_Requires_lock_held_ Interrupt _Releases_lock_ Interrupt _In_ _IRQL_restores_ KIRQL OldIrql
Definition: kefuncs.h:803
PFN_NUMBER MmMaximumNonPagedPoolInPages
Definition: mminit.c:30
IN SIZE_T NumberOfBytes
Definition: ndis.h:3915
struct _MMPFN::@1665::@1671 e2
Definition: mm.h:303
ULONG64 Prototype
Definition: mmtypes.h:89
#define PAGE_SIZE
Definition: env_spec_w32.h:49
PFN_NUMBER MmSystemPageDirectory[PD_COUNT]
Definition: init.c:40
Definition: typedefs.h:117
_In_ ULONG _In_ BOOLEAN _Must_inspect_result_ PVOID * VirtualAddress
Definition: ndis.h:3773
NTKERNELAPI PSLIST_ENTRY FASTCALL InterlockedPushEntrySList(IN PSLIST_HEADER ListHead, IN PSLIST_ENTRY ListEntry)
Definition: interlocked.c:82
#define _1MB
Definition: miarm.h:15
NTSYSAPI ULONG WINAPI RtlFindClearBitsAndSet(PRTL_BITMAP, ULONG, ULONG)
KIRQL FASTCALL KeAcquireQueuedSpinLock(IN KSPIN_LOCK_QUEUE_NUMBER LockNumber)
Definition: spinlock.c:108
FORCEINLINE PMMPFN MiGetPfnEntry(IN PFN_NUMBER Pfn)
Definition: mm.h:914
FORCEINLINE VOID MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links, OUT PVOID *PoolFlink, OUT PVOID *PoolBlink)
Definition: pool.c:103
MMPTE ValidKernelPde
Definition: init.c:30
Status
Definition: gdiplustypes.h:24
ULONG_PTR Long
Definition: mmtypes.h:215
PVOID MmPagedPoolStart
Definition: miarm.h:554
PVOID MmNonPagedPoolStart
Definition: init.c:24
PFN_NUMBER MiLowPagedPoolThreshold
Definition: mminit.c:302
ULONG MmSessionPoolSize
Definition: init.c:36
PFN_COUNT NTAPI MiDeleteSystemPageableVm(IN PMMPTE PointerPte, IN PFN_NUMBER PageCount, IN ULONG Flags, OUT PPFN_NUMBER ValidPages)
Definition: virtual.c:297
PFN_NUMBER NTAPI MiRemoveAnyPage(IN ULONG Color)
Definition: pfnlist.c:475
ULONG_PTR SIZE_T
Definition: typedefs.h:78
MMPTE_HARDWARE Hard
Definition: mmtypes.h:217
#define SYSTEM_PD_SIZE
Definition: miarm.h:36
#define MiPteToPde(_Pte)
Definition: mm.h:232
VOID NTAPI INIT_SECTION ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor, IN POOL_TYPE PoolType, IN ULONG PoolIndex, IN ULONG Threshold, IN PVOID PoolLock)
Definition: expool.c:964
FORCEINLINE VOID MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte, IN MMPTE InvalidPte)
Definition: miarm.h:945
PFN_COUNT MmNumberOfPhysicalPages
Definition: init.c:48
BOOLEAN MmProtectFreedNonPagedPool
Definition: pool.c:29
VOID FASTCALL KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:31
#define MiPdeToPte(_Pde)
Definition: mm.h:231
struct _MMFREE_POOL_ENTRY * PMMFREE_POOL_ENTRY
union _MMPFN::@1664 u2
PRTL_BITMAP EndOfPagedPoolBitmap
Definition: mm.h:413
#define InitializeListHead(ListHead)
Definition: env_spec_w32.h:944
ULONG MmMaximumNonPagedPoolInBytes
Definition: init.c:22
PMMPTE PteAddress
Definition: mm.h:316
MMPTE_SOFTWARE Soft
Definition: mmtypes.h:219
unsigned int * PULONG
Definition: retypes.h:1
#define min(a, b)
Definition: monoChain.cc:55
VOID FASTCALL KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
Definition: gmutex.c:53
PKEVENT MiLowPagedPoolEvent
Definition: mminit.c:294
#define DPRINT1
Definition: precomp.h:8
NTSYSAPI void WINAPI RtlSetAllBits(PRTL_BITMAP)
SLIST_HEADER MiPagedPoolSListHead
Definition: pool.c:32
FORCEINLINE PFN_NUMBER MiGetPfnEntryIndex(IN PMMPFN Pfn1)
Definition: mm.h:934
#define OUT
Definition: typedefs.h:39
PVOID MiSessionPoolEnd
Definition: init.c:31
PKEVENT MiHighPagedPoolEvent
Definition: mminit.c:295
unsigned int ULONG
Definition: retypes.h:1
#define UNIMPLEMENTED
Definition: debug.h:114
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:261
#define ULONG_PTR
Definition: config.h:101
ULONG64 PageFrameNumber
Definition: mmtypes.h:171
VOID NTAPI MiProtectFreeNonPagedPool(IN PVOID VirtualAddress, IN ULONG PageCount)
Definition: pool.c:39
PFN_NUMBER MiLowNonPagedPoolThreshold
Definition: mminit.c:304
ULONG PagedPoolHint
Definition: mm.h:417
ULONG MmSizeOfNonPagedPoolInBytes
Definition: init.c:21
PFN_NUMBER MmSizeOfPagedPoolInPages
Definition: mminit.c:111
PMMPTE FirstPteForPagedPool
Definition: mm.h:414
VOID NTAPI INIT_FUNCTION MiInitializePoolEvents(VOID)
Definition: pool.c:201
LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS]
Definition: pool.c:20
VOID NTAPI KeClearEvent(IN PKEVENT Event)
Definition: eventobj.c:22
FORCEINLINE BOOLEAN MI_IS_PHYSICAL_ADDRESS(IN PVOID Address)
Definition: miarm.h:902
PMMPDE PagedPoolBasePde
Definition: miarm.h:464
#define PFN_FROM_PTE(v)
Definition: mm.h:88
PVOID NTAPI MmAllocateMappingAddress(IN SIZE_T NumberOfBytes, IN ULONG PoolTag)
Definition: pool.c:1385
base of all file and directory entries
Definition: entries.h:82
VOID NTAPI KeBugCheckEx(_In_ ULONG BugCheckCode, _In_ ULONG_PTR BugCheckParameter1, _In_ ULONG_PTR BugCheckParameter2, _In_ ULONG_PTR BugCheckParameter3, _In_ ULONG_PTR BugCheckParameter4)
Definition: rtlcompat.c:94
#define _1GB
Definition: miarm.h:16
VOID NTAPI MmFreeMappingAddress(IN PVOID BaseAddress, IN ULONG PoolTag)
Definition: pool.c:1397
PMMPDE NextPdeForPagedPoolExpansion
Definition: mm.h:416
#define INIT_FUNCTION
Definition: ntoskrnl.h:11
FORCEINLINE VOID MiProtectedPoolProtectLinks(IN PVOID PoolFlink, IN PVOID PoolBlink)
Definition: pool.c:138
WINBASEAPI VOID WINAPI InitializeSListHead(_Out_ PSLIST_HEADER ListHead)
Definition: rtlfuncs.h:3353
PFN_NUMBER SessionPageDirectoryIndex
Definition: miarm.h:459