ReactOS  0.4.15-dev-2985-g54406bf
virtual.c
1 /*
2  * PROJECT: ReactOS Kernel
3  * LICENSE: BSD - See COPYING.ARM in the top level directory
4  * FILE: ntoskrnl/mm/ARM3/virtual.c
5  * PURPOSE: ARM Memory Manager Virtual Memory Management
6  * PROGRAMMERS: ReactOS Portable Systems Group
7  */
8 
9 /* INCLUDES *******************************************************************/
10 
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14 
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17 
18 #define MI_MAPPED_COPY_PAGES 14
19 #define MI_POOL_COPY_BYTES 512
20 #define MI_MAX_TRANSFER_SIZE (64 * 1024)
21 
22 NTSTATUS NTAPI
23 MiProtectVirtualMemory(IN PEPROCESS Process,
24  IN PVOID *BaseAddress,
25  IN OUT PSIZE_T NumberOfBytesToProtect,
26  IN ULONG NewAccessProtection,
27  OUT PULONG OldAccessProtection OPTIONAL);
28 
29 VOID
30 NTAPI
31 MiFlushTbAndCapture(IN PMMVAD FoundVad,
32  IN PMMPTE PointerPte,
33  IN ULONG ProtectionMask,
34  IN PMMPFN Pfn1,
35  IN BOOLEAN CaptureDirtyBit);
36 
37 
38 /* PRIVATE FUNCTIONS **********************************************************/
39 
40 ULONG
41 NTAPI
42 MiCalculatePageCommitment(IN ULONG_PTR StartingAddress,
43  IN ULONG_PTR EndingAddress,
44  IN PMMVAD Vad,
45  IN PEPROCESS Process)
46 {
47  PMMPTE PointerPte, LastPte;
48  PMMPDE PointerPde;
49  BOOLEAN OnPdeBoundary = TRUE;
50 #if _MI_PAGING_LEVELS >= 3
51  PMMPPE PointerPpe;
52  BOOLEAN OnPpeBoundary = TRUE;
53 #if _MI_PAGING_LEVELS == 4
54  PMMPXE PointerPxe;
55  BOOLEAN OnPxeBoundary = TRUE;
56 #endif
57 #endif
58 
59  /* Make sure this all makes sense */
60  ASSERT(PsGetCurrentThread()->OwnsProcessWorkingSetExclusive || PsGetCurrentThread()->OwnsProcessWorkingSetShared);
61  ASSERT(EndingAddress >= StartingAddress);
62  PointerPte = MiAddressToPte(StartingAddress);
63  LastPte = MiAddressToPte(EndingAddress);
64 
65  /*
66  * In case this is a committed VAD, assume the whole range is committed
67  * and count the individually decommitted pages.
68  * In case it is not, assume the range is not committed and count the individually committed pages.
69  */
70  ULONG_PTR CommittedPages = Vad->u.VadFlags.MemCommit ? BYTES_TO_PAGES(EndingAddress - StartingAddress) : 0;
71 
72  while (PointerPte <= LastPte)
73  {
74 #if _MI_PAGING_LEVELS == 4
75  /* Check if PXE was ever paged in. */
76  if (OnPxeBoundary)
77  {
78  PointerPxe = MiPteToPxe(PointerPte);
79 
80  /* Check that this loop is sane */
81  ASSERT(OnPpeBoundary);
82  ASSERT(OnPdeBoundary);
83 
84  if (PointerPxe->u.Long == 0)
85  {
86  PointerPxe++;
87  PointerPte = MiPxeToPte(PointerPxe);
88  continue;
89  }
90 
91  if (PointerPxe->u.Hard.Valid == 0)
92  MiMakeSystemAddressValid(MiPteToPpe(PointerPte), Process);
93  }
94  ASSERT(PointerPxe->u.Hard.Valid == 1);
95 #endif
96 
97 #if _MI_PAGING_LEVELS >= 3
98  /* Now PPE */
99  if (OnPpeBoundary)
100  {
101  PointerPpe = MiPteToPpe(PointerPte);
102 
103  /* Sanity again */
104  ASSERT(OnPdeBoundary);
105 
106  if (PointerPpe->u.Long == 0)
107  {
108  PointerPpe++;
109  PointerPte = MiPpeToPte(PointerPpe);
110 #if _MI_PAGING_LEVELS == 4
111  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
112 #endif
113  continue;
114  }
115 
116  if (PointerPpe->u.Hard.Valid == 0)
117  MiMakeSystemAddressValid(MiPteToPde(PointerPte), Process);
118  }
119  ASSERT(PointerPpe->u.Hard.Valid == 1);
120 #endif
121 
122  /* Last level is the PDE */
123  if (OnPdeBoundary)
124  {
125  PointerPde = MiPteToPde(PointerPte);
126  if (PointerPde->u.Long == 0)
127  {
128  PointerPde++;
129  PointerPte = MiPdeToPte(PointerPde);
130 #if _MI_PAGING_LEVELS >= 3
131  OnPpeBoundary = MiIsPteOnPpeBoundary(PointerPte);
132 #if _MI_PAGING_LEVELS == 4
133  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
134 #endif
135 #endif
136  continue;
137  }
138 
139  if (PointerPde->u.Hard.Valid == 0)
140  MiMakeSystemAddressValid(PointerPte, Process);
141  }
142  ASSERT(PointerPde->u.Hard.Valid == 1);
143 
144  /* Is this PTE demand zero? */
145  if (PointerPte->u.Long != 0)
146  {
147  /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
148  if ((PointerPte->u.Hard.Valid == 0) &&
149  (PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
150  ((PointerPte->u.Soft.Prototype == 0) ||
151  (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
152  {
153  /* It is, so remove it from the count of committed pages if we have to */
154  if (Vad->u.VadFlags.MemCommit)
155  CommittedPages--;
156  }
157  else if (!Vad->u.VadFlags.MemCommit)
158  {
159  /* It is a valid, non-decommitted, non-paged-out PTE. Count it in. */
160  CommittedPages++;
161  }
162  }
163 
164  /* Move to the next PTE */
165  PointerPte++;
166  /* Manage page tables */
167  OnPdeBoundary = MiIsPteOnPdeBoundary(PointerPte);
168 #if _MI_PAGING_LEVELS >= 3
169  OnPpeBoundary = MiIsPteOnPpeBoundary(PointerPte);
170 #if _MI_PAGING_LEVELS == 4
171  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
172 #endif
173 #endif
174  }
175 
176  /* Make sure we didn't mess this up */
177  ASSERT(CommittedPages <= BYTES_TO_PAGES(EndingAddress - StartingAddress));
178  return CommittedPages;
179 }
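The commit-counting rule above can be shown in isolation. Below is a minimal, self-contained sketch (not part of virtual.c; all names are hypothetical): for a MemCommit VAD the count starts at the full page count and decommitted entries are subtracted, otherwise it starts at zero and committed entries are added.

#include <stddef.h>

typedef enum { SKETCH_PTE_EMPTY, SKETCH_PTE_COMMITTED, SKETCH_PTE_DECOMMITTED } SKETCH_PTE;

static size_t SketchCountCommittedPages(const SKETCH_PTE *Ptes, size_t PageCount, int VadIsMemCommit)
{
    /* Same invariant as the loop above: committed VADs count down, others count up */
    size_t Committed = VadIsMemCommit ? PageCount : 0;
    size_t i;

    for (i = 0; i < PageCount; i++)
    {
        if (VadIsMemCommit && Ptes[i] == SKETCH_PTE_DECOMMITTED)
            Committed--;
        else if (!VadIsMemCommit && Ptes[i] == SKETCH_PTE_COMMITTED)
            Committed++;
    }

    return Committed;
}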
180 
181 ULONG
182 NTAPI
183 MiMakeSystemAddressValid(IN PVOID PageTableVirtualAddress,
184  IN PEPROCESS CurrentProcess)
185 {
186  NTSTATUS Status;
187  BOOLEAN WsShared = FALSE, WsSafe = FALSE, LockChange = FALSE;
188  PETHREAD CurrentThread = PsGetCurrentThread();
189 
190  /* Must be a non-pool page table, since those are double-mapped already */
191  ASSERT(PageTableVirtualAddress > MM_HIGHEST_USER_ADDRESS);
192  ASSERT((PageTableVirtualAddress < MmPagedPoolStart) ||
193  (PageTableVirtualAddress > MmPagedPoolEnd));
194 
195  /* Working set lock or PFN lock should be held */
197 
198  /* Check if the page table is valid */
199  while (!MmIsAddressValid(PageTableVirtualAddress))
200  {
201  /* Release the working set lock */
202  MiUnlockProcessWorkingSetForFault(CurrentProcess,
203  CurrentThread,
204  &WsSafe,
205  &WsShared);
206 
207  /* Fault it in */
208  Status = MmAccessFault(FALSE, PageTableVirtualAddress, KernelMode, NULL);
209  if (!NT_SUCCESS(Status))
210  {
211  /* This should not fail */
212  KeBugCheckEx(KERNEL_DATA_INPAGE_ERROR,
213  1,
214  Status,
216  (ULONG_PTR)PageTableVirtualAddress);
217  }
218 
219  /* Lock the working set again */
220  MiLockProcessWorkingSetForFault(CurrentProcess,
221  CurrentThread,
222  WsSafe,
223  WsShared);
224 
225  /* This flag will be useful later when we do better locking */
226  LockChange = TRUE;
227  }
228 
229  /* Let caller know what the lock state is */
230  return LockChange;
231 }
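The routine above follows a general pattern: a lock that cannot be held across a page fault is dropped, the address is faulted in, the lock is retaken, and the caller is told whether the lock was ever released. A minimal sketch of that pattern, with hypothetical callback names, could look like this:

static int SketchMakeValid(void *Address,
                           int  (*IsValid)(void *Address),
                           void (*Unlock)(void),
                           void (*FaultIn)(void *Address),
                           void (*Lock)(void))
{
    int LockChange = 0;

    while (!IsValid(Address))
    {
        Unlock();           /* the lock cannot be held across a page fault */
        FaultIn(Address);   /* plays the role of MmAccessFault() */
        Lock();
        LockChange = 1;     /* caller must assume its cached state is stale */
    }

    return LockChange;
}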
232 
233 ULONG
234 NTAPI
235 MiMakeSystemAddressValidPfn(IN PVOID VirtualAddress,
236  IN KIRQL OldIrql)
237 {
238  NTSTATUS Status;
239  BOOLEAN LockChange = FALSE;
240 
241  /* Must be a kernel address */
242  ASSERT(VirtualAddress > MM_HIGHEST_USER_ADDRESS);
243 
244  /* Check if the page is valid */
245  while (!MmIsAddressValid(VirtualAddress))
246  {
247  /* Release the PFN database */
248  MiReleasePfnLock(OldIrql);
249 
250  /* Fault it in */
251  Status = MmAccessFault(FALSE, VirtualAddress, KernelMode, NULL);
252  if (!NT_SUCCESS(Status))
253  {
254  /* This should not fail */
255  KeBugCheckEx(KERNEL_DATA_INPAGE_ERROR,
256  3,
257  Status,
258  0,
259  (ULONG_PTR)VirtualAddress);
260  }
261 
262  /* This flag will be useful later when we do better locking */
263  LockChange = TRUE;
264 
265  /* Lock the PFN database */
266  OldIrql = MiAcquirePfnLock();
267  }
268 
269  /* Let caller know what the lock state is */
270  return LockChange;
271 }
272 
273 PFN_COUNT
274 NTAPI
275 MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
276  IN PFN_NUMBER PageCount,
277  IN ULONG Flags,
278  OUT PPFN_NUMBER ValidPages)
279 {
280  PFN_COUNT ActualPages = 0;
281  PETHREAD CurrentThread = PsGetCurrentThread();
282  PMMPFN Pfn1, Pfn2;
283  PFN_NUMBER PageFrameIndex, PageTableIndex;
284  KIRQL OldIrql;
286 
287  /* Lock the system working set */
288  MiLockWorkingSet(CurrentThread, &MmSystemCacheWs);
289 
290  /* Loop all pages */
291  while (PageCount)
292  {
293  /* Make sure there's some data about the page */
294  if (PointerPte->u.Long)
295  {
296  /* Normally this is one possibility -- freeing a valid page */
297  if (PointerPte->u.Hard.Valid)
298  {
299  /* Get the page PFN */
300  PageFrameIndex = PFN_FROM_PTE(PointerPte);
301  Pfn1 = MiGetPfnEntry(PageFrameIndex);
302 
303  /* Should not have any working set data yet */
304  ASSERT(Pfn1->u1.WsIndex == 0);
305 
306  /* Actual valid, legitimate, pages */
307  if (ValidPages) (*ValidPages)++;
308 
309  /* Get the page table entry */
310  PageTableIndex = Pfn1->u4.PteFrame;
311  Pfn2 = MiGetPfnEntry(PageTableIndex);
312 
313  /* Lock the PFN database */
314  OldIrql = MiAcquirePfnLock();
315 
316  /* Delete the page */
317  MI_SET_PFN_DELETED(Pfn1);
318  MiDecrementShareCount(Pfn1, PageFrameIndex);
319 
320  /* Decrement the page table too */
321  MiDecrementShareCount(Pfn2, PageTableIndex);
322 
323  /* Release the PFN database */
324  MiReleasePfnLock(OldIrql);
325 
326  /* Destroy the PTE */
327  MI_ERASE_PTE(PointerPte);
328  }
329  else
330  {
331  /* As always, only handle current ARM3 scenarios */
332  ASSERT(PointerPte->u.Soft.Prototype == 0);
333  ASSERT(PointerPte->u.Soft.Transition == 0);
334 
335  /*
336  * The only other ARM3 possibility is a demand zero page, which would
337  * mean freeing some of the paged pool pages that haven't even been
338  * touched yet, as part of a larger allocation.
339  *
340  * Right now, we shouldn't expect any page file information in the PTE
341  */
342  ASSERT(PointerPte->u.Soft.PageFileHigh == 0);
343 
344  /* Destroy the PTE */
345  MI_ERASE_PTE(PointerPte);
346  }
347 
348  /* Actual legitimate pages */
349  ActualPages++;
350  }
351 
352  /* Keep going */
353  PointerPte++;
354  PageCount--;
355  }
356 
357  /* Release the working set */
358  MiUnlockWorkingSet(CurrentThread, &MmSystemCacheWs);
359 
360  /* Flush the entire TLB */
361  KeFlushEntireTb(TRUE, TRUE);
362 
363  /* Done */
364  return ActualPages;
365 }
366 
367 VOID
368 NTAPI
369 MiDeletePte(IN PMMPTE PointerPte,
370  IN PVOID VirtualAddress,
371  IN PEPROCESS CurrentProcess,
372  IN PMMPTE PrototypePte)
373 {
374  PMMPFN Pfn1;
375  MMPTE TempPte;
376  PFN_NUMBER PageFrameIndex;
377  PMMPDE PointerPde;
378 
379  /* PFN lock must be held */
380  MI_ASSERT_PFN_LOCK_HELD();
381 
382  /* WorkingSet must be exclusively locked */
384 
385  /* This must be current process. */
386  ASSERT(CurrentProcess == PsGetCurrentProcess());
387 
388  /* Capture the PTE */
389  TempPte = *PointerPte;
390 
391  /* See if the PTE is valid */
392  if (TempPte.u.Hard.Valid == 0)
393  {
394  /* Prototype and paged out PTEs not supported yet */
395  ASSERT(TempPte.u.Soft.Prototype == 0);
396  ASSERT((TempPte.u.Soft.PageFileHigh == 0) || (TempPte.u.Soft.Transition == 1));
397 
398  if (TempPte.u.Soft.Transition)
399  {
400  /* Get the PFN entry */
401  PageFrameIndex = PFN_FROM_PTE(&TempPte);
402  Pfn1 = MiGetPfnEntry(PageFrameIndex);
403 
404  DPRINT("Pte %p is transitional!\n", PointerPte);
405 
406  /* Make sure the saved PTE address is valid */
407  ASSERT((PMMPTE)((ULONG_PTR)Pfn1->PteAddress & ~0x1) == PointerPte);
408 
409  /* Destroy the PTE */
410  MI_ERASE_PTE(PointerPte);
411 
412  /* Drop the reference on the page table. */
414 
415  /* In case of shared page, the prototype PTE must be in transition, not the process one */
416  ASSERT(Pfn1->u3.e1.PrototypePte == 0);
417 
418  /* Delete the PFN */
419  MI_SET_PFN_DELETED(Pfn1);
420 
421  /* It must be either free (refcount == 0) or being written (refcount == 1) */
422  ASSERT(Pfn1->u3.e2.ReferenceCount == Pfn1->u3.e1.WriteInProgress);
423 
424  /* See if we must free it ourselves, or if it will be freed once I/O is over */
425  if (Pfn1->u3.e2.ReferenceCount == 0)
426  {
427  /* And it should be in standby or modified list */
429 
430  /* Unlink it and set its reference count to one */
431  MiUnlinkPageFromList(Pfn1);
432  Pfn1->u3.e2.ReferenceCount++;
433 
434  /* This will put it back on the free list and clean it up properly */
435  MiDecrementReferenceCount(Pfn1, PageFrameIndex);
436  }
437  return;
438  }
439  }
440 
441  /* Get the PFN entry */
442  PageFrameIndex = PFN_FROM_PTE(&TempPte);
443  Pfn1 = MiGetPfnEntry(PageFrameIndex);
444 
445  /* Check if this is a valid, prototype PTE */
446  if (Pfn1->u3.e1.PrototypePte == 1)
447  {
448  /* Get the PDE and make sure it's faulted in */
449  PointerPde = MiPteToPde(PointerPte);
450  if (PointerPde->u.Hard.Valid == 0)
451  {
452 #if (_MI_PAGING_LEVELS == 2)
453  /* Could be paged pool access from a new process -- synchronize the page directories */
455  {
456 #endif
457  /* The PDE must be valid at this point */
458  KeBugCheckEx(MEMORY_MANAGEMENT,
459  0x61940,
460  (ULONG_PTR)PointerPte,
461  PointerPte->u.Long,
463  }
464 #if (_MI_PAGING_LEVELS == 2)
465  }
466 #endif
467  /* Drop the share count on the page table */
468  PointerPde = MiPteToPde(PointerPte);
469  MiDecrementShareCount(MiGetPfnEntry(PointerPde->u.Hard.PageFrameNumber),
470  PointerPde->u.Hard.PageFrameNumber);
471 
472  /* Drop the share count */
473  MiDecrementShareCount(Pfn1, PageFrameIndex);
474 
475  /* Either a fork, or this is the shared user data page */
476  if ((PointerPte <= MiHighestUserPte) && (PrototypePte != Pfn1->PteAddress))
477  {
478  /* If it's not the shared user page, then crash, since there's no fork() yet */
481  {
482  /* Must be some sort of memory corruption */
483  KeBugCheckEx(MEMORY_MANAGEMENT,
484  0x400,
485  (ULONG_PTR)PointerPte,
486  PointerPte->u.Long,
487  (ULONG_PTR)Pfn1->PteAddress);
488  }
489  }
490 
491  /* Erase it */
492  MI_ERASE_PTE(PointerPte);
493  }
494  else
495  {
496  /* Make sure the saved PTE address is valid */
497  if ((PMMPTE)((ULONG_PTR)Pfn1->PteAddress & ~0x1) != PointerPte)
498  {
499  /* The PFN entry is illegal, or invalid */
500  KeBugCheckEx(MEMORY_MANAGEMENT,
501  0x401,
502  (ULONG_PTR)PointerPte,
503  PointerPte->u.Long,
504  (ULONG_PTR)Pfn1->PteAddress);
505  }
506 
507  /* Erase the PTE */
508  MI_ERASE_PTE(PointerPte);
509 
510  /* There should only be 1 shared reference count */
511  ASSERT(Pfn1->u2.ShareCount == 1);
512 
513  /* Drop the reference on the page table. */
515 
516  /* Mark the PFN for deletion and dereference what should be the last ref */
517  MI_SET_PFN_DELETED(Pfn1);
518  MiDecrementShareCount(Pfn1, PageFrameIndex);
519 
520  /* We should eventually do this */
521  //CurrentProcess->NumberOfPrivatePages--;
522  }
523 
524  /* Flush the TLB */
525  KeFlushCurrentTb();
526 }
527 
528 VOID
529 NTAPI
530 MiDeleteVirtualAddresses(IN ULONG_PTR Va,
531  IN ULONG_PTR EndingAddress,
532  IN PMMVAD Vad)
533 {
534  PMMPTE PointerPte, PrototypePte, LastPrototypePte;
535  PMMPDE PointerPde;
536 #if (_MI_PAGING_LEVELS >= 3)
537  PMMPPE PointerPpe;
538 #endif
539 #if (_MI_PAGING_LEVELS >= 4)
540  PMMPPE PointerPxe;
541 #endif
542  MMPTE TempPte;
543  PEPROCESS CurrentProcess;
544  KIRQL OldIrql;
545  BOOLEAN AddressGap = FALSE;
546  PSUBSECTION Subsection;
547 
548  /* Get out if this is a fake VAD, RosMm will free the marea pages */
549  if ((Vad) && (Vad->u.VadFlags.Spare == 1)) return;
550 
551  /* Get the current process */
552  CurrentProcess = PsGetCurrentProcess();
553 
554  /* Check if this is a section VAD or a VM VAD */
555  if (!(Vad) || (Vad->u.VadFlags.PrivateMemory) || !(Vad->FirstPrototypePte))
556  {
557  /* Don't worry about prototypes */
558  PrototypePte = LastPrototypePte = NULL;
559  }
560  else
561  {
562  /* Get the prototype PTE */
563  PrototypePte = Vad->FirstPrototypePte;
564  LastPrototypePte = Vad->FirstPrototypePte + 1;
565  }
566 
567  /* In all cases, we don't support fork() yet */
568  ASSERT(CurrentProcess->CloneRoot == NULL);
569 
570  /* Loop the PTE for each VA (EndingAddress is inclusive!) */
571  while (Va <= EndingAddress)
572  {
573 #if (_MI_PAGING_LEVELS >= 4)
574  /* Get the PXE and check if it's valid */
575  PointerPxe = MiAddressToPxe((PVOID)Va);
576  if (!PointerPxe->u.Hard.Valid)
577  {
578  /* Check for unmapped range and skip it */
579  if (!PointerPxe->u.Long)
580  {
581  /* There are gaps in the address space */
582  AddressGap = TRUE;
583 
584  /* Update Va and continue looping */
585  Va = (ULONG_PTR)MiPxeToAddress(PointerPxe + 1);
586  continue;
587  }
588 
589  /* Make the PXE valid */
591  }
592 #endif
593 #if (_MI_PAGING_LEVELS >= 3)
594  /* Get the PPE and check if it's valid */
595  PointerPpe = MiAddressToPpe((PVOID)Va);
596  if (!PointerPpe->u.Hard.Valid)
597  {
598  /* Check for unmapped range and skip it */
599  if (!PointerPpe->u.Long)
600  {
601  /* There are gaps in the address space */
602  AddressGap = TRUE;
603 
604  /* Update Va and continue looping */
605  Va = (ULONG_PTR)MiPpeToAddress(PointerPpe + 1);
606  continue;
607  }
608 
609  /* Make the PPE valid */
611  }
612 #endif
613  /* Skip invalid PDEs */
614  PointerPde = MiAddressToPde((PVOID)Va);
615  if (!PointerPde->u.Long)
616  {
617  /* There are gaps in the address space */
618  AddressGap = TRUE;
619 
620  /* Check if all the PDEs are invalid, so there's nothing to free */
621  Va = (ULONG_PTR)MiPdeToAddress(PointerPde + 1);
622  continue;
623  }
624 
625  /* Now check if the PDE is mapped in */
626  if (!PointerPde->u.Hard.Valid)
627  {
628  /* It isn't, so map it in */
629  PointerPte = MiPteToAddress(PointerPde);
630  MiMakeSystemAddressValid(PointerPte, CurrentProcess);
631  }
632 
633  /* Now we should have a valid PDE, mapped in, and still have some VA */
634  ASSERT(PointerPde->u.Hard.Valid == 1);
635  ASSERT(Va <= EndingAddress);
636 
637  /* Check if this is a section VAD with gaps in it */
638  if ((AddressGap) && (LastPrototypePte))
639  {
640  /* We need to skip to the next correct prototype PTE */
642 
643  /* And we need the subsection to skip to the next last prototype PTE */
644  Subsection = MiLocateSubsection(Vad, Va >> PAGE_SHIFT);
645  if (Subsection)
646  {
647  /* Found it! */
648  LastPrototypePte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
649  }
650  else
651  {
652  /* No more subsections, we are done with prototype PTEs */
653  PrototypePte = NULL;
654  }
655  }
656 
657  /* Lock the PFN Database while we delete the PTEs */
658  OldIrql = MiAcquirePfnLock();
659  PointerPte = MiAddressToPte(Va);
660  do
661  {
662  /* Making sure the PDE is still valid */
663  ASSERT(PointerPde->u.Hard.Valid == 1);
664 
665  /* Capture the PTE and make sure it exists */
666  TempPte = *PointerPte;
667  if (TempPte.u.Long)
668  {
669  /* Check if the PTE is actually mapped in */
671  {
672  /* Are we dealing with section VAD? */
673  if ((LastPrototypePte) && (PrototypePte > LastPrototypePte))
674  {
675  /* We need to skip to the next correct prototype PTE */
677 
678  /* And we need the subsection to skip to the next last prototype PTE */
679  Subsection = MiLocateSubsection(Vad, Va >> PAGE_SHIFT);
680  if (Subsection)
681  {
682  /* Found it! */
683  LastPrototypePte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
684  }
685  else
686  {
687  /* No more subsections, we are done with prototype PTEs */
688  PrototypePte = NULL;
689  }
690  }
691 
692  /* Check for prototype PTE */
693  if ((TempPte.u.Hard.Valid == 0) &&
694  (TempPte.u.Soft.Prototype == 1))
695  {
696  /* Just nuke it */
697  MI_ERASE_PTE(PointerPte);
698  }
699  else
700  {
701  /* Delete the PTE proper */
702  MiDeletePte(PointerPte,
703  (PVOID)Va,
704  CurrentProcess,
705  PrototypePte);
706  }
707  }
708  else
709  {
710  /* The PTE was never mapped, just nuke it here */
711  MI_ERASE_PTE(PointerPte);
712  }
713 
714  if (MiDecrementPageTableReferences((PVOID)Va) == 0)
715  {
716  ASSERT(PointerPde->u.Long != 0);
717  /* Delete the PDE proper */
718  MiDeletePde(PointerPde, CurrentProcess);
719  /* Jump */
720  Va = (ULONG_PTR)MiPdeToAddress(PointerPde + 1);
721  break;
722  }
723  }
724 
725  /* Update the address and PTE for it */
726  Va += PAGE_SIZE;
727  PointerPte++;
728  PrototypePte++;
729  } while ((Va & (PDE_MAPPED_VA - 1)) && (Va <= EndingAddress));
730 
731  /* Release the lock */
732  MiReleasePfnLock(OldIrql);
733 
734  if (Va > EndingAddress) return;
735 
736  /* Otherwise, we exited because we hit a new PDE boundary, so start over */
737  AddressGap = FALSE;
738  }
739 }
740 
741 LONG
742 MiGetExceptionInfo(IN PEXCEPTION_POINTERS ExceptionInfo,
743  OUT PBOOLEAN HaveBadAddress,
744  OUT PULONG_PTR BadAddress)
745 {
746  PEXCEPTION_RECORD ExceptionRecord;
747  PAGED_CODE();
748 
749  //
750  // Assume default
751  //
752  *HaveBadAddress = FALSE;
753 
754  //
755  // Get the exception record
756  //
757  ExceptionRecord = ExceptionInfo->ExceptionRecord;
758 
759  //
760  // Look at the exception code
761  //
762  if ((ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) ||
763  (ExceptionRecord->ExceptionCode == STATUS_GUARD_PAGE_VIOLATION) ||
764  (ExceptionRecord->ExceptionCode == STATUS_IN_PAGE_ERROR))
765  {
766  //
767  // We can tell the address if we have more than one parameter
768  //
769  if (ExceptionRecord->NumberParameters > 1)
770  {
771  //
772  // Return the address
773  //
774  *HaveBadAddress = TRUE;
775  *BadAddress = ExceptionRecord->ExceptionInformation[1];
776  }
777  }
778 
779  //
780  // Continue executing the next handler
781  //
782  return EXCEPTION_EXECUTE_HANDLER;
783 }
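A hypothetical user-mode analogue (MSVC SEH, not part of virtual.c) shows the same idea: the exception filter pulls the faulting address out of ExceptionInformation[1], just as MiGetExceptionInfo does for the copy routines below.

#include <windows.h>
#include <stdio.h>

static LONG Filter(EXCEPTION_POINTERS *Info, ULONG_PTR *BadAddress)
{
    EXCEPTION_RECORD *Record = Info->ExceptionRecord;

    /* Parameter 1 of an access violation is the faulting virtual address */
    if (Record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
        Record->NumberParameters > 1)
    {
        *BadAddress = Record->ExceptionInformation[1];
    }
    return EXCEPTION_EXECUTE_HANDLER;
}

int main(void)
{
    ULONG_PTR BadAddress = 0;

    __try
    {
        *(volatile int *)0x1000 = 1; /* force an access violation */
    }
    __except (Filter(GetExceptionInformation(), &BadAddress))
    {
        printf("Faulted at %p\n", (void *)BadAddress);
    }
    return 0;
}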
784 
785 NTSTATUS
786 NTAPI
787 MiDoMappedCopy(IN PEPROCESS SourceProcess,
788  IN PVOID SourceAddress,
789  IN PEPROCESS TargetProcess,
790  IN PVOID TargetAddress,
791  IN SIZE_T BufferSize,
792  IN KPROCESSOR_MODE PreviousMode,
793  OUT PSIZE_T ReturnSize)
794 {
795  PFN_NUMBER MdlBuffer[(sizeof(MDL) / sizeof(PFN_NUMBER)) + MI_MAPPED_COPY_PAGES + 1];
796  PMDL Mdl = (PMDL)MdlBuffer;
797  SIZE_T TotalSize, CurrentSize, RemainingSize;
798  volatile BOOLEAN FailedInProbe = FALSE;
799  volatile BOOLEAN PagesLocked = FALSE;
800  PVOID CurrentAddress = SourceAddress, CurrentTargetAddress = TargetAddress;
801  volatile PVOID MdlAddress = NULL;
802  KAPC_STATE ApcState;
803  BOOLEAN HaveBadAddress;
804  ULONG_PTR BadAddress;
805  NTSTATUS Status = STATUS_SUCCESS;
806  PAGED_CODE();
807 
808  //
809  // Calculate the maximum amount of data to move
810  //
811  TotalSize = MI_MAPPED_COPY_PAGES * PAGE_SIZE;
812  if (BufferSize <= TotalSize) TotalSize = BufferSize;
813  CurrentSize = TotalSize;
814  RemainingSize = BufferSize;
815 
816  //
817  // Loop as long as there is still data
818  //
819  while (RemainingSize > 0)
820  {
821  //
822  // Check if this transfer will finish everything off
823  //
824  if (RemainingSize < CurrentSize) CurrentSize = RemainingSize;
825 
826  //
827  // Attach to the source address space
828  //
829  KeStackAttachProcess(&SourceProcess->Pcb, &ApcState);
830 
831  //
832  // Check state for this pass
833  //
834  ASSERT(MdlAddress == NULL);
835  ASSERT(PagesLocked == FALSE);
836  ASSERT(FailedInProbe == FALSE);
837 
838  //
839  // Protect user-mode copy
840  //
841  _SEH2_TRY
842  {
843  //
844  // If this is our first time, probe the buffer
845  //
846  if ((CurrentAddress == SourceAddress) && (PreviousMode != KernelMode))
847  {
848  //
849  // Catch a failure here
850  //
851  FailedInProbe = TRUE;
852 
853  //
854  // Do the probe
855  //
857 
858  //
859  // Passed
860  //
861  FailedInProbe = FALSE;
862  }
863 
864  //
865  // Initialize and probe and lock the MDL
866  //
867  MmInitializeMdl(Mdl, CurrentAddress, CurrentSize);
868  MmProbeAndLockPages(Mdl, PreviousMode, IoReadAccess);
869  PagesLocked = TRUE;
870  }
872  {
874  }
875  _SEH2_END
876 
877  /* Detach from source process */
878  KeUnstackDetachProcess(&ApcState);
879 
880  if (Status != STATUS_SUCCESS)
881  {
882  goto Exit;
883  }
884 
885  //
886  // Now map the pages
887  //
888  MdlAddress = MmMapLockedPagesSpecifyCache(Mdl,
889  KernelMode,
890  MmCached,
891  NULL,
892  FALSE,
894  if (!MdlAddress)
895  {
897  goto Exit;
898  }
899 
900  //
901  // Attach to the target process
902  //
903  KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
904 
905  _SEH2_TRY
906  {
907  //
908  // Check if this is our first time through
909  //
910  if ((CurrentTargetAddress == TargetAddress) && (PreviousMode != KernelMode))
911  {
912  //
913  // Catch a failure here
914  //
915  FailedInProbe = TRUE;
916 
917  //
918  // Do the probe
919  //
921 
922  //
923  // Passed
924  //
925  FailedInProbe = FALSE;
926  }
927 
928  //
929  // Now do the actual move
930  //
931  RtlCopyMemory(CurrentTargetAddress, MdlAddress, CurrentSize);
932  }
934  &HaveBadAddress,
935  &BadAddress))
936  {
937  *ReturnSize = BufferSize - RemainingSize;
938  //
939  // Check if we failed during the probe
940  //
941  if (FailedInProbe)
942  {
943  //
944  // Exit
945  //
947  }
948  else
949  {
950  //
951  // Otherwise we failed during the move.
952  // Check if we know exactly where we stopped copying
953  //
954  if (HaveBadAddress)
955  {
956  //
957  // Return the exact number of bytes copied
958  //
959  *ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
960  }
961  //
962  // Return partial copy
963  //
965  }
966  }
967  _SEH2_END;
968 
969  /* Detach from target process */
970  KeUnstackDetachProcess(&ApcState);
971 
972  //
973  // Check for SEH status
974  //
975  if (Status != STATUS_SUCCESS)
976  {
977  goto Exit;
978  }
979 
980  //
981  // Unmap and unlock
982  //
983  MmUnmapLockedPages(MdlAddress, Mdl);
984  MdlAddress = NULL;
985  MmUnlockPages(Mdl);
986  PagesLocked = FALSE;
987 
988  //
989  // Update location and size
990  //
991  RemainingSize -= CurrentSize;
992  CurrentAddress = (PVOID)((ULONG_PTR)CurrentAddress + CurrentSize);
993  CurrentTargetAddress = (PVOID)((ULONG_PTR)CurrentTargetAddress + CurrentSize);
994  }
995 
996 Exit:
997  if (MdlAddress != NULL)
998  MmUnmapLockedPages(MdlAddress, Mdl);
999  if (PagesLocked)
1000  MmUnlockPages(Mdl);
1001 
1002  //
1003  // All bytes read
1004  //
1005  if (Status == STATUS_SUCCESS)
1006  *ReturnSize = BufferSize;
1007  return Status;
1008 }
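Stripped of the MDL and process-attach machinery, the size bookkeeping above (and in MiDoPoolCopy below) is a plain chunked copy: at most MI_MAPPED_COPY_PAGES * PAGE_SIZE per pass, with the final pass shrunk to the remainder. A minimal sketch with a hypothetical per-chunk callback:

#include <stddef.h>

static void SketchChunkedCopy(size_t BufferSize, size_t MaxChunk,
                              void (*CopyChunk)(size_t Offset, size_t Size))
{
    /* First pass copies min(BufferSize, MaxChunk) bytes */
    size_t CurrentSize = (BufferSize <= MaxChunk) ? BufferSize : MaxChunk;
    size_t RemainingSize = BufferSize;
    size_t Offset = 0;

    while (RemainingSize > 0)
    {
        /* Shrink the last pass to whatever is left */
        if (RemainingSize < CurrentSize) CurrentSize = RemainingSize;

        CopyChunk(Offset, CurrentSize);

        RemainingSize -= CurrentSize;
        Offset += CurrentSize;
    }
}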
1009 
1010 NTSTATUS
1011 NTAPI
1012 MiDoPoolCopy(IN PEPROCESS SourceProcess,
1013  IN PVOID SourceAddress,
1014  IN PEPROCESS TargetProcess,
1015  IN PVOID TargetAddress,
1016  IN SIZE_T BufferSize,
1017  IN KPROCESSOR_MODE PreviousMode,
1018  OUT PSIZE_T ReturnSize)
1019 {
1020  UCHAR StackBuffer[MI_POOL_COPY_BYTES];
1021  SIZE_T TotalSize, CurrentSize, RemainingSize;
1022  volatile BOOLEAN FailedInProbe = FALSE, HavePoolAddress = FALSE;
1023  PVOID CurrentAddress = SourceAddress, CurrentTargetAddress = TargetAddress;
1024  PVOID PoolAddress;
1025  KAPC_STATE ApcState;
1026  BOOLEAN HaveBadAddress;
1027  ULONG_PTR BadAddress;
1028  NTSTATUS Status = STATUS_SUCCESS;
1029  PAGED_CODE();
1030 
1031  DPRINT("Copying %Iu bytes from process %p (address %p) to process %p (Address %p)\n",
1032  BufferSize, SourceProcess, SourceAddress, TargetProcess, TargetAddress);
1033 
1034  //
1035  // Calculate the maximum amount of data to move
1036  //
1037  TotalSize = MI_MAX_TRANSFER_SIZE;
1038  if (BufferSize <= MI_MAX_TRANSFER_SIZE) TotalSize = BufferSize;
1039  CurrentSize = TotalSize;
1040  RemainingSize = BufferSize;
1041 
1042  //
1043  // Check if we can use the stack
1044  //
1045  if (BufferSize <= MI_POOL_COPY_BYTES)
1046  {
1047  //
1048  // Use it
1049  //
1050  PoolAddress = (PVOID)StackBuffer;
1051  }
1052  else
1053  {
1054  //
1055  // Allocate pool
1056  //
1057  PoolAddress = ExAllocatePoolWithTag(NonPagedPool, TotalSize, 'VmRw');
1058  if (!PoolAddress) ASSERT(FALSE);
1059  HavePoolAddress = TRUE;
1060  }
1061 
1062  //
1063  // Loop as long as there is still data
1064  //
1065  while (RemainingSize > 0)
1066  {
1067  //
1068  // Check if this transfer will finish everything off
1069  //
1070  if (RemainingSize < CurrentSize) CurrentSize = RemainingSize;
1071 
1072  //
1073  // Attach to the source address space
1074  //
1075  KeStackAttachProcess(&SourceProcess->Pcb, &ApcState);
1076 
1077  /* Check that state is sane */
1078  ASSERT(FailedInProbe == FALSE);
1080 
1081  //
1082  // Protect user-mode copy
1083  //
1084  _SEH2_TRY
1085  {
1086  //
1087  // If this is our first time, probe the buffer
1088  //
1089  if ((CurrentAddress == SourceAddress) && (PreviousMode != KernelMode))
1090  {
1091  //
1092  // Catch a failure here
1093  //
1094  FailedInProbe = TRUE;
1095 
1096  //
1097  // Do the probe
1098  //
1100 
1101  //
1102  // Passed
1103  //
1104  FailedInProbe = FALSE;
1105  }
1106 
1107  //
1108  // Do the copy
1109  //
1110  RtlCopyMemory(PoolAddress, CurrentAddress, CurrentSize);
1111  }
1113  &HaveBadAddress,
1114  &BadAddress))
1115  {
1116  *ReturnSize = BufferSize - RemainingSize;
1117 
1118  //
1119  // Check if we failed during the probe
1120  //
1121  if (FailedInProbe)
1122  {
1123  //
1124  // Exit
1125  //
1127  }
1128  else
1129  {
1130  //
1131  // We failed during the move.
1132  // Check if we know exactly where we stopped copying
1133  //
1134  if (HaveBadAddress)
1135  {
1136  //
1137  // Return the exact number of bytes copied
1138  //
1139  *ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
1140  }
1141  //
1142  // Return partial copy
1143  //
1145  }
1146  }
1147  _SEH2_END
1148 
1149  /* Let go of the source */
1150  KeUnstackDetachProcess(&ApcState);
1151 
1152  if (Status != STATUS_SUCCESS)
1153  {
1154  goto Exit;
1155  }
1156 
1157  /* Grab the target process */
1158  KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
1159 
1160  _SEH2_TRY
1161  {
1162  //
1163  // Check if this is our first time through
1164  //
1165  if ((CurrentTargetAddress == TargetAddress) && (PreviousMode != KernelMode))
1166  {
1167  //
1168  // Catch a failure here
1169  //
1170  FailedInProbe = TRUE;
1171 
1172  //
1173  // Do the probe
1174  //
1176 
1177  //
1178  // Passed
1179  //
1180  FailedInProbe = FALSE;
1181  }
1182 
1183  //
1184  // Now do the actual move
1185  //
1186  RtlCopyMemory(CurrentTargetAddress, PoolAddress, CurrentSize);
1187  }
1189  &HaveBadAddress,
1190  &BadAddress))
1191  {
1192  *ReturnSize = BufferSize - RemainingSize;
1193  //
1194  // Check if we failed during the probe
1195  //
1196  if (FailedInProbe)
1197  {
1198  //
1199  // Exit
1200  //
1202  }
1203  else
1204  {
1205  //
1206  // Otherwise we failed during the move.
1207  // Check if we know exactly where we stopped copying
1208  //
1209  if (HaveBadAddress)
1210  {
1211  //
1212  // Return the exact number of bytes copied
1213  //
1214  *ReturnSize = BadAddress - (ULONG_PTR)SourceAddress;
1215  }
1216  //
1217  // Return partial copy
1218  //
1220  }
1221  }
1222  _SEH2_END;
1223 
1224  //
1225  // Detach from target
1226  //
1227  KeUnstackDetachProcess(&ApcState);
1228 
1229  //
1230  // Check for SEH status
1231  //
1232  if (Status != STATUS_SUCCESS)
1233  {
1234  goto Exit;
1235  }
1236 
1237  //
1238  // Update location and size
1239  //
1240  RemainingSize -= CurrentSize;
1241  CurrentAddress = (PVOID)((ULONG_PTR)CurrentAddress + CurrentSize);
1242  CurrentTargetAddress = (PVOID)((ULONG_PTR)CurrentTargetAddress +
1243  CurrentSize);
1244  }
1245 
1246 Exit:
1247  //
1248  // Check if we had allocated pool
1249  //
1250  if (HavePoolAddress)
1251  ExFreePoolWithTag(PoolAddress, 'VmRw');
1252 
1253  //
1254  // All bytes read
1255  //
1256  if (Status == STATUS_SUCCESS)
1257  *ReturnSize = BufferSize;
1258  return Status;
1259 }
1260 
1261 NTSTATUS
1262 NTAPI
1263 MmCopyVirtualMemory(IN PEPROCESS SourceProcess,
1264  IN PVOID SourceAddress,
1265  IN PEPROCESS TargetProcess,
1266  IN PVOID TargetAddress,
1267  IN SIZE_T BufferSize,
1268  IN KPROCESSOR_MODE PreviousMode,
1269  OUT PSIZE_T ReturnSize)
1270 {
1271  NTSTATUS Status;
1272  PEPROCESS Process = SourceProcess;
1273 
1274  //
1275  // Don't accept zero-sized buffers
1276  //
1277  if (!BufferSize) return STATUS_SUCCESS;
1278 
1279  //
1280  // If we are copying from ourselves, lock the target instead
1281  //
1282  if (SourceProcess == PsGetCurrentProcess()) Process = TargetProcess;
1283 
1284  //
1285  // Acquire rundown protection
1286  //
1287  if (!ExAcquireRundownProtection(&Process->RundownProtect))
1288  {
1289  //
1290  // Fail
1291  //
1292  return STATUS_PROCESS_IS_TERMINATING;
1293  }
1294 
1295  //
1296  // See if we should use the pool copy
1297  //
1298  if (BufferSize > MI_POOL_COPY_BYTES)
1299  {
1300  //
1301  // Use MDL-copy
1302  //
1303  Status = MiDoMappedCopy(SourceProcess,
1304  SourceAddress,
1305  TargetProcess,
1306  TargetAddress,
1307  BufferSize,
1308  PreviousMode,
1309  ReturnSize);
1310  }
1311  else
1312  {
1313  //
1314  // Do pool copy
1315  //
1316  Status = MiDoPoolCopy(SourceProcess,
1317  SourceAddress,
1318  TargetProcess,
1319  TargetAddress,
1320  BufferSize,
1321  PreviousMode,
1322  ReturnSize);
1323  }
1324 
1325  //
1326  // Release the lock
1327  //
1328  ExReleaseRundownProtection(&Process->RundownProtect);
1329  return Status;
1330 }
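For context, this is the routine that NtReadVirtualMemory and NtWriteVirtualMemory funnel into. A hypothetical user-mode example (not part of this file) showing the Win32 entry point for the same operation:

#include <windows.h>
#include <stdio.h>

int main(void)
{
    int Value = 42;
    int Copy = 0;
    SIZE_T Returned = 0;

    /* Copying from our own process keeps the example self-contained;
       ReadProcessMemory ends up in NtReadVirtualMemory -> MmCopyVirtualMemory */
    if (ReadProcessMemory(GetCurrentProcess(), &Value, &Copy, sizeof(Copy), &Returned))
        printf("Read %Iu bytes, value %d\n", Returned, Copy);

    return 0;
}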
1331 
1332 NTSTATUS
1333 NTAPI
1334 MmFlushVirtualMemory(IN PEPROCESS Process,
1335  IN OUT PVOID *BaseAddress,
1336  IN OUT PSIZE_T RegionSize,
1337  OUT PIO_STATUS_BLOCK IoStatusBlock)
1338 {
1339  PAGED_CODE();
1340 
1341  UNIMPLEMENTED;
1342 
1343  return STATUS_NOT_IMPLEMENTED;
1344 }
1345 
1346 ULONG
1347 NTAPI
1348 MiGetPageProtection(IN PMMPTE PointerPte)
1349 {
1350  MMPTE TempPte;
1351  PMMPFN Pfn;
1353  PETHREAD CurrentThread;
1354  BOOLEAN WsSafe, WsShared;
1355  ULONG Protect;
1356  KIRQL OldIrql;
1357  PAGED_CODE();
1358 
1359  /* Copy this PTE's contents */
1360  TempPte = *PointerPte;
1361 
1362  /* Assure it's not totally zero */
1363  ASSERT(TempPte.u.Long);
1364 
1365  /* Check for a special prototype format */
1366  if ((TempPte.u.Soft.Valid == 0) &&
1367  (TempPte.u.Soft.Prototype == 1))
1368  {
1369  /* Check if the prototype PTE is not yet pointing to a PTE */
1370  if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)
1371  {
1372  /* The prototype PTE contains the protection */
1373  return MmProtectToValue[TempPte.u.Soft.Protection];
1374  }
1375 
1376  /* Get a pointer to the underlying shared PTE */
1377  PointerPte = MiProtoPteToPte(&TempPte);
1378 
1379  /* Since the PTE we want to read can be paged out at any time, we need
1380  to release the working set lock first, so that it can be paged in */
1381  CurrentThread = PsGetCurrentThread();
1384  CurrentThread,
1385  &WsSafe,
1386  &WsShared);
1387 
1388  /* Now read the PTE value */
1389  TempPte = *PointerPte;
1390 
1391  /* Check if that one is invalid */
1392  if (!TempPte.u.Hard.Valid)
1393  {
1394  /* We get the protection directly from this PTE */
1395  Protect = MmProtectToValue[TempPte.u.Soft.Protection];
1396  }
1397  else
1398  {
1399  /* The PTE is valid, so we might need to get the protection from
1400  the PFN. Lock the PFN database */
1401  OldIrql = MiAcquirePfnLock();
1402 
1403  /* Check if the PDE is still valid */
1404  if (MiAddressToPte(PointerPte)->u.Hard.Valid == 0)
1405  {
1406  /* It's not, make it valid */
1407  MiMakeSystemAddressValidPfn(PointerPte, OldIrql);
1408  }
1409 
1410  /* Now it's safe to read the PTE value again */
1411  TempPte = *PointerPte;
1412  ASSERT(TempPte.u.Long != 0);
1413 
1414  /* Check again if the PTE is invalid */
1415  if (!TempPte.u.Hard.Valid)
1416  {
1417  /* The PTE is not valid, so we can use its protection field */
1418  Protect = MmProtectToValue[TempPte.u.Soft.Protection];
1419  }
1420  else
1421  {
1422  /* The PTE is valid, so we can find the protection in the
1423  OriginalPte field of the PFN */
1424  Pfn = MI_PFN_ELEMENT(TempPte.u.Hard.PageFrameNumber);
1425  Protect = MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
1426  }
1427 
1428  /* Release the PFN database */
1429  MiReleasePfnLock(OldIrql);
1430  }
1431 
1432  /* Lock the working set again */
1434  CurrentThread,
1435  WsSafe,
1436  WsShared);
1437 
1438  return Protect;
1439  }
1440 
1441  /* In the easy case of transition or demand zero PTE just return its protection */
1442  if (!TempPte.u.Hard.Valid) return MmProtectToValue[TempPte.u.Soft.Protection];
1443 
1444  /* If we get here, the PTE is valid, so look up the page in PFN database */
1445  Pfn = MiGetPfnEntry(TempPte.u.Hard.PageFrameNumber);
1446  if (!Pfn->u3.e1.PrototypePte)
1447  {
1448  /* Return protection of the original pte */
1449  ASSERT(Pfn->u4.AweAllocation == 0);
1450  return MmProtectToValue[Pfn->OriginalPte.u.Soft.Protection];
1451  }
1452 
1453  /* This is a software PTE */
1454  DPRINT("Prototype PTE: %lx %p\n", TempPte.u.Hard.PageFrameNumber, Pfn);
1455  DPRINT("VA: %p\n", MiPteToAddress(&TempPte));
1456  DPRINT("Mask: %lx\n", TempPte.u.Soft.Protection);
1457  DPRINT("Mask2: %lx\n", Pfn->OriginalPte.u.Soft.Protection);
1458  return MmProtectToValue[TempPte.u.Soft.Protection];
1459 }
1460 
1461 ULONG
1462 NTAPI
1463 MiQueryAddressState(IN PVOID Va,
1464  IN PMMVAD Vad,
1465  IN PEPROCESS TargetProcess,
1466  OUT PULONG ReturnedProtect,
1467  OUT PVOID *NextVa)
1468 {
1469 
1470  PMMPTE PointerPte, ProtoPte;
1471  PMMPDE PointerPde;
1472 #if (_MI_PAGING_LEVELS >= 3)
1473  PMMPPE PointerPpe;
1474 #endif
1475 #if (_MI_PAGING_LEVELS >= 4)
1476  PMMPXE PointerPxe;
1477 #endif
1478  MMPTE TempPte, TempProtoPte;
1479  BOOLEAN DemandZeroPte = TRUE, ValidPte = FALSE;
1480  ULONG State = MEM_RESERVE, Protect = 0;
1481  ASSERT((Vad->StartingVpn <= ((ULONG_PTR)Va >> PAGE_SHIFT)) &&
1482  (Vad->EndingVpn >= ((ULONG_PTR)Va >> PAGE_SHIFT)));
1483 
1484  /* Only normal VADs supported */
1485  ASSERT(Vad->u.VadFlags.VadType == VadNone);
1486 
1487  /* Get the PDE and PTE for the address */
1488  PointerPde = MiAddressToPde(Va);
1489  PointerPte = MiAddressToPte(Va);
1490 #if (_MI_PAGING_LEVELS >= 3)
1491  PointerPpe = MiAddressToPpe(Va);
1492 #endif
1493 #if (_MI_PAGING_LEVELS >= 4)
1494  PointerPxe = MiAddressToPxe(Va);
1495 #endif
1496 
1497  /* Return the next range */
1498  *NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
1499 
1500  do
1501  {
1502 #if (_MI_PAGING_LEVELS >= 4)
1503  /* Does the PXE exist? */
1504  if (PointerPxe->u.Long == 0)
1505  {
1506  /* It does not, next range starts at the next PXE */
1507  *NextVa = MiPxeToAddress(PointerPxe + 1);
1508  break;
1509  }
1510 
1511  /* Is the PXE valid? */
1512  if (PointerPxe->u.Hard.Valid == 0)
1513  {
1514  /* It isn't, fault it in (make the PPE accessible) */
1515  MiMakeSystemAddressValid(PointerPpe, TargetProcess);
1516  }
1517 #endif
1518 #if (_MI_PAGING_LEVELS >= 3)
1519  /* Does the PPE exist? */
1520  if (PointerPpe->u.Long == 0)
1521  {
1522  /* It does not, next range starts at the next PPE */
1523  *NextVa = MiPpeToAddress(PointerPpe + 1);
1524  break;
1525  }
1526 
1527  /* Is the PPE valid? */
1528  if (PointerPpe->u.Hard.Valid == 0)
1529  {
1530  /* It isn't, fault it in (make the PDE accessible) */
1531  MiMakeSystemAddressValid(PointerPde, TargetProcess);
1532  }
1533 #endif
1534 
1535  /* Does the PDE exist? */
1536  if (PointerPde->u.Long == 0)
1537  {
1538  /* It does not, next range starts at the next PDE */
1539  *NextVa = MiPdeToAddress(PointerPde + 1);
1540  break;
1541  }
1542 
1543  /* Is the PDE valid? */
1544  if (PointerPde->u.Hard.Valid == 0)
1545  {
1546  /* It isn't, fault it in (make the PTE accessible) */
1547  MiMakeSystemAddressValid(PointerPte, TargetProcess);
1548  }
1549 
1550  /* We have a PTE that we can access now! */
1551  ValidPte = TRUE;
1552 
1553  } while (FALSE);
1554 
1555  /* Is it safe to try reading the PTE? */
1556  if (ValidPte)
1557  {
1558  /* FIXME: watch out for large pages */
1559  ASSERT(PointerPde->u.Hard.LargePage == FALSE);
1560 
1561  /* Capture the PTE */
1562  TempPte = *PointerPte;
1563  if (TempPte.u.Long != 0)
1564  {
1565  /* The PTE is valid, so it's not zeroed out */
1566  DemandZeroPte = FALSE;
1567 
1568  /* Is it a decommitted, invalid, or faulted PTE? */
1569  if ((TempPte.u.Soft.Protection == MM_DECOMMIT) &&
1570  (TempPte.u.Hard.Valid == 0) &&
1571  ((TempPte.u.Soft.Prototype == 0) ||
1572  (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
1573  {
1574  /* Otherwise our defaults should hold */
1575  ASSERT(Protect == 0);
1576  ASSERT(State == MEM_RESERVE);
1577  }
1578  else
1579  {
1580  /* This means it's committed */
1581  State = MEM_COMMIT;
1582 
1583  /* We don't support these */
1584  ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
1585  ASSERT(Vad->u.VadFlags.VadType != VadRotatePhysical);
1586  ASSERT(Vad->u.VadFlags.VadType != VadAwe);
1587 
1588  /* Get protection state of this page */
1589  Protect = MiGetPageProtection(PointerPte);
1590 
1591  /* Check if this is an image-backed VAD */
1592  if ((TempPte.u.Soft.Valid == 0) &&
1593  (TempPte.u.Soft.Prototype == 1) &&
1594  (Vad->u.VadFlags.PrivateMemory == 0) &&
1595  (Vad->ControlArea))
1596  {
1597  DPRINT1("Not supported\n");
1598  ASSERT(FALSE);
1599  }
1600  }
1601  }
1602  }
1603 
1604  /* Check if this was a demand-zero PTE, since we need to find the state */
1605  if (DemandZeroPte)
1606  {
1607  /* Not yet handled */
1608  ASSERT(Vad->u.VadFlags.VadType != VadDevicePhysicalMemory);
1609  ASSERT(Vad->u.VadFlags.VadType != VadAwe);
1610 
1611  /* Check if this is privately committed memory, or a section-backed VAD */
1612  if ((Vad->u.VadFlags.PrivateMemory == 0) && (Vad->ControlArea))
1613  {
1614  /* Tell caller about the next range */
1615  *NextVa = (PVOID)((ULONG_PTR)Va + PAGE_SIZE);
1616 
1617  /* Get the prototype PTE for this VAD */
1618  ProtoPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(Vad,
1619  (ULONG_PTR)Va >> PAGE_SHIFT);
1620  if (ProtoPte)
1621  {
1622  /* We should unlock the working set, but it's not being held! */
1623 
1624  /* Is the prototype PTE actually valid (committed)? */
1625  TempProtoPte = *ProtoPte;
1626  if (TempProtoPte.u.Long)
1627  {
1628  /* Unless this is a memory-mapped file, handle it like private VAD */
1629  State = MEM_COMMIT;
1630  ASSERT(Vad->u.VadFlags.VadType != VadImageMap);
1631  Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
1632  }
1633 
1634  /* We should re-lock the working set */
1635  }
1636  }
1637  else if (Vad->u.VadFlags.MemCommit)
1638  {
1639  /* This is committed memory */
1640  State = MEM_COMMIT;
1641 
1642  /* Convert the protection */
1643  Protect = MmProtectToValue[Vad->u.VadFlags.Protection];
1644  }
1645  }
1646 
1647  /* Return the protection code */
1648  *ReturnedProtect = Protect;
1649  return State;
1650 }
1651 
1652 NTSTATUS
1653 NTAPI
1654 MiQueryMemoryBasicInformation(IN HANDLE ProcessHandle,
1655  IN PVOID BaseAddress,
1656  OUT PVOID MemoryInformation,
1657  IN SIZE_T MemoryInformationLength,
1658  OUT PSIZE_T ReturnLength)
1659 {
1660  PEPROCESS TargetProcess;
1661  NTSTATUS Status = STATUS_SUCCESS;
1662  PMMVAD Vad = NULL;
1663  PVOID Address, NextAddress;
1664  BOOLEAN Found = FALSE;
1665  ULONG NewProtect, NewState;
1666  ULONG_PTR BaseVpn;
1667  MEMORY_BASIC_INFORMATION MemoryInfo;
1672 
1673  /* Check for illegal addresses in user-space, or the shared memory area */
1676  {
1677  Address = PAGE_ALIGN(BaseAddress);
1678 
1679  /* Make up an info structure describing this range */
1680  MemoryInfo.BaseAddress = Address;
1681  MemoryInfo.AllocationProtect = PAGE_READONLY;
1682  MemoryInfo.Type = MEM_PRIVATE;
1683 
1684  /* Special case for shared data */
1686  {
1688  MemoryInfo.State = MEM_COMMIT;
1689  MemoryInfo.Protect = PAGE_READONLY;
1690  MemoryInfo.RegionSize = PAGE_SIZE;
1691  }
1692  else
1693  {
1694  MemoryInfo.AllocationBase = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1;
1695  MemoryInfo.State = MEM_RESERVE;
1696  MemoryInfo.Protect = PAGE_NOACCESS;
1698  }
1699 
1700  /* Return the data, NtQueryInformation already probed it*/
1701  if (PreviousMode != KernelMode)
1702  {
1703  _SEH2_TRY
1704  {
1705  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1707  }
1709  {
1711  }
1712  _SEH2_END;
1713  }
1714  else
1715  {
1716  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1718  }
1719 
1720  return Status;
1721  }
1722 
1723  /* Check if this is for a local or remote process */
1725  {
1726  TargetProcess = PsGetCurrentProcess();
1727  }
1728  else
1729  {
1730  /* Reference the target process */
1733  PsProcessType,
1735  (PVOID*)&TargetProcess,
1736  NULL);
1737  if (!NT_SUCCESS(Status)) return Status;
1738 
1739  /* Attach to it now */
1740  KeStackAttachProcess(&TargetProcess->Pcb, &ApcState);
1741  }
1742 
1743  /* Lock the address space and make sure the process isn't already dead */
1744  MmLockAddressSpace(&TargetProcess->Vm);
1745  if (TargetProcess->VmDeleted)
1746  {
1747  /* Unlock the address space of the process */
1748  MmUnlockAddressSpace(&TargetProcess->Vm);
1749 
1750  /* Check if we were attached */
1752  {
1753  /* Detach and dereference the process */
1755  ObDereferenceObject(TargetProcess);
1756  }
1757 
1758  /* Bail out */
1759  DPRINT1("Process is dying\n");
1761  }
1762 
1763  /* Loop the VADs */
1764  ASSERT(TargetProcess->VadRoot.NumberGenericTableElements);
1765  if (TargetProcess->VadRoot.NumberGenericTableElements)
1766  {
1767  /* Scan on the right */
1768  Vad = (PMMVAD)TargetProcess->VadRoot.BalancedRoot.RightChild;
1769  BaseVpn = (ULONG_PTR)BaseAddress >> PAGE_SHIFT;
1770  while (Vad)
1771  {
1772  /* Check if this VAD covers the allocation range */
1773  if ((BaseVpn >= Vad->StartingVpn) &&
1774  (BaseVpn <= Vad->EndingVpn))
1775  {
1776  /* We're done */
1777  Found = TRUE;
1778  break;
1779  }
1780 
1781  /* Check if this VAD is too high */
1782  if (BaseVpn < Vad->StartingVpn)
1783  {
1784  /* Stop if there is no left child */
1785  if (!Vad->LeftChild) break;
1786 
1787  /* Search on the left next */
1788  Vad = Vad->LeftChild;
1789  }
1790  else
1791  {
1792  /* Then this VAD is too low, keep searching on the right */
1793  ASSERT(BaseVpn > Vad->EndingVpn);
1794 
1795  /* Stop if there is no right child */
1796  if (!Vad->RightChild) break;
1797 
1798  /* Search on the right next */
1799  Vad = Vad->RightChild;
1800  }
1801  }
1802  }
1803 
1804  /* Was a VAD found? */
1805  if (!Found)
1806  {
1808 
1809  /* Calculate region size */
1810  if (Vad)
1811  {
1812  if (Vad->StartingVpn >= BaseVpn)
1813  {
1814  /* Region size is the free space till the start of that VAD */
1815  MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
1816  }
1817  else
1818  {
1819  /* Get the next VAD */
1820  Vad = (PMMVAD)MiGetNextNode((PMMADDRESS_NODE)Vad);
1821  if (Vad)
1822  {
1823  /* Region size is the free space till the start of that VAD */
1824  MemoryInfo.RegionSize = (ULONG_PTR)(Vad->StartingVpn << PAGE_SHIFT) - (ULONG_PTR)Address;
1825  }
1826  else
1827  {
1828  /* Maximum possible region size with that base address */
1829  MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
1830  }
1831  }
1832  }
1833  else
1834  {
1835  /* Maximum possible region size with that base address */
1836  MemoryInfo.RegionSize = (PCHAR)MM_HIGHEST_VAD_ADDRESS + 1 - (PCHAR)Address;
1837  }
1838 
1839  /* Unlock the address space of the process */
1840  MmUnlockAddressSpace(&TargetProcess->Vm);
1841 
1842  /* Check if we were attached */
1844  {
1845  /* Detach and dereference the process */
1847  ObDereferenceObject(TargetProcess);
1848  }
1849 
1850  /* Build the rest of the initial information block */
1851  MemoryInfo.BaseAddress = Address;
1852  MemoryInfo.AllocationBase = NULL;
1853  MemoryInfo.AllocationProtect = 0;
1854  MemoryInfo.State = MEM_FREE;
1855  MemoryInfo.Protect = PAGE_NOACCESS;
1856  MemoryInfo.Type = 0;
1857 
1858  /* Return the data, NtQueryInformation already probed it*/
1859  if (PreviousMode != KernelMode)
1860  {
1861  _SEH2_TRY
1862  {
1863  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1865  }
1867  {
1869  }
1870  _SEH2_END;
1871  }
1872  else
1873  {
1874  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1876  }
1877 
1878  return Status;
1879  }
1880 
1881  /* Set the correct memory type based on what kind of VAD this is */
1882  if ((Vad->u.VadFlags.PrivateMemory) ||
1883  (Vad->u.VadFlags.VadType == VadRotatePhysical))
1884  {
1885  MemoryInfo.Type = MEM_PRIVATE;
1886  }
1887  else if (Vad->u.VadFlags.VadType == VadImageMap)
1888  {
1889  MemoryInfo.Type = MEM_IMAGE;
1890  }
1891  else
1892  {
1893  MemoryInfo.Type = MEM_MAPPED;
1894  }
1895 
1896  /* Find the memory area the specified address belongs to */
1898  ASSERT(MemoryArea != NULL);
1899 
1900  /* Determine information dependent on the memory area type */
1902  {
1904  if (!NT_SUCCESS(Status))
1905  {
1906  DPRINT1("MmQuerySectionView failed. MemoryArea=%p (%p-%p), BaseAddress=%p\n",
1909  }
1910  }
1911  else
1912  {
1913  /* Build the initial information block */
1915  MemoryInfo.BaseAddress = Address;
1916  MemoryInfo.AllocationBase = (PVOID)(Vad->StartingVpn << PAGE_SHIFT);
1918  MemoryInfo.Type = MEM_PRIVATE;
1919 
1920  /* Acquire the working set lock (shared is enough) */
1922 
1923  /* Find the largest chunk of memory which has the same state and protection mask */
1924  MemoryInfo.State = MiQueryAddressState(Address,
1925  Vad,
1926  TargetProcess,
1927  &MemoryInfo.Protect,
1928  &NextAddress);
1929  Address = NextAddress;
1930  while (((ULONG_PTR)Address >> PAGE_SHIFT) <= Vad->EndingVpn)
1931  {
1932  /* Keep going unless the state or protection mask changed */
1933  NewState = MiQueryAddressState(Address, Vad, TargetProcess, &NewProtect, &NextAddress);
1934  if ((NewState != MemoryInfo.State) || (NewProtect != MemoryInfo.Protect)) break;
1935  Address = NextAddress;
1936  }
1937 
1938  /* Release the working set lock */
1940 
1941  /* Check if we went outside of the VAD */
1942  if (((ULONG_PTR)Address >> PAGE_SHIFT) > Vad->EndingVpn)
1943  {
1944  /* Set the end of the VAD as the end address */
1945  Address = (PVOID)((Vad->EndingVpn + 1) << PAGE_SHIFT);
1946  }
1947 
1948  /* Now that we know the last VA address, calculate the region size */
1949  MemoryInfo.RegionSize = ((ULONG_PTR)Address - (ULONG_PTR)MemoryInfo.BaseAddress);
1950  }
1951 
1952  /* Unlock the address space of the process */
1953  MmUnlockAddressSpace(&TargetProcess->Vm);
1954 
1955  /* Check if we were attached */
1957  {
1958  /* Detach and dereference the process */
1960  ObDereferenceObject(TargetProcess);
1961  }
1962 
1963  /* Return the data, NtQueryInformation already probed it */
1964  if (PreviousMode != KernelMode)
1965  {
1966  _SEH2_TRY
1967  {
1968  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1970  }
1972  {
1974  }
1975  _SEH2_END;
1976  }
1977  else
1978  {
1979  *(PMEMORY_BASIC_INFORMATION)MemoryInformation = MemoryInfo;
1981  }
1982 
1983  /* All went well */
1984  DPRINT("Base: %p AllocBase: %p AllocProtect: %lx Protect: %lx "
1985  "State: %lx Type: %lx Size: %lx\n",
1986  MemoryInfo.BaseAddress, MemoryInfo.AllocationBase,
1987  MemoryInfo.AllocationProtect, MemoryInfo.Protect,
1988  MemoryInfo.State, MemoryInfo.Type, MemoryInfo.RegionSize);
1989 
1990  return Status;
1991 }
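A hypothetical user-mode counterpart (not part of virtual.c): VirtualQueryEx is served by NtQueryVirtualMemory's MemoryBasicInformation class, which the routine above implements, and callers walk regions using the returned BaseAddress and RegionSize:

#include <windows.h>
#include <stdio.h>

int main(void)
{
    MEMORY_BASIC_INFORMATION Info;
    PBYTE Address = NULL;

    /* Walk the whole user address space of the current process */
    while (VirtualQueryEx(GetCurrentProcess(), Address, &Info, sizeof(Info)) == sizeof(Info))
    {
        printf("%p size %Iu state %lx protect %lx\n",
               Info.BaseAddress, Info.RegionSize, Info.State, Info.Protect);

        /* The next region starts right after this one */
        Address = (PBYTE)Info.BaseAddress + Info.RegionSize;
    }
    return 0;
}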
1992 
1993 BOOLEAN
1994 NTAPI
1995 MiIsEntireRangeCommitted(IN ULONG_PTR StartingAddress,
1996  IN ULONG_PTR EndingAddress,
1997  IN PMMVAD Vad,
1998  IN PEPROCESS Process)
1999 {
2000  PMMPTE PointerPte, LastPte;
2001  PMMPDE PointerPde;
2002  BOOLEAN OnPdeBoundary = TRUE;
2003 #if _MI_PAGING_LEVELS >= 3
2004  PMMPPE PointerPpe;
2005  BOOLEAN OnPpeBoundary = TRUE;
2006 #if _MI_PAGING_LEVELS == 4
2007  PMMPXE PointerPxe;
2008  BOOLEAN OnPxeBoundary = TRUE;
2009 #endif
2010 #endif
2011 
2012  PAGED_CODE();
2013 
2014  /* Check that we hold the right locks */
2015  ASSERT(PsGetCurrentThread()->OwnsProcessWorkingSetExclusive || PsGetCurrentThread()->OwnsProcessWorkingSetShared);
2016 
2017  /* Get the PTE addresses */
2018  PointerPte = MiAddressToPte(StartingAddress);
2019  LastPte = MiAddressToPte(EndingAddress);
2020 
2021  /* Loop all the PTEs */
2022  while (PointerPte <= LastPte)
2023  {
2024 #if _MI_PAGING_LEVELS == 4
2025  /* Check for new PXE boundary */
2026  if (OnPxeBoundary)
2027  {
2028  PointerPxe = MiPteToPxe(PointerPte);
2029 
2030  /* Check that this loop is sane */
2031  ASSERT(OnPpeBoundary);
2032  ASSERT(OnPdeBoundary);
2033 
2034  if (PointerPxe->u.Long != 0)
2035  {
2036  /* Make it valid if needed */
2037  if (PointerPxe->u.Hard.Valid == 0)
2039  }
2040  else
2041  {
2042  /* Is the entire VAD committed? If not, fail */
2043  if (!Vad->u.VadFlags.MemCommit) return FALSE;
2044 
2045  PointerPxe++;
2046  PointerPte = MiPxeToPte(PointerPxe);
2047  continue;
2048  }
2049  }
2050 #endif
2051 
2052 #if _MI_PAGING_LEVELS >= 3
2053  /* Check for new PPE boundary */
2054  if (OnPpeBoundary)
2055  {
2056  PointerPpe = MiPteToPpe(PointerPte);
2057 
2058  /* Check that this loop is sane */
2059  ASSERT(OnPdeBoundary);
2060 
2061  if (PointerPpe->u.Long != 0)
2062  {
2063  /* Make it valid if needed */
2064  if (PointerPpe->u.Hard.Valid == 0)
2066  }
2067  else
2068  {
2069  /* Is the entire VAD committed? If not, fail */
2070  if (!Vad->u.VadFlags.MemCommit) return FALSE;
2071 
2072  PointerPpe++;
2073  PointerPte = MiPpeToPte(PointerPpe);
2074 #if _MI_PAGING_LEVELS == 4
2075  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
2076 #endif
2077  continue;
2078  }
2079  }
2080 #endif
2081  /* Check if we've hit a new PDE boundary */
2082  if (OnPdeBoundary)
2083  {
2084  /* Is this PDE demand zero? */
2085  PointerPde = MiPteToPde(PointerPte);
2086  if (PointerPde->u.Long != 0)
2087  {
2088  /* It isn't -- is it valid? */
2089  if (PointerPde->u.Hard.Valid == 0)
2090  {
2091  /* Nope, fault it in */
2092  MiMakeSystemAddressValid(PointerPte, Process);
2093  }
2094  }
2095  else
2096  {
2097  /* Is the entire VAD committed? If not, fail */
2098  if (!Vad->u.VadFlags.MemCommit) return FALSE;
2099 
2100  /* The whole PDE range is committed by the VAD, so skip to the next PDE */
2101  PointerPde++;
2102  PointerPte = MiPdeToPte(PointerPde);
2103 #if _MI_PAGING_LEVELS >= 3
2104  OnPpeBoundary = MiIsPteOnPpeBoundary(PointerPte);
2105 #if _MI_PAGING_LEVELS == 4
2106  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
2107 #endif
2108 #endif
2109 
2110  /* New loop iteration with our new, on-boundary PTE. */
2111  continue;
2112  }
2113  }
2114 
2115  /* Is the PTE demand zero? */
2116  if (PointerPte->u.Long == 0)
2117  {
2118  /* Is the entire VAD committed? If not, fail */
2119  if (!Vad->u.VadFlags.MemCommit) return FALSE;
2120  }
2121  else
2122  {
2123  /* It isn't -- is it a decommitted, invalid, or faulted PTE? */
2124  if ((PointerPte->u.Soft.Protection == MM_DECOMMIT) &&
2125  (PointerPte->u.Hard.Valid == 0) &&
2126  ((PointerPte->u.Soft.Prototype == 0) ||
2127  (PointerPte->u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED)))
2128  {
2129  /* Then part of the range is decommitted, so fail */
2130  return FALSE;
2131  }
2132  }
2133 
2134  /* Move to the next PTE */
2135  PointerPte++;
2136  OnPdeBoundary = MiIsPteOnPdeBoundary(PointerPte);
2137 #if _MI_PAGING_LEVELS >= 3
2138  OnPpeBoundary = MiIsPteOnPpeBoundary(PointerPte);
2139 #if _MI_PAGING_LEVELS == 4
2140  OnPxeBoundary = MiIsPteOnPxeBoundary(PointerPte);
2141 #endif
2142 #endif
2143  }
2144 
2145  /* All PTEs seem valid, and no VAD checks failed, the range is okay */
2146  return TRUE;
2147 }
2148 
2149 NTSTATUS
2150 NTAPI
2151 MiRosProtectVirtualMemory(IN PEPROCESS Process,
2152  IN PVOID *BaseAddress,
2153  IN OUT PSIZE_T NumberOfBytesToProtect,
2154  IN ULONG NewAccessProtection,
2155  OUT PULONG OldAccessProtection OPTIONAL)
2156 {
2157  PMEMORY_AREA MemoryArea;
2158  PMMSUPPORT AddressSpace;
2159  ULONG OldAccessProtection_;
2160  NTSTATUS Status;
2161 
2162  *NumberOfBytesToProtect = PAGE_ROUND_UP((ULONG_PTR)(*BaseAddress) + (*NumberOfBytesToProtect)) - PAGE_ROUND_DOWN(*BaseAddress);
2163  *BaseAddress = (PVOID)PAGE_ROUND_DOWN(*BaseAddress);
2164 
2165  AddressSpace = &Process->Vm;
2169  {
2171  return STATUS_UNSUCCESSFUL;
2172  }
2173 
2174  if (OldAccessProtection == NULL) OldAccessProtection = &OldAccessProtection_;
2175 
2178  MemoryArea,
2179  *BaseAddress,
2180  *NumberOfBytesToProtect,
2181  NewAccessProtection,
2182  OldAccessProtection);
2183 
2185 
2186  return Status;
2187 }
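The rounding at the top of this helper widens the caller's range to whole pages. A worked example, assuming 4 KB pages (values are illustrative only):

/* Requested: 100 bytes starting at 0x13010
 * PAGE_ROUND_DOWN(0x13010)        -> 0x13000  (new *BaseAddress)
 * PAGE_ROUND_UP(0x13010 + 100)    -> 0x14000
 * *NumberOfBytesToProtect         -> 0x14000 - 0x13000 = 0x1000 (one full page)
 */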
2188 
2189 NTSTATUS
2190 NTAPI
2191 MiProtectVirtualMemory(IN PEPROCESS Process,
2192  IN PVOID *BaseAddress,
2193  IN OUT PSIZE_T NumberOfBytesToProtect,
2194  IN ULONG NewAccessProtection,
2195  OUT PULONG OldAccessProtection OPTIONAL)
2196 {
2198  PMMVAD Vad;
2200  ULONG_PTR StartingAddress, EndingAddress;
2201  PMMPTE PointerPte, LastPte;
2202  PMMPDE PointerPde;
2203  MMPTE PteContents;
2204  PMMPFN Pfn1;
2205  ULONG ProtectionMask, OldProtect;
2206  BOOLEAN Committed;
2210 
2211  /* Calculate base address for the VAD */
2212  StartingAddress = (ULONG_PTR)PAGE_ALIGN((*BaseAddress));
2213  EndingAddress = (((ULONG_PTR)*BaseAddress + *NumberOfBytesToProtect - 1) | (PAGE_SIZE - 1));
2214 
2215  /* Calculate the protection mask and make sure it's valid */
2216  ProtectionMask = MiMakeProtectionMask(NewAccessProtection);
2217  if (ProtectionMask == MM_INVALID_PROTECTION)
2218  {
2219  DPRINT1("Invalid protection mask\n");
2221  }
2222 
2223  /* Check for ROS specific memory area */
2226  {
2227  /* Evil hack */
2228  return MiRosProtectVirtualMemory(Process,
2229  BaseAddress,
2230  NumberOfBytesToProtect,
2231  NewAccessProtection,
2232  OldAccessProtection);
2233  }
2234 
2235  /* Lock the address space and make sure the process isn't already dead */
2236  MmLockAddressSpace(&Process->Vm);
2238  if (Process->VmDeleted)
2239  {
2240  DPRINT1("Process is dying\n");
2242  goto FailPath;
2243  }
2244 
2245  /* Get the VAD for this address range, and make sure it exists */
2246  Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
2247  EndingAddress >> PAGE_SHIFT,
2248  &Process->VadRoot,
2249  (PMMADDRESS_NODE*)&Vad);
2250  if (Result != TableFoundNode)
2251  {
2252  DPRINT("Could not find a VAD for this allocation\n");
2254  goto FailPath;
2255  }
2256 
2257  /* Make sure the address is within this VAD's boundaries */
2258  if ((((ULONG_PTR)StartingAddress >> PAGE_SHIFT) < Vad->StartingVpn) ||
2259  (((ULONG_PTR)EndingAddress >> PAGE_SHIFT) > Vad->EndingVpn))
2260  {
2262  goto FailPath;
2263  }
2264 
2265  /* These kinds of VADs are not supported atm */
2266  if ((Vad->u.VadFlags.VadType == VadAwe) ||
2267  (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
2268  (Vad->u.VadFlags.VadType == VadLargePages))
2269  {
2270  DPRINT1("Illegal VAD for attempting to set protection\n");
2272  goto FailPath;
2273  }
2274 
2275  /* Check for a VAD whose protection can't be changed */
2276  if (Vad->u.VadFlags.NoChange == 1)
2277  {
2278  DPRINT1("Trying to change protection of a NoChange VAD\n");
2280  goto FailPath;
2281  }
2282 
2283  /* Is this section, or private memory? */
2284  if (Vad->u.VadFlags.PrivateMemory == 0)
2285  {
2286  /* Not yet supported */
2287  if (Vad->u.VadFlags.VadType == VadLargePageSection)
2288  {
2289  DPRINT1("Illegal VAD for attempting to set protection\n");
2291  goto FailPath;
2292  }
2293 
2294  /* Rotate VADs are not yet supported */
2295  if (Vad->u.VadFlags.VadType == VadRotatePhysical)
2296  {
2297  DPRINT1("Illegal VAD for attempting to set protection\n");
2299  goto FailPath;
2300  }
2301 
2302  /* Not valid on section files */
2303  if (NewAccessProtection & (PAGE_NOCACHE | PAGE_WRITECOMBINE))
2304  {
2305  /* Fail */
2306  DPRINT1("Invalid protection flags for section\n");
2308  goto FailPath;
2309  }
2310 
2311  /* Check if data or page file mapping protection PTE is compatible */
2312  if (!Vad->ControlArea->u.Flags.Image)
2313  {
2314  /* Not yet */
2315  DPRINT1("Fixme: Not checking for valid protection\n");
2316  }
2317 
2318  /* This is a section, and this is not yet supported */
2319  DPRINT1("Section protection not yet supported\n");
2320  OldProtect = 0;
2321  }
2322  else
2323  {
2324  /* Private memory, check protection flags */
2325  if ((NewAccessProtection & PAGE_WRITECOPY) ||
2326  (NewAccessProtection & PAGE_EXECUTE_WRITECOPY))
2327  {
2328  DPRINT1("Invalid protection flags for private memory\n");
2330  goto FailPath;
2331  }
2332 
2333  /* Lock the working set */
2335 
2336  /* Check if all pages in this range are committed */
2337  Committed = MiIsEntireRangeCommitted(StartingAddress,
2338  EndingAddress,
2339  Vad,
2340  Process);
2341  if (!Committed)
2342  {
2343  /* Fail */
2344  DPRINT1("The entire range is not committed\n");
2347  goto FailPath;
2348  }
2349 
2350  /* Compute starting and ending PTE and PDE addresses */
2351  PointerPde = MiAddressToPde(StartingAddress);
2352  PointerPte = MiAddressToPte(StartingAddress);
2353  LastPte = MiAddressToPte(EndingAddress);
2354 
2355  /* Make this PDE valid */
2357 
2358  /* Save protection of the first page */
2359  if (PointerPte->u.Long != 0)
2360  {
2361  /* Capture the page protection and make the PDE valid */
2362  OldProtect = MiGetPageProtection(PointerPte);
2364  }
2365  else
2366  {
2367  /* Grab the old protection from the VAD itself */
2368  OldProtect = MmProtectToValue[Vad->u.VadFlags.Protection];
2369  }
2370 
2371  /* Loop all the PTEs now */
2372  while (PointerPte <= LastPte)
2373  {
2374  /* Check if we've crossed a PDE boundary and make the new PDE valid too */
2375  if (MiIsPteOnPdeBoundary(PointerPte))
2376  {
2377  PointerPde = MiPteToPde(PointerPte);
2379  }
2380 
2381  /* Capture the PTE and check if it was empty */
2382  PteContents = *PointerPte;
2383  if (PteContents.u.Long == 0)
2384  {
2385  /* This used to be a zero PTE and it no longer is, so we must add a
2386  reference to the page table. */
2388  }
2389 
2390  /* Check what kind of PTE we are dealing with */
2391  if (PteContents.u.Hard.Valid == 1)
2392  {
2393  /* Get the PFN entry */
2394  Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(&PteContents));
2395 
2396  /* We don't support this yet */
2397  ASSERT(Pfn1->u3.e1.PrototypePte == 0);
2398 
2399  /* Check if the page should not be accessible at all */
2400  if ((NewAccessProtection & PAGE_NOACCESS) ||
2401  (NewAccessProtection & PAGE_GUARD))
2402  {
2403  KIRQL OldIrql = MiAcquirePfnLock();
2404 
2405  /* Mark the PTE as transition and change its protection */
2406  PteContents.u.Hard.Valid = 0;
2407  PteContents.u.Soft.Transition = 1;
2408  PteContents.u.Trans.Protection = ProtectionMask;
2409  /* Decrease PFN share count and write the PTE */
2410  MiDecrementShareCount(Pfn1, PFN_FROM_PTE(&PteContents));
2411  // FIXME: remove the page from the WS
2412  MI_WRITE_INVALID_PTE(PointerPte, PteContents);
2413 #ifdef CONFIG_SMP
2414  // FIXME: Should invalidate entry in every CPU TLB
2415  ASSERT(KeNumberProcessors == 1);
2416 #endif
2417  KeInvalidateTlbEntry(MiPteToAddress(PointerPte));
2418 
2419  /* We are done for this PTE */
2420  MiReleasePfnLock(OldIrql);
2421  }
2422  else
2423  {
2424  /* Write the protection mask and write it with a TLB flush */
2425  Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
2426  MiFlushTbAndCapture(Vad,
2427  PointerPte,
2428  ProtectionMask,
2429  Pfn1,
2430  TRUE);
2431  }
2432  }
2433  else
2434  {
2435  /* We don't support these cases yet */
2436  ASSERT(PteContents.u.Soft.Prototype == 0);
2437  //ASSERT(PteContents.u.Soft.Transition == 0);
2438 
2439  /* The PTE is already demand-zero, just update the protection mask */
2440  PteContents.u.Soft.Protection = ProtectionMask;
2441  MI_WRITE_INVALID_PTE(PointerPte, PteContents);
2442  ASSERT(PointerPte->u.Long != 0);
2443  }
2444 
2445  /* Move to the next PTE */
2446  PointerPte++;
2447  }
2448 
2449  /* Unlock the working set */
2451  }
2452 
2453  /* Unlock the address space */
2455 
2456  /* Return parameters and success */
2457  *NumberOfBytesToProtect = EndingAddress - StartingAddress + 1;
2458  *BaseAddress = (PVOID)StartingAddress;
2459  *OldAccessProtection = OldProtect;
2460  return STATUS_SUCCESS;
2461 
2462 FailPath:
2463  /* Unlock the address space and return the failure code */
2465  return Status;
2466 }
2467 
2468 VOID
2469 NTAPI
2471  IN PEPROCESS TargetProcess,
2472  IN KIRQL OldIrql)
2473 {
2474  PMMPTE PointerPte;
2475 #if _MI_PAGING_LEVELS >= 3
2476  PMMPPE PointerPpe = MiPdeToPpe(PointerPde);
2477 #if _MI_PAGING_LEVELS == 4
2478  PMMPXE PointerPxe = MiPdeToPxe(PointerPde);
2479 #endif
2480 #endif
2481 
2482  //
2483  // Sanity checks. The latter is because we only use this function with the
2484  // PFN lock not held, so it may go away in the future.
2485  //
2487  ASSERT(OldIrql == MM_NOIRQL);
2488 
2489  //
2490  // If everything is already valid, there is nothing to do.
2491  //
2492  if (
2493 #if _MI_PAGING_LEVELS == 4
2494  (PointerPxe->u.Hard.Valid) &&
2495 #endif
2496 #if _MI_PAGING_LEVELS >= 3
2497  (PointerPpe->u.Hard.Valid) &&
2498 #endif
2499  (PointerPde->u.Hard.Valid))
2500  {
2501  return;
2502  }
2503 
2504  //
2505  // At least something is invalid, so begin by getting the PTE for the PDE itself
2506  // and then look up each additional level. We must do it in this precise order
2507  // because the pagefault.c code (as in Windows) depends on the next
2508  // level up (higher) being valid when faulting a lower level
2509  //
2510  PointerPte = MiPteToAddress(PointerPde);
2511  do
2512  {
2513  //
2514  // Make sure APCs are still disabled
2515  //
2517 
2518 #if _MI_PAGING_LEVELS == 4
2519  //
2520  // First, make the PXE valid if needed
2521  //
2522  if (!PointerPxe->u.Hard.Valid)
2523  {
2524  MiMakeSystemAddressValid(PointerPpe, TargetProcess);
2525  ASSERT(PointerPxe->u.Hard.Valid == 1);
2526  }
2527 #endif
2528 
2529 #if _MI_PAGING_LEVELS >= 3
2530  //
2531  // Next, the PPE
2532  //
2533  if (!PointerPpe->u.Hard.Valid)
2534  {
2535  MiMakeSystemAddressValid(PointerPde, TargetProcess);
2536  ASSERT(PointerPpe->u.Hard.Valid == 1);
2537  }
2538 #endif
2539 
2540  //
2541  // And finally, make the PDE itself valid.
2542  //
2543  MiMakeSystemAddressValid(PointerPte, TargetProcess);
2544 
2545  /* Do not increment Page table refcount here for the PDE, this must be managed by caller */
2546 
2547  //
2548  // This should've worked the first time so the loop is really just for
2549  // show -- ASSERT that we're actually NOT going to be looping.
2550  //
2551  ASSERT(PointerPde->u.Hard.Valid == 1);
2552  } while (
2553 #if _MI_PAGING_LEVELS == 4
2554  !PointerPxe->u.Hard.Valid ||
2555 #endif
2556 #if _MI_PAGING_LEVELS >= 3
2557  !PointerPpe->u.Hard.Valid ||
2558 #endif
2559  !PointerPde->u.Hard.Valid);
2560 }
2561 
2562 VOID
2563 NTAPI
2565  IN ULONG Count)
2566 {
2567  KIRQL OldIrql;
2568  ULONG i;
2569  MMPTE TempPte;
2570  PFN_NUMBER PageFrameIndex;
2571  PMMPFN Pfn1, Pfn2;
2572 
2573  //
2574  // Acquire the PFN lock and loop all the PTEs in the list
2575  //
2576  OldIrql = MiAcquirePfnLock();
2577  for (i = 0; i != Count; i++)
2578  {
2579  //
2580  // The PTE must currently be valid
2581  //
2582  TempPte = *ValidPteList[i];
2583  ASSERT(TempPte.u.Hard.Valid == 1);
2584 
2585  //
2586  // Get the PFN entry for the page itself, and then for its page table
2587  //
2588  PageFrameIndex = PFN_FROM_PTE(&TempPte);
2589  Pfn1 = MiGetPfnEntry(PageFrameIndex);
2590  Pfn2 = MiGetPfnEntry(Pfn1->u4.PteFrame);
2591 
2592  //
2593  // Decrement the share count on the page table, and then on the page
2594  // itself
2595  //
2596  MiDecrementShareCount(Pfn2, Pfn1->u4.PteFrame);
2597  MI_SET_PFN_DELETED(Pfn1);
2598  MiDecrementShareCount(Pfn1, PageFrameIndex);
2599 
2600  //
2601  // Make the page decommitted
2602  //
2603  MI_WRITE_INVALID_PTE(ValidPteList[i], MmDecommittedPte);
2604  }
2605 
2606  //
2607  // All the PTEs have been dereferenced and made invalid, flush the TLB now
2608  // and then release the PFN lock
2609  //
2610  KeFlushCurrentTb();
2611  MiReleasePfnLock(OldIrql);
2612 }
2613 
2614 ULONG
2615 NTAPI
2616 MiDecommitPages(IN PVOID StartingAddress,
2617  IN PMMPTE EndingPte,
2619  IN PMMVAD Vad)
2620 {
2621  PMMPTE PointerPte, CommitPte = NULL;
2622  PMMPDE PointerPde;
2623  ULONG CommitReduction = 0;
2624  PMMPTE ValidPteList[256];
2625  ULONG PteCount = 0;
2626  PMMPFN Pfn1;
2627  MMPTE PteContents;
2628  PETHREAD CurrentThread = PsGetCurrentThread();
2629 
2630  //
2631  // Get the PDE and PTE for the address, and lock the working set.
2632  // If this was a VAD for a MEM_COMMIT allocation, also figure out where the
2633  // committed range ends so that we can do the right accounting.
2634  //
2635  PointerPde = MiAddressToPde(StartingAddress);
2636  PointerPte = MiAddressToPte(StartingAddress);
2637  if (Vad->u.VadFlags.MemCommit) CommitPte = MiAddressToPte(Vad->EndingVpn << PAGE_SHIFT);
2638  MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
2639 
2640  //
2641  // Make the PDE valid, and now loop through each page's worth of data
2642  //
2644  while (PointerPte <= EndingPte)
2645  {
2646  //
2647  // Check if we've crossed a PDE boundary
2648  //
2649  if (MiIsPteOnPdeBoundary(PointerPte))
2650  {
2651  //
2652  // Get the new PDE and flush the valid PTEs we had built up until
2653  // now. This helps reduce the amount of TLB flushing we have to do.
2654  // Note that Windows does a much better job using timestamps and
2655  // such, and does not flush the entire TLB all the time, but right
2656  // now we have bigger problems to worry about than TLB flushing.
2657  //
2658  PointerPde = MiAddressToPde(StartingAddress);
2659  if (PteCount)
2660  {
2661  MiProcessValidPteList(ValidPteList, PteCount);
2662  PteCount = 0;
2663  }
2664 
2665  //
2666  // Make this PDE valid
2667  //
2669  }
2670 
2671  //
2672  // Read this PTE. It might be active or still demand-zero.
2673  //
2674  PteContents = *PointerPte;
2675  if (PteContents.u.Long)
2676  {
2677  //
2678  // The PTE is active. It might be valid and in a working set, or
2679  // it might be a prototype PTE or paged out or even in transition.
2680  //
2681  if (PointerPte->u.Long == MmDecommittedPte.u.Long)
2682  {
2683  //
2684  // It's already decommitted, so there's nothing for us to do here
2685  //
2686  CommitReduction++;
2687  }
2688  else
2689  {
2690  //
2691  // Remove it from the counters, and check if it was valid or not
2692  //
2693  //Process->NumberOfPrivatePages--;
2694  if (PteContents.u.Hard.Valid)
2695  {
2696  //
2697  // It's valid. At this point make sure that it is not a ROS
2698  // PFN. Also, we don't support ProtoPTEs in this code path.
2699  //
2700  Pfn1 = MiGetPfnEntry(PteContents.u.Hard.PageFrameNumber);
2701  ASSERT(MI_IS_ROS_PFN(Pfn1) == FALSE);
2702  ASSERT(Pfn1->u3.e1.PrototypePte == FALSE);
2703 
2704  //
2705  // If our list of valid PTEs has grown too large, flush the pending
2706  // ones first; then add this PTE to the flush list.
2707  //
2708  if (PteCount == 256)
2709  {
2710  MiProcessValidPteList(ValidPteList, PteCount);
2711  PteCount = 0;
2712  }
2713  ValidPteList[PteCount++] = PointerPte;
2714  }
2715  else
2716  {
2717  //
2718  // We do not support any of these other scenarios at the moment
2719  //
2720  ASSERT(PteContents.u.Soft.Prototype == 0);
2721  ASSERT(PteContents.u.Soft.Transition == 0);
2722  ASSERT(PteContents.u.Soft.PageFileHigh == 0);
2723 
2724  //
2725  // So the only other possibility is that it is still a demand
2726  // zero PTE, in which case we undo the accounting we did
2727  // earlier and simply make the page decommitted.
2728  //
2729  //Process->NumberOfPrivatePages++;
2731  }
2732  }
2733  }
2734  else
2735  {
2736  //
2737  // This used to be a zero PTE and it no longer is, so we must add a
2738  // reference to the page table.
2739  //
2740  MiIncrementPageTableReferences(StartingAddress);
2741 
2742  //
2743  // Next, we account for the decommit and mark the PTE as decommitted
2744  //
2745  if (PointerPte > CommitPte) CommitReduction++;
2747  }
2748 
2749  //
2750  // Move to the next PTE and the next address
2751  //
2752  PointerPte++;
2753  StartingAddress = (PVOID)((ULONG_PTR)StartingAddress + PAGE_SIZE);
2754  }
2755 
2756  //
2757  // Flush any dangling PTEs from the loop in the last page table, and then
2758  // release the working set and return the commit reduction accounting.
2759  //
2760  if (PteCount) MiProcessValidPteList(ValidPteList, PteCount);
2761  MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
2762  return CommitReduction;
2763 }
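MiDecommitPages is the worker behind decommitting an existing reservation. A minimal user-mode sketch, assuming the documented Win32 wrappers VirtualAlloc and VirtualFree from kernel32 (a MEM_DECOMMIT free is what ultimately drives this path):

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: reserve a 64KB region,
   commit one page, then decommit it again while keeping the reservation. */
VOID DecommitExample(VOID)
{
    PVOID Base = VirtualAlloc(NULL, 0x10000, MEM_RESERVE, PAGE_READWRITE);
    PVOID Page;

    if (Base == NULL)
        return;

    Page = VirtualAlloc(Base, 0x1000, MEM_COMMIT, PAGE_READWRITE);
    if (Page != NULL)
    {
        *(ULONG*)Page = 0xC0FFEE;                /* fault the page in */
        VirtualFree(Page, 0x1000, MEM_DECOMMIT); /* PTE becomes a decommitted PTE */
    }

    VirtualFree(Base, 0, MEM_RELEASE);           /* drop the whole reservation */
}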
2764 
2765 /* PUBLIC FUNCTIONS ***********************************************************/
2766 
2767 /*
2768  * @unimplemented
2769  */
2770 PVOID
2771 NTAPI
2773 {
2774  UNIMPLEMENTED;
2775  return 0;
2776 }
2777 
2778 /*
2779  * @unimplemented
2780  */
2781 PVOID
2782 NTAPI
2784  IN SIZE_T Length,
2785  IN ULONG Mode)
2786 {
2787  static ULONG Warn; if (!Warn++) UNIMPLEMENTED;
2788  return Address;
2789 }
2790 
2791 /*
2792  * @unimplemented
2793  */
2794 VOID
2795 NTAPI
2797 {
2798  static ULONG Warn; if (!Warn++) UNIMPLEMENTED;
2799 }
2800 
2801 /* SYSTEM CALLS ***************************************************************/
2802 
2803 NTSTATUS
2804 NTAPI
2807  OUT PVOID Buffer,
2808  IN SIZE_T NumberOfBytesToRead,
2809  OUT PSIZE_T NumberOfBytesRead OPTIONAL)
2810 {
2814  SIZE_T BytesRead = 0;
2815  PAGED_CODE();
2816 
2817  //
2818  // Check if we came from user mode
2819  //
2820  if (PreviousMode != KernelMode)
2821  {
2822  //
2823  // Validate the read addresses
2824  //
2825  if ((((ULONG_PTR)BaseAddress + NumberOfBytesToRead) < (ULONG_PTR)BaseAddress) ||
2826  (((ULONG_PTR)Buffer + NumberOfBytesToRead) < (ULONG_PTR)Buffer) ||
2827  (((ULONG_PTR)BaseAddress + NumberOfBytesToRead) > MmUserProbeAddress) ||
2828  (((ULONG_PTR)Buffer + NumberOfBytesToRead) > MmUserProbeAddress))
2829  {
2830  //
2831  // Don't allow access to kernel space
2832  //
2833  return STATUS_ACCESS_VIOLATION;
2834  }
2835 
2836  //
2837  // Enter SEH for probe
2838  //
2839  _SEH2_TRY
2840  {
2841  //
2842  // Probe the output value
2843  //
2844  if (NumberOfBytesRead) ProbeForWriteSize_t(NumberOfBytesRead);
2845  }
2847  {
2848  //
2849  // Get exception code
2850  //
2852  }
2853  _SEH2_END;
2854  }
2855 
2856  //
2857  // Don't do zero-byte transfers
2858  //
2859  if (NumberOfBytesToRead)
2860  {
2861  //
2862  // Reference the process
2863  //
2866  PsProcessType,
2867  PreviousMode,
2868  (PVOID*)(&Process),
2869  NULL);
2870  if (NT_SUCCESS(Status))
2871  {
2872  //
2873  // Do the copy
2874  //
2876  BaseAddress,
2878  Buffer,
2879  NumberOfBytesToRead,
2880  PreviousMode,
2881  &BytesRead);
2882 
2883  //
2884  // Dereference the process
2885  //
2887  }
2888  }
2889 
2890  //
2891  // Check if the caller sent this parameter
2892  //
2893  if (NumberOfBytesRead)
2894  {
2895  //
2896  // Enter SEH to guard write
2897  //
2898  _SEH2_TRY
2899  {
2900  //
2901  // Return the number of bytes read
2902  //
2903  *NumberOfBytesRead = BytesRead;
2904  }
2906  {
2907  }
2908  _SEH2_END;
2909  }
2910 
2911  //
2912  // Return status
2913  //
2914  return Status;
2915 }
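A minimal user-mode sketch of the read path, assuming the documented Win32 wrapper ReadProcessMemory from kernel32, which reaches NtReadVirtualMemory through the system call stub (the handle needs PROCESS_VM_READ access):

#include <windows.h>
#include <stdio.h>

/* Illustrative sketch only, not part of virtual.c: read a DWORD from another
   process; hProcess and RemoteAddress are assumed to be valid. */
BOOL ReadRemoteDword(HANDLE hProcess, LPCVOID RemoteAddress, DWORD *Value)
{
    SIZE_T BytesRead = 0;

    if (!ReadProcessMemory(hProcess, RemoteAddress, Value, sizeof(*Value), &BytesRead))
    {
        printf("ReadProcessMemory failed: %lu\n", GetLastError());
        return FALSE;
    }
    return (BytesRead == sizeof(*Value));
}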
2916 
2917 NTSTATUS
2918 NTAPI
2921  IN PVOID Buffer,
2922  IN SIZE_T NumberOfBytesToWrite,
2923  OUT PSIZE_T NumberOfBytesWritten OPTIONAL)
2924 {
2928  SIZE_T BytesWritten = 0;
2929  PAGED_CODE();
2930 
2931  //
2932  // Check if we came from user mode
2933  //
2934  if (PreviousMode != KernelMode)
2935  {
2936  //
2937  // Validate the addresses
2938  //
2939  if ((((ULONG_PTR)BaseAddress + NumberOfBytesToWrite) < (ULONG_PTR)BaseAddress) ||
2940  (((ULONG_PTR)Buffer + NumberOfBytesToWrite) < (ULONG_PTR)Buffer) ||
2941  (((ULONG_PTR)BaseAddress + NumberOfBytesToWrite) > MmUserProbeAddress) ||
2942  (((ULONG_PTR)Buffer + NumberOfBytesToWrite) > MmUserProbeAddress))
2943  {
2944  //
2945  // Don't allow writing into kernel space
2946  //
2947  return STATUS_ACCESS_VIOLATION;
2948  }
2949 
2950  //
2951  // Enter SEH for probe
2952  //
2953  _SEH2_TRY
2954  {
2955  //
2956  // Probe the output value
2957  //
2958  if (NumberOfBytesWritten) ProbeForWriteSize_t(NumberOfBytesWritten);
2959  }
2961  {
2962  //
2963  // Get exception code
2964  //
2966  }
2967  _SEH2_END;
2968  }
2969 
2970  //
2971  // Don't do zero-byte transfers
2972  //
2973  if (NumberOfBytesToWrite)
2974  {
2975  //
2976  // Reference the process
2977  //
2980  PsProcessType,
2981  PreviousMode,
2982  (PVOID*)&Process,
2983  NULL);
2984  if (NT_SUCCESS(Status))
2985  {
2986  //
2987  // Do the copy
2988  //
2990  Buffer,
2991  Process,
2992  BaseAddress,
2993  NumberOfBytesToWrite,
2994  PreviousMode,
2995  &BytesWritten);
2996 
2997  //
2998  // Dereference the process
2999  //
3001  }
3002  }
3003 
3004  //
3005  // Check if the caller sent this parameter
3006  //
3007  if (NumberOfBytesWritten)
3008  {
3009  //
3010  // Enter SEH to guard write
3011  //
3012  _SEH2_TRY
3013  {
3014  //
3015  // Return the number of bytes written
3016  //
3017  *NumberOfBytesWritten = BytesWritten;
3018  }
3020  {
3021  }
3022  _SEH2_END;
3023  }
3024 
3025  //
3026  // Return status
3027  //
3028  return Status;
3029 }
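The write path mirrors the read path above. A hedged user-mode sketch, assuming the Win32 wrapper WriteProcessMemory from kernel32, which requires PROCESS_VM_WRITE and PROCESS_VM_OPERATION access on the handle:

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: patch a DWORD in another
   process; the handle and remote address are assumed to be valid. */
BOOL WriteRemoteDword(HANDLE hProcess, LPVOID RemoteAddress, DWORD Value)
{
    SIZE_T BytesWritten = 0;

    return WriteProcessMemory(hProcess, RemoteAddress, &Value, sizeof(Value), &BytesWritten) &&
           (BytesWritten == sizeof(Value));
}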
3030 
3031 NTSTATUS
3032 NTAPI
3035  _In_ SIZE_T FlushSize)
3036 {
3039  NTSTATUS Status;
3040  PAGED_CODE();
3041 
3042  /* Is a base address given? */
3043  if (BaseAddress != NULL)
3044  {
3045  /* If the requested size is 0, there is nothing to do */
3046  if (FlushSize == 0)
3047  {
3048  return STATUS_SUCCESS;
3049  }
3050 
3051  /* Is this a user mode call? */
3052  if (ExGetPreviousMode() != KernelMode)
3053  {
3054  /* Make sure the base address is in user space */
3056  {
3057  DPRINT1("Invalid BaseAddress 0x%p\n", BaseAddress);
3058  return STATUS_ACCESS_VIOLATION;
3059  }
3060  }
3061  }
3062 
3063  /* Is another process requested? */
3065  {
3066  /* Reference the process */
3069  PsProcessType,
3071  (PVOID*)&Process,
3072  NULL);
3073  if (!NT_SUCCESS(Status))
3074  {
3075  DPRINT1("Failed to reference the process %p\n", ProcessHandle);
3076  return Status;
3077  }
3078 
3079  /* Attach to the process */
3081  }
3082 
3083  /* Forward to Ke */
3084  KeSweepICache(BaseAddress, FlushSize);
3085 
3086  /* Check if we attached */
3088  {
3089  /* Detach from the process and dereference it */
3092  }
3093 
3094  /* All done, return to caller */
3095  return STATUS_SUCCESS;
3096 }
3097 
3098 NTSTATUS
3099 NTAPI
3101  IN OUT PVOID *UnsafeBaseAddress,
3102  IN OUT SIZE_T *UnsafeNumberOfBytesToProtect,
3103  IN ULONG NewAccessProtection,
3104  OUT PULONG UnsafeOldAccessProtection)
3105 {
3107  ULONG OldAccessProtection;
3108  ULONG Protection;
3111  SIZE_T NumberOfBytesToProtect = 0;
3113  NTSTATUS Status;
3116  PAGED_CODE();
3117 
3118  //
3119  // Check for valid protection flags
3120  //
3121  Protection = NewAccessProtection & ~(PAGE_GUARD|PAGE_NOCACHE);
3122  if (Protection != PAGE_NOACCESS &&
3123  Protection != PAGE_READONLY &&
3124  Protection != PAGE_READWRITE &&
3125  Protection != PAGE_WRITECOPY &&
3126  Protection != PAGE_EXECUTE &&
3127  Protection != PAGE_EXECUTE_READ &&
3128  Protection != PAGE_EXECUTE_READWRITE &&
3129  Protection != PAGE_EXECUTE_WRITECOPY)
3130  {
3131  //
3132  // Fail
3133  //
3135  }
3136 
3137  //
3138  // Check if we came from user mode
3139  //
3140  if (PreviousMode != KernelMode)
3141  {
3142  //
3143  // Enter SEH for probing
3144  //
3145  _SEH2_TRY
3146  {
3147  //
3148  // Validate all outputs
3149  //
3150  ProbeForWritePointer(UnsafeBaseAddress);
3151  ProbeForWriteSize_t(UnsafeNumberOfBytesToProtect);
3152  ProbeForWriteUlong(UnsafeOldAccessProtection);
3153 
3154  //
3155  // Capture them
3156  //
3157  BaseAddress = *UnsafeBaseAddress;
3158  NumberOfBytesToProtect = *UnsafeNumberOfBytesToProtect;
3159  }
3161  {
3162  //
3163  // Get exception code
3164  //
3166  }
3167  _SEH2_END;
3168  }
3169  else
3170  {
3171  //
3172  // Capture directly
3173  //
3174  BaseAddress = *UnsafeBaseAddress;
3175  NumberOfBytesToProtect = *UnsafeNumberOfBytesToProtect;
3176  }
3177 
3178  //
3179  // Catch illegal base address
3180  //
3182 
3183  //
3184  // Catch illegal region size
3185  //
3186  if ((MmUserProbeAddress - (ULONG_PTR)BaseAddress) < NumberOfBytesToProtect)
3187  {
3188  //
3189  // Fail
3190  //
3192  }
3193 
3194  //
3195  // 0 is also illegal
3196  //
3197  if (!NumberOfBytesToProtect) return STATUS_INVALID_PARAMETER_3;
3198 
3199  //
3200  // Get a reference to the process
3201  //
3204  PsProcessType,
3205  PreviousMode,
3206  (PVOID*)(&Process),
3207  NULL);
3208  if (!NT_SUCCESS(Status)) return Status;
3209 
3210  //
3211  // Check if we should attach
3212  //
3213  if (CurrentProcess != Process)
3214  {
3215  //
3216  // Do it
3217  //
3219  Attached = TRUE;
3220  }
3221 
3222  //
3223  // Do the actual work
3224  //
3226  &BaseAddress,
3227  &NumberOfBytesToProtect,
3228  NewAccessProtection,
3229  &OldAccessProtection);
3230 
3231  //
3232  // Detach if needed
3233  //
3235 
3236  //
3237  // Release reference
3238  //
3240 
3241  //
3242  // Enter SEH to return data
3243  //
3244  _SEH2_TRY
3245  {
3246  //
3247  // Return data to user
3248  //
3249  *UnsafeOldAccessProtection = OldAccessProtection;
3250  *UnsafeBaseAddress = BaseAddress;
3251  *UnsafeNumberOfBytesToProtect = NumberOfBytesToProtect;
3252  }
3254  {
3255  }
3256  _SEH2_END;
3257 
3258  //
3259  // Return status
3260  //
3261  return Status;
3262 }
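A minimal user-mode sketch of the protection path, assuming the Win32 wrapper VirtualProtect from kernel32, which invokes NtProtectVirtualMemory on the current process; as seen above, the kernel rounds the range to page boundaries and hands back the previous protection:

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: make a buffer read-only. */
BOOL MakeReadOnly(PVOID Buffer, SIZE_T Size, PDWORD OldProtect)
{
    /* All whole pages covered by [Buffer, Buffer + Size) are affected */
    return VirtualProtect(Buffer, Size, PAGE_READONLY, OldProtect);
}

Restoring the original protection is the same call with *OldProtect passed as the new protection value.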
3263 
3265 BOOLEAN
3267  PMMPFN Pfn1,
3268  ULONG LockType)
3269 {
3270  // HACK until we have proper WSLIST support
3271  PMMWSLE Wsle = &Pfn1->Wsle;
3272 
3273  if ((LockType & MAP_PROCESS) && (Wsle->u1.e1.LockedInWs))
3274  return TRUE;
3275  if ((LockType & MAP_SYSTEM) && (Wsle->u1.e1.LockedInMemory))
3276  return TRUE;
3277 
3278  return FALSE;
3279 }
3280 
3282 VOID
3284  PMMPFN Pfn1,
3285  ULONG LockType)
3286 {
3287  // HACK until we have proper WSLIST support
3288  PMMWSLE Wsle = &Pfn1->Wsle;
3289 
3290  if (!Wsle->u1.e1.LockedInWs &&
3291  !Wsle->u1.e1.LockedInMemory)
3292  {
3294  }
3295 
3296  if (LockType & MAP_PROCESS)
3297  Wsle->u1.e1.LockedInWs = 1;
3298  if (LockType & MAP_SYSTEM)
3299  Wsle->u1.e1.LockedInMemory = 1;
3300 }
3301 
3303 VOID
3305  PMMPFN Pfn1,
3306  ULONG LockType)
3307 {
3308  // HACK until we have proper WSLIST support
3309  PMMWSLE Wsle = &Pfn1->Wsle;
3310 
3311  if (LockType & MAP_PROCESS)
3312  Wsle->u1.e1.LockedInWs = 0;
3313  if (LockType & MAP_SYSTEM)
3314  Wsle->u1.e1.LockedInMemory = 0;
3315 
3316  if (!Wsle->u1.e1.LockedInWs &&
3317  !Wsle->u1.e1.LockedInMemory)
3318  {
3320  }
3321 }
3322 
3323 static
3324 NTSTATUS
3328  _Inout_ PVOID *EndAddress)
3329 
3330 {
3331  PMMVAD Vad;
3332  PVOID CurrentVa;
3333 
3334  /* Get the base address and align the start address */
3335  *EndAddress = (PUCHAR)*BaseAddress + *RegionSize;
3336  *EndAddress = ALIGN_UP_POINTER_BY(*EndAddress, PAGE_SIZE);
3338 
3339  /* First loop and check all VADs */
3340  CurrentVa = *BaseAddress;
3341  while (CurrentVa < *EndAddress)
3342  {
3343  /* Get VAD */
3344  Vad = MiLocateAddress(CurrentVa);
3345  if (Vad == NULL)
3346  {
3348  return STATUS_ACCESS_VIOLATION;
3349  }
3350 
3351  /* Check VAD type */
3352  if ((Vad->u.VadFlags.VadType != VadNone) &&
3353  (Vad->u.VadFlags.VadType != VadImageMap) &&
3354  (Vad->u.VadFlags.VadType != VadWriteWatch))
3355  {
3356  *EndAddress = CurrentVa;
3357  *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
3359  }
3360 
3361  CurrentVa = (PVOID)((Vad->EndingVpn + 1) << PAGE_SHIFT);
3362  }
3363 
3364  *RegionSize = (PUCHAR)*EndAddress - (PUCHAR)*BaseAddress;
3365  return STATUS_SUCCESS;
3366 }
3367 
3368 static
3369 NTSTATUS
3373  IN ULONG MapType)
3374 {
3377  PVOID CurrentVa, EndAddress;
3378  PMMPTE PointerPte, LastPte;
3379  PMMPDE PointerPde;
3380 #if (_MI_PAGING_LEVELS >= 3)
3381  PMMPDE PointerPpe;
3382 #endif
3383 #if (_MI_PAGING_LEVELS == 4)
3384  PMMPDE PointerPxe;
3385 #endif
3386  PMMPFN Pfn1;
3387  NTSTATUS Status, TempStatus;
3388 
3389  /* Lock the address space */
3392 
3393  /* Make sure we still have an address space */
3395  if (CurrentProcess->VmDeleted)
3396  {
3398  goto Cleanup;
3399  }
3400 
3401  /* Check the VADs in the requested range */
3403  if (!NT_SUCCESS(Status))
3404  {
3405  goto Cleanup;
3406  }
3407 
3408  /* Enter SEH for probing */
3409  _SEH2_TRY
3410  {
3411  /* Loop all pages and probe them */
3412  CurrentVa = *BaseAddress;
3413  while (CurrentVa < EndAddress)
3414  {
3415  (void)(*(volatile CHAR*)CurrentVa);
3416  CurrentVa = (PUCHAR)CurrentVa + PAGE_SIZE;
3417  }
3418  }
3420  {
3422  goto Cleanup;
3423  }
3424  _SEH2_END;
3425 
3426  /* All pages were accessible. Since we hold the address space lock, nothing
3427  can be decommitted, so assume success for now. */
3429 
3430  /* Get the PTE and PDE */
3431  PointerPte = MiAddressToPte(*BaseAddress);
3432  PointerPde = MiAddressToPde(*BaseAddress);
3433 #if (_MI_PAGING_LEVELS >= 3)
3434  PointerPpe = MiAddressToPpe(*BaseAddress);
3435 #endif
3436 #if (_MI_PAGING_LEVELS == 4)
3437  PointerPxe = MiAddressToPxe(*BaseAddress);
3438 #endif
3439 
3440  /* Get the last PTE */
3441  LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
3442 
3443  /* Lock the process working set */
3445 
3446  /* Loop the pages */
3447  do
3448  {
3449  /* Check for a page that is not accessible */
3450  while (
3451 #if (_MI_PAGING_LEVELS == 4)
3452  (PointerPxe->u.Hard.Valid == 0) ||
3453 #endif
3454 #if (_MI_PAGING_LEVELS >= 3)
3455  (PointerPpe->u.Hard.Valid == 0) ||
3456 #endif
3457  (PointerPde->u.Hard.Valid == 0) ||
3458  (PointerPte->u.Hard.Valid == 0))
3459  {
3460  /* Release process working set */
3462 
3463  /* Access the page */
3464  CurrentVa = MiPteToAddress(PointerPte);
3465 
3466  //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
3467  TempStatus = MmAccessFault(TRUE, CurrentVa, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
3468  if (!NT_SUCCESS(TempStatus))
3469  {
3470  // This should only happen when remote backing storage is not accessible
3471  ASSERT(FALSE);
3472  Status = TempStatus;
3473  goto Cleanup;
3474  }
3475 
3476  /* Lock the process working set */
3478  }
3479 
3480  /* Get the PFN */
3481  Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
3482  ASSERT(Pfn1 != NULL);
3483 
3484  /* Check the previous lock status */
3485  if (MI_IS_LOCKED_VA(Pfn1, MapType))
3486  {
3488  }
3489 
3490  /* Lock it */
3491  MI_LOCK_VA(Pfn1, MapType);
3492 
3493  /* Go to the next PTE */
3494  PointerPte++;
3495 
3496  /* Check if we're on a PDE boundary */
3497  if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
3498 #if (_MI_PAGING_LEVELS >= 3)
3499  if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
3500 #endif
3501 #if (_MI_PAGING_LEVELS == 4)
3502  if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
3503 #endif
3504  } while (PointerPte <= LastPte);
3505 
3506  /* Release process working set */
3508 
3509 Cleanup:
3510  /* Unlock address space */
3512 
3513  return Status;
3514 }
3515 
3516 NTSTATUS
3517 NTAPI
3520  IN OUT PSIZE_T NumberOfBytesToLock,
3521  IN ULONG MapType)
3522 {
3525  NTSTATUS Status;
3529  PVOID CapturedBaseAddress;
3530  SIZE_T CapturedBytesToLock;
3531  PAGED_CODE();
3532 
3533  //
3534  // Validate flags
3535  //
3536  if ((MapType & ~(MAP_PROCESS | MAP_SYSTEM)))
3537  {
3538  //
3539  // Invalid set of flags
3540  //
3541  return STATUS_INVALID_PARAMETER;
3542  }
3543 
3544  //
3545  // At least one flag must be specified
3546  //
3547  if (!(MapType & (MAP_PROCESS | MAP_SYSTEM)))
3548  {
3549  //
3550  // No flag given
3551  //
3552  return STATUS_INVALID_PARAMETER;
3553  }
3554 
3555  //
3556  // Enter SEH for probing
3557  //
3558  _SEH2_TRY
3559  {
3560  //
3561  // Validate output data
3562  //
3564  ProbeForWriteSize_t(NumberOfBytesToLock);
3565 
3566  //
3567  // Capture it
3568  //
3569  CapturedBaseAddress = *BaseAddress;
3570  CapturedBytesToLock = *NumberOfBytesToLock;
3571  }
3573  {
3574  //
3575  // Get exception code
3576  //
3578  }
3579  _SEH2_END;
3580 
3581  //
3582  // Catch illegal base address
3583  //
3584  if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
3585 
3586  //
3587  // Catch illegal region size
3588  //
3589  if ((MmUserProbeAddress - (ULONG_PTR)CapturedBaseAddress) < CapturedBytesToLock)
3590  {
3591  //
3592  // Fail
3593  //
3594  return STATUS_INVALID_PARAMETER;
3595  }
3596 
3597  //
3598  // 0 is also illegal
3599  //
3600  if (!CapturedBytesToLock) return STATUS_INVALID_PARAMETER;
3601 
3602  //
3603  // Get a reference to the process
3604  //
3607  PsProcessType,
3608  PreviousMode,
3609  (PVOID*)(&Process),
3610  NULL);
3611  if (!NT_SUCCESS(Status)) return Status;
3612 
3613  //
3614  // Check if this is system-mapped
3615  //
3616  if (MapType & MAP_SYSTEM)
3617  {
3618  //
3619  // Check for required privilege
3620  //
3622  {
3623  //
3624  // Fail: Don't have it
3625  //
3628  }
3629  }
3630 
3631  //
3632  // Check if we should attach
3633  //
3634  if (CurrentProcess != Process)
3635  {
3636  //
3637  // Do it
3638  //
3640  Attached = TRUE;
3641  }
3642 
3643  //
3644  // Call the internal function
3645  //
3646  Status = MiLockVirtualMemory(&CapturedBaseAddress,
3647  &CapturedBytesToLock,
3648  MapType);
3649 
3650  //
3651  // Detach if needed
3652  //
3654 
3655  //
3656  // Release reference
3657  //
3659 
3660  //
3661  // Enter SEH to return data
3662  //
3663  _SEH2_TRY
3664  {
3665  //
3666  // Return data to user
3667  //
3668  *BaseAddress = CapturedBaseAddress;
3669  *NumberOfBytesToLock = CapturedBytesToLock;
3670  }
3672  {
3673  //
3674  // Get exception code
3675  //
3677  }
3678  _SEH2_END;
3679 
3680  //
3681  // Return status
3682  //
3683  return Status;
3684 }
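A user-mode sketch of the lock path, assuming the Win32 wrappers VirtualLock and VirtualUnlock from kernel32, which presumably call NtLockVirtualMemory/NtUnlockVirtualMemory with MAP_PROCESS (MAP_SYSTEM is reserved for callers holding SeLockMemoryPrivilege):

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: pin a buffer into the
   process working set while it is in active use. */
BOOL WithLockedBuffer(PVOID Buffer, SIZE_T Size)
{
    if (!VirtualLock(Buffer, Size))
        return FALSE;

    /* ... touch Buffer here without risking page faults on it ... */

    return VirtualUnlock(Buffer, Size);
}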
3685 
3686 
3687 static
3688 NTSTATUS
3692  IN ULONG MapType)
3693 {
3696  PVOID EndAddress;
3697  PMMPTE PointerPte, LastPte;
3698  PMMPDE PointerPde;
3699 #if (_MI_PAGING_LEVELS >= 3)
3700  PMMPDE PointerPpe;
3701 #endif
3702 #if (_MI_PAGING_LEVELS == 4)
3703  PMMPDE PointerPxe;
3704 #endif
3705  PMMPFN Pfn1;
3706  NTSTATUS Status;
3707 
3708  /* Lock the address space */
3711 
3712  /* Make sure we still have an address space */
3714  if (CurrentProcess->VmDeleted)
3715  {
3717  goto Cleanup;
3718  }
3719 
3720  /* Check the VADs in the requested range */
3722 
3723  /* Note: only bail out if we hit an area without a VAD. If we hit an
3724  incompatible VAD, we continue, like Windows does */
3726  {
3728  goto Cleanup;
3729  }
3730 
3731  /* Get the PTE and PDE */
3732  PointerPte = MiAddressToPte(*BaseAddress);
3733  PointerPde = MiAddressToPde(*BaseAddress);
3734 #if (_MI_PAGING_LEVELS >= 3)
3735  PointerPpe = MiAddressToPpe(*BaseAddress);
3736 #endif
3737 #if (_MI_PAGING_LEVELS == 4)
3738  PointerPxe = MiAddressToPxe(*BaseAddress);
3739 #endif
3740 
3741  /* Get the last PTE */
3742  LastPte = MiAddressToPte((PVOID)((ULONG_PTR)EndAddress - 1));
3743 
3744  /* Lock the process working set */
3746 
3747  /* Loop the pages */
3748  do
3749  {
3750  /* Check for a page that is not present */
3751  if (
3752 #if (_MI_PAGING_LEVELS == 4)
3753  (PointerPxe->u.Hard.Valid == 0) ||
3754 #endif
3755 #if (_MI_PAGING_LEVELS >= 3)
3756  (PointerPpe->u.Hard.Valid == 0) ||
3757 #endif
3758  (PointerPde->u.Hard.Valid == 0) ||
3759  (PointerPte->u.Hard.Valid == 0))
3760  {
3761  /* Remember it, but keep going */
3763  }
3764  else
3765  {
3766  /* Get the PFN */
3767  Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
3768  ASSERT(Pfn1 != NULL);
3769 
3770  /* Check if all of the requested locks are present */
3771  if (((MapType & MAP_SYSTEM) && !MI_IS_LOCKED_VA(Pfn1, MAP_SYSTEM)) ||
3772  ((MapType & MAP_PROCESS) && !MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS)))
3773  {
3774  /* Remember it, but keep going */
3776 
3777  /* Check if no lock is present */
3778  if (!MI_IS_LOCKED_VA(Pfn1, MAP_PROCESS | MAP_SYSTEM))
3779  {
3780  DPRINT1("FIXME: Should remove the page from WS\n");
3781  }
3782  }
3783  }
3784 
3785  /* Go to the next PTE */
3786  PointerPte++;
3787 
3788  /* Check if we're on a PDE boundary */
3789  if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
3790 #if (_MI_PAGING_LEVELS >= 3)
3791  if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
3792 #endif
3793 #if (_MI_PAGING_LEVELS == 4)
3794  if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
3795 #endif
3796  } while (PointerPte <= LastPte);
3797 
3798  /* Check if we hit a page that was not locked */
3799  if (Status == STATUS_NOT_LOCKED)
3800  {
3801  goto CleanupWithWsLock;
3802  }
3803 
3804  /* All pages in the region were locked, so unlock them all */
3805 
3806  /* Get the PTE and PDE */
3807  PointerPte = MiAddressToPte(*BaseAddress);
3808  PointerPde = MiAddressToPde(*BaseAddress);
3809 #if (_MI_PAGING_LEVELS >= 3)
3810  PointerPpe = MiAddressToPpe(*BaseAddress);
3811 #endif
3812 #if (_MI_PAGING_LEVELS == 4)
3813  PointerPxe = MiAddressToPxe(*BaseAddress);
3814 #endif
3815 
3816  /* Loop the pages */
3817  do
3818  {
3819  /* Unlock it */
3820  Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));
3821  MI_UNLOCK_VA(Pfn1, MapType);
3822 
3823  /* Go to the next PTE */
3824  PointerPte++;
3825 
3826  /* Check if we're on a PDE boundary */
3827  if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
3828 #if (_MI_PAGING_LEVELS >= 3)
3829  if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
3830 #endif
3831 #if (_MI_PAGING_LEVELS == 4)
3832  if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
3833 #endif
3834  } while (PointerPte <= LastPte);
3835 
3836  /* Everything is done */
3838 
3839 CleanupWithWsLock:
3840 
3841  /* Release process working set */
3843 
3844 Cleanup:
3845  /* Unlock address space */
3847 
3848  return Status;
3849 }
3850 
3851 
3852 NTSTATUS
3853 NTAPI
3856  IN OUT PSIZE_T NumberOfBytesToUnlock,
3857  IN ULONG MapType)
3858 {
3861  NTSTATUS Status;
3865  PVOID CapturedBaseAddress;
3866  SIZE_T CapturedBytesToUnlock;
3867  PAGED_CODE();
3868 
3869  //
3870  // Validate flags
3871  //
3872  if ((MapType & ~(MAP_PROCESS | MAP_SYSTEM)))
3873  {
3874  //
3875  // Invalid set of flags
3876  //
3877  return STATUS_INVALID_PARAMETER;
3878  }
3879 
3880  //
3881  // At least one flag must be specified
3882  //
3883  if (!(MapType & (MAP_PROCESS | MAP_SYSTEM)))
3884  {
3885  //
3886  // No flag given
3887  //
3888  return STATUS_INVALID_PARAMETER;
3889  }
3890 
3891  //
3892  // Enter SEH for probing
3893  //
3894  _SEH2_TRY
3895  {
3896  //
3897  // Validate output data
3898  //
3900  ProbeForWriteSize_t(NumberOfBytesToUnlock);
3901 
3902  //
3903  // Capture it
3904  //
3905  CapturedBaseAddress = *BaseAddress;
3906  CapturedBytesToUnlock = *NumberOfBytesToUnlock;
3907  }
3909  {
3910  //
3911  // Get exception code
3912  //
3914  }
3915  _SEH2_END;
3916 
3917  //
3918  // Catch illegal base address
3919  //
3920  if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
3921 
3922  //
3923  // Catch illegal region size
3924  //
3925  if ((MmUserProbeAddress - (ULONG_PTR)CapturedBaseAddress) < CapturedBytesToUnlock)
3926  {
3927  //
3928  // Fail
3929  //
3930  return STATUS_INVALID_PARAMETER;
3931  }
3932 
3933  //
3934  // 0 is also illegal
3935  //
3936  if (!CapturedBytesToUnlock) return STATUS_INVALID_PARAMETER;
3937 
3938  //
3939  // Get a reference to the process
3940  //
3943  PsProcessType,
3944  PreviousMode,
3945  (PVOID*)(&Process),
3946  NULL);
3947  if (!NT_SUCCESS(Status)) return Status;
3948 
3949  //
3950  // Check if this is system-mapped
3951  //
3952  if (MapType & MAP_SYSTEM)
3953  {
3954  //
3955  // Check for required privilege
3956  //
3958  {
3959  //
3960  // Fail: Don't have it
3961  //
3964  }
3965  }
3966 
3967  //
3968  // Check if we should attach
3969  //
3970  if (CurrentProcess != Process)
3971  {
3972  //
3973  // Do it
3974  //
3976  Attached = TRUE;
3977  }
3978 
3979  //
3980  // Call the internal function
3981  //
3982  Status = MiUnlockVirtualMemory(&CapturedBaseAddress,
3983  &CapturedBytesToUnlock,
3984  MapType);
3985 
3986  //
3987  // Detach if needed
3988  //
3990 
3991  //
3992  // Release reference
3993  //
3995 
3996  //
3997  // Enter SEH to return data
3998  //
3999  _SEH2_TRY
4000  {
4001  //
4002  // Return data to user
4003  //
4004  *BaseAddress = CapturedBaseAddress;
4005  *NumberOfBytesToUnlock = CapturedBytesToUnlock;
4006  }
4008  {
4009  //
4010  // Get exception code
4011  //
4013  }
4014  _SEH2_END;
4015 
4016  //
4017  // Return status
4018  //
4019  return STATUS_SUCCESS;
4020 }
4021 
4022 NTSTATUS
4023 NTAPI
4026  IN OUT PSIZE_T NumberOfBytesToFlush,
4028 {
4030  NTSTATUS Status;
4032  PVOID CapturedBaseAddress;
4033  SIZE_T CapturedBytesToFlush;
4034  IO_STATUS_BLOCK LocalStatusBlock;
4035  PAGED_CODE();
4036 
4037  //
4038  // Check if we came from user mode
4039  //
4040  if (PreviousMode != KernelMode)
4041  {
4042  //
4043  // Enter SEH for probing
4044  //
4045  _SEH2_TRY
4046  {
4047  //
4048  // Validate all outputs
4049  //
4051  ProbeForWriteSize_t(NumberOfBytesToFlush);
4053 
4054  //
4055  // Capture them
4056  //
4057  CapturedBaseAddress = *BaseAddress;
4058  CapturedBytesToFlush = *NumberOfBytesToFlush;
4059  }
4061  {
4062  //
4063  // Get exception code
4064  //
4066  }
4067  _SEH2_END;
4068  }
4069  else
4070  {
4071  //
4072  // Capture directly
4073  //
4074  CapturedBaseAddress = *BaseAddress;
4075  CapturedBytesToFlush = *NumberOfBytesToFlush;
4076  }
4077 
4078  //
4079  // Catch illegal base address
4080  //
4081  if (CapturedBaseAddress > MM_HIGHEST_USER_ADDRESS) return STATUS_INVALID_PARAMETER;
4082 
4083  //
4084  // Catch illegal region size
4085  //
4086  if ((MmUserProbeAddress - (ULONG_PTR)CapturedBaseAddress) < CapturedBytesToFlush)
4087  {
4088  //
4089  // Fail
4090  //
4091  return STATUS_INVALID_PARAMETER;
4092  }
4093 
4094  //
4095  // Get a reference to the process
4096  //
4099  PsProcessType,
4100  PreviousMode,
4101  (PVOID*)(&Process),
4102  NULL);
4103  if (!NT_SUCCESS(Status)) return Status;
4104 
4105  //
4106  // Do it
4107  //
4109  &CapturedBaseAddress,
4110  &CapturedBytesToFlush,
4111  &LocalStatusBlock);
4112 
4113  //
4114  // Release reference
4115  //
4117 
4118  //
4119  // Enter SEH to return data
4120  //
4121  _SEH2_TRY
4122  {
4123  //
4124  // Return data to user
4125  //
4126  *BaseAddress = PAGE_ALIGN(CapturedBaseAddress);
4127  *NumberOfBytesToFlush = 0;
4128  *IoStatusBlock = LocalStatusBlock;
4129  }
4131  {
4132  }
4133  _SEH2_END;
4134 
4135  //
4136  // Return status
4137  //
4138  return Status;
4139 }
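A user-mode sketch of the flush path, assuming the Win32 wrapper FlushViewOfFile from kernel32, which reaches NtFlushVirtualMemory for the current process and writes the dirty pages of a mapped view back to its backing file:

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: flush the first Length
   bytes of a view created with MapViewOfFile; a Length of 0 flushes from the
   base address to the end of the mapping. */
BOOL FlushMappedView(PVOID ViewBase, SIZE_T Length)
{
    return FlushViewOfFile(ViewBase, Length);
}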
4140 
4141 /*
4142  * @unimplemented
4143  */
4144 NTSTATUS
4145 NTAPI
4147  IN ULONG Flags,
4150  IN PVOID *UserAddressArray,
4151  OUT PULONG_PTR EntriesInUserAddressArray,
4152  OUT PULONG Granularity)
4153 {
4155  NTSTATUS Status;
4156  PVOID EndAddress;
4158  ULONG_PTR CapturedEntryCount;
4159  PAGED_CODE();
4160 
4161  //
4162  // Check if we came from user mode
4163  //
4164  if (PreviousMode != KernelMode)
4165  {
4166  //
4167  // Enter SEH for probing
4168  //
4169  _SEH2_TRY
4170  {
4171  //
4172  // Catch illegal base address
4173  //
4175 
4176  //
4177  // Catch illegal region size
4178  //
4180  {
4181  //
4182  // Fail
4183  //
4185  }
4186 
4187  //
4188  // Validate all data
4189  //
4190  ProbeForWriteSize_t(EntriesInUserAddressArray);
4191  ProbeForWriteUlong(Granularity);
4192 
4193  //
4194  // Capture them
4195  //
4196  CapturedEntryCount = *EntriesInUserAddressArray;
4197 
4198  //
4199  // Must have a count
4200  //
4201  if (CapturedEntryCount == 0) _SEH2_YIELD(return STATUS_INVALID_PARAMETER_5);
4202 
4203  //
4204  // Can't be larger than the maximum
4205  //
4206  if (CapturedEntryCount > (MAXULONG_PTR / sizeof(ULONG_PTR)))
4207  {
4208  //
4209  // Fail
4210  //
4212  }
4213 
4214  //
4215  // Probe the actual array
4216  //
4217  ProbeForWrite(UserAddressArray,
4218  CapturedEntryCount * sizeof(PVOID),
4219  sizeof(PVOID));
4220  }
4222  {
4223  //
4224  // Get exception code
4225  //
4227  }
4228  _SEH2_END;
4229  }
4230  else
4231  {
4232  //
4233  // Capture directly
4234  //
4235  CapturedEntryCount = *EntriesInUserAddressArray;
4236  ASSERT(CapturedEntryCount != 0);
4237  }
4238 
4239  //
4240  // Check if this is a local request
4241  //
4243  {
4244  //
4245  // No need to reference the process
4246  //
4248  }
4249  else
4250  {
4251  //
4252  // Reference the target
4253  //
4256  PsProcessType,
4257  PreviousMode,
4258  (PVOID *)&Process,
4259  NULL);
4260  if (!NT_SUCCESS(Status)) return Status;
4261  }
4262 
4263  //
4264  // Compute the last address and validate it
4265  //
4266  EndAddress = (PVOID)((ULONG_PTR)BaseAddress + RegionSize - 1);
4267  if (BaseAddress > EndAddress)
4268  {
4269  //
4270  // Fail
4271  //
4274  }
4275 
4276  //
4277  // Oops :(
4278  //
4279  UNIMPLEMENTED;
4280 
4281  //
4282  // Dereference if needed
4283  //
4285 
4286  //
4287  // Enter SEH to return data
4288  //
4289  _SEH2_TRY
4290  {
4291  //
4292  // Return data to user
4293  //
4294  *EntriesInUserAddressArray = 0;
4295  *Granularity = PAGE_SIZE;
4296  }
4298  {
4299  //
4300  // Get exception code
4301  //
4303  }
4304  _SEH2_END;
4305 
4306  //
4307  // Return success
4308  //
4309  return STATUS_SUCCESS;
4310 }
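A user-mode sketch of how this service is meant to be used, assuming the Win32 wrapper GetWriteWatch from kernel32 on a MEM_WRITE_WATCH reservation; since the implementation above is still UNIMPLEMENTED, ReactOS currently reports zero touched pages:

#include <windows.h>

/* Illustrative sketch only, not part of virtual.c: query which pages of a
   write-watch region were written since the last reset; returns the number of
   entries filled into Addresses. */
ULONG_PTR QueryTouchedPages(PVOID WatchBase, SIZE_T RegionSize,
                            PVOID *Addresses, ULONG_PTR MaxEntries)
{
    ULONG_PTR Count = MaxEntries;
    DWORD Granularity;

    /* GetWriteWatch returns 0 on success; passing WRITE_WATCH_FLAG_RESET as
       the first parameter would also clear the watch state in one call. */
    if (GetWriteWatch(0, WatchBase, RegionSize, Addresses, &Count, &Granularity) != 0)
        return 0;

    return Count;
}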
4311 
4312 /*
4313  * @unimplemented
4314  */
4315 NTSTATUS
4316 NTAPI
4320 {
4321  PVOID EndAddress;
4323  NTSTATUS Status;
4326 
4327  //
4328  // Catch illegal base address
4329  //
4331 
4332  //
4333  // Catch illegal region size
4334  //
4336  {
4337  //
4338  // Fail
4339  //
4341  }
4342 
4343  //
4344  // Check if this is a local request
4345  //
4347  {
4348  //
4349  // No need to reference the process
4350  //
4352  }
4353  else
4354  {
4355  //
4356  // Reference the target
4357  //
4360  PsProcessType,
4361  PreviousMode,
4362  (PVOID *)&Process,
4363  NULL);
4364  if (!NT_SUCCESS(Status)) return Status;
4365  }
4366 
4367  //
4368  // Compute the last address and validate it
4369  //
4370  EndAddress = (PVOID)((ULONG_PTR)BaseAddress + RegionSize - 1);
4371  if (BaseAddress > EndAddress)
4372  {
4373  //
4374  // Fail
4375  //
4378  }
4379 
4380  //
4381  // Oops :(
4382  //
4383  UNIMPLEMENTED;
4384 
4385  //
4386  // Dereference if needed
4387  //
4389 
4390  //
4391  // Return success
4392  //
4393  return STATUS_SUCCESS;
4394 }
4395 
4396 NTSTATUS
4397 NTAPI
4400  IN MEMORY_INFORMATION_CLASS MemoryInformationClass,
4401  OUT PVOID MemoryInformation,
4402  IN SIZE_T MemoryInformationLength,
4404 {
4407 
4408  DPRINT("Querying class %d about address: %p\n", MemoryInformationClass, BaseAddress);
4409 
4410  /* Bail out if the address is invalid */
4412 
4413  /* Probe return buffer */
4415  if (PreviousMode != KernelMode)
4416  {
4417  _SEH2_TRY
4418  {
4419  ProbeForWrite(MemoryInformation,
4420  MemoryInformationLength,
4421  sizeof(ULONG_PTR));
4422 
4424  }
4426  {
4428  }
4429  _SEH2_END;
4430 
4431  if (!NT_SUCCESS(Status))
4432  {
4433  return Status;
4434  }
4435  }
4436 
4437  switch(MemoryInformationClass)
4438  {
4440  /* Validate the size information of the class */
4441  if (MemoryInformationLength < sizeof(MEMORY_BASIC_INFORMATION))
4442  {
4443  /* The size is invalid */
4445  }
4447  BaseAddress,
4448  MemoryInformation,
4449  MemoryInformationLength,
4450  ReturnLength);
4451  break;
4452 
4453  case MemorySectionName:
4454  /* Validate the size information of the class */
4455  if (MemoryInformationLength < sizeof(MEMORY_SECTION_NAME))
4456  {
4457  /* The size is invalid */
4459  }
4461  BaseAddress,
4462  MemoryInformation,
4463  MemoryInformationLength,
4464  ReturnLength);
4465  break;
4466  case MemoryWorkingSetList:
4468  default:
4469  DPRINT1("Unhandled memory information class %d\n", MemoryInformationClass);
4470  break;
4471  }
4472 
4473  return Status;
4474 }
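A user-mode sketch of the MemoryBasicInformation path, assuming the Win32 wrapper VirtualQueryEx from kernel32, which calls NtQueryVirtualMemory with the MEMORY_BASIC_INFORMATION buffer that is probed above:

#include <windows.h>
#include <stdio.h>

/* Illustrative sketch only, not part of virtual.c: dump the basic state of
   the region containing Address in the target process. */
VOID DumpRegion(HANDLE hProcess, LPCVOID Address)
{
    MEMORY_BASIC_INFORMATION Info;

    if (VirtualQueryEx(hProcess, Address, &Info, sizeof(Info)) != 0)
    {
        printf("Base %p, Size 0x%Ix, State 0x%lx, Protect 0x%lx\n",
               Info.BaseAddress, Info.RegionSize, Info.State, Info.Protect);
    }
}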
4475 
4476 /*
4477  * @implemented
4478  */
4479 NTSTATUS
4480 NTAPI
4482  IN OUT PVOID* UBaseAddress,
4484  IN OUT PSIZE_T URegionSize,
4486  IN ULONG Protect)
4487 {
4490  PMMVAD Vad = NULL, FoundVad;
4491  NTSTATUS Status;
4493  PVOID PBaseAddress;
4494  ULONG_PTR PRegionSize, StartingAddress, EndingAddress;
4495  ULONG_PTR HighestAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
4498  PETHREAD CurrentThread = PsGetCurrentThread();
4500  ULONG ProtectionMask, QuotaCharge = 0, QuotaFree = 0;
4501  BOOLEAN Attached = FALSE, ChangeProtection = FALSE;
4502  MMPTE TempPte;
4503  PMMPTE PointerPte, LastPte;
4504  PMMPDE PointerPde;
4506  PAGED_CODE();
4507 
4508  /* Check for valid Zero bits */
4509  if (ZeroBits > MI_MAX_ZERO_BITS)
4510  {
4511  DPRINT1("Too many zero bits\n");
4513  }
4514 
4515  /* Check for valid Allocation Types */
4518  {
4519  DPRINT1("Invalid Allocation Type\n");
4521  }
4522 
4523  /* Check for at least one of these Allocation Types to be set */
4525  {
4526  DPRINT1("No memory allocation base type\n");
4528  }
4529 
4530  /* MEM_RESET is an exclusive flag, make sure that is valid too */
4532  {
4533  DPRINT1("Invalid use of MEM_RESET\n");
4535  }
4536 
4537  /* Check if large pages are being used */
4539  {
4540  /* Large page allocations MUST be committed */
4541  if (!(AllocationType & MEM_COMMIT))
4542  {
4543  DPRINT1("Must supply MEM_COMMIT with MEM_LARGE_PAGES\n");
4545  }
4546 
4547  /* These flags are not allowed with large page allocations */
4549  {
4550  DPRINT1("Using illegal flags with MEM_LARGE_PAGES\n");
4552  }
4553  }
4554 
4555  /* MEM_WRITE_WATCH can only be used if MEM_RESERVE is also used */
4557  {
4558  DPRINT1("MEM_WRITE_WATCH used without MEM_RESERVE\n");
4560  }
4561 
4562  /* Check for valid MEM_PHYSICAL usage */
4564  {
4565  /* MEM_PHYSICAL can only be used if MEM_RESERVE is also used */
4566  if (!(AllocationType & MEM_RESERVE))
4567  {
4568  DPRINT1("MEM_PHYSICAL used without MEM_RESERVE\n");
4570  }
4571 
4572  /* Only these flags are allowed with MEM_PHYSICAL */
4574  {
4575  DPRINT1("Using illegal flags with MEM_PHYSICAL\n");
4577  }
4578 
4579  /* Then make sure PAGE_READWRITE is used */
4580  if (Protect != PAGE_READWRITE)
4581  {
4582  DPRINT1("MEM_PHYSICAL used without PAGE_READWRITE\n");
4584  }
4585  }
4586 
4587  /* Calculate the protection mask and make sure it's valid */
4588  ProtectionMask = MiMakeProtectionMask(Protect);
4589  if (ProtectionMask == MM_INVALID_PROTECTION)
4590  {
4591  DPRINT1("Invalid protection mask\n");
4593  }
4594 
4595  /* Enter SEH */
4596  _SEH2_TRY
4597  {
4598  /* Check for user-mode parameters */
4599  if (PreviousMode != KernelMode)
4600  {
4601  /* Make sure they are writable */
4602  ProbeForWritePointer(UBaseAddress);
4603  ProbeForWriteSize_t(URegionSize);
4604  }
4605 
4606  /* Capture their values */
4607  PBaseAddress = *UBaseAddress;
4608  PRegionSize = *URegionSize;
4609  }
4611  {
4612  /* Return the exception code */
4614  }
4615  _SEH2_END;
4616 
4617  /* Make sure the allocation isn't past the VAD area */
4618  if (PBaseAddress > MM_HIGHEST_VAD_ADDRESS)
4619  {
4620  DPRINT1("Virtual allocation base above User Space\n");
4622  }
4623 
4624  /* Make sure the allocation wouldn't overflow past the VAD area */
4625  if ((((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1) - (ULONG_PTR)PBaseAddress) < PRegionSize)
4626  {
4627  DPRINT1("Region size would overflow into kernel-memory\n");
4629  }
4630 
4631  /* Make sure there's a size specified */
4632  if (!PRegionSize)
4633  {
4634  DPRINT1("Region size is invalid (zero)\n");
4636  }
4637 
4638  //
4639  // If this is for the current process, just use PsGetCurrentProcess
4640  //
4642  {
4644  }
4645  else
4646  {
4647  //
4648  // Otherwise, reference the process with VM rights and attach to it if
4649  // this isn't the current process. We must attach because we'll be touching
4650  // PTEs and PDEs that belong to user-mode memory, and also touching the
4651  // Working Set, which is stored in Hyperspace.
4652  //
4655  PsProcessType,
4656  PreviousMode,
4657  (PVOID*)&Process,
4658  NULL);
4659  if (!NT_SUCCESS(Status)) return Status;
4660  if (CurrentProcess != Process)
4661  {
4663  Attached = TRUE;
4664  }
4665  }
4666 
4667  DPRINT("NtAllocateVirtualMemory: Process 0x%p, Address 0x%p, Zerobits %lu , RegionSize 0x%x, Allocation type 0x%x, Protect 0x%x.\n",
4668  Process, PBaseAddress, ZeroBits, PRegionSize, AllocationType, Protect);
4669 
4670  //
4671  // Check for large page allocations and make sure that the required privilege
4672  // is being held, before attempting to handle them.
4673  //
4674  if ((AllocationType & MEM_LARGE_PAGES) &&
4676  {
4677  /* Fail without it */
4678  DPRINT1("Privilege not held for MEM_LARGE_PAGES\n");
4680  goto FailPathNoLock;
4681  }
4682 
4683  //
4684  // Fail on the things we don't yet support
4685  //
4687  {
4688  DPRINT1("MEM_LARGE_PAGES not supported\n");
4690  goto FailPathNoLock;
4691  }
4693  {
4694  DPRINT1("MEM_PHYSICAL not supported\n");
4696  goto FailPathNoLock;
4697  }
4699  {
4700  DPRINT1("MEM_WRITE_WATCH not supported\n");
4702  goto FailPathNoLock;
4703  }
4704 
4705  //
4706  // Check if the caller is reserving memory, or committing memory and letting
4707  // us pick the base address
4708  //
4709  if (!(PBaseAddress) || (AllocationType & MEM_RESERVE))
4710  {
4711  //
4712  // Do not allow COPY_ON_WRITE through this API
4713  //
4715  {
4716  DPRINT1("Copy on write not allowed through this path\n");
4718  goto FailPathNoLock;
4719  }
4720 
4721  //
4722  // Does the caller have an address in mind, or is this a blind commit?
4723  //
4724  if (!PBaseAddress)
4725  {
4726  //
4727  // This is a blind commit, all we need is the region size
4728  //
4729  PRegionSize = ROUND_TO_PAGES(PRegionSize);
4730  EndingAddress = 0;
4731  StartingAddress = 0;
4732 
4733  //
4734  // Check if ZeroBits were specified
4735  //
4736  if (ZeroBits != 0)
4737  {
4738  //
4739  // Calculate the highest address and check if it's valid
4740  //
4741  HighestAddress = MAXULONG_PTR >> ZeroBits;
4742  if (HighestAddress > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
4743  {
4745  goto FailPathNoLock;
4746  }
4747  }
4748  }
4749  else
4750  {
4751  //
4752  // This is a reservation, so compute the starting address on the
4753  // expected 64KB granularity, and see where the ending address will
4754  // fall based on the aligned address and the passed in region size
4755  //
4756  EndingAddress = ((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1);
4757  PRegionSize = EndingAddress + 1 - ROUND_DOWN((ULONG_PTR)PBaseAddress, _64K);
4758  StartingAddress = (ULONG_PTR)PBaseAddress;
4759  }
4760 
4761  //
4762  // Allocate and initialize the VAD
4763  //
4764  Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'SdaV');
4765  if (Vad == NULL)
4766  {
4767  DPRINT1("Failed to allocate a VAD!\n");
4769  goto FailPathNoLock;
4770  }
4771 
4772  RtlZeroMemory(Vad, sizeof(MMVAD_LONG));
4773  if (AllocationType & MEM_COMMIT) Vad->u.VadFlags.MemCommit = 1;
4774  Vad->u.VadFlags.Protection = ProtectionMask;
4775  Vad->u.VadFlags.PrivateMemory = 1;
4776  Vad->ControlArea = NULL; // For Memory-Area hack
4777 
4778  //
4779  // Insert the VAD
4780  //
4781  Status = MiInsertVadEx(Vad,
4782  &StartingAddress,
4783  PRegionSize,
4784  HighestAddress,
4786  AllocationType);
4787  if (!NT_SUCCESS(Status))
4788  {
4789  DPRINT1("Failed to insert the VAD!\n");
4790  goto FailPathNoLock;
4791  }
4792 
4793  //
4794  // Detach and dereference the target process if
4795  // it was different from the current process
4796  //
4799 
4800  //
4801  // Use SEH to write back the base address and the region size. In the case
4802  // of an exception, we do not return the exception code, as the memory
4803  // *has* been allocated. The caller would now have to call VirtualQuery
4804  // or do some other similar trick to actually find out where its memory
4805  // allocation ended up
4806  //
4807  _SEH2_TRY
4808  {
4809  *URegionSize = PRegionSize;
4810  *UBaseAddress = (PVOID)StartingAddress;
4811  }
4813  {
4814  //
4815  // Ignore exception!
4816  //
4817  }
4818  _SEH2_END;
4819  DPRINT("Reserved %x bytes at %p.\n", PRegionSize, StartingAddress);
4820  return STATUS_SUCCESS;
4821  }
4822 
4823  //
4824  // This is a MEM_COMMIT on top of an existing address which must have been
4825  // MEM_RESERVED already. Compute the start and ending base addresses based
4826  // on the user input, and then compute the actual region size once all the
4827  // alignments have been done.
4828  //
4829  EndingAddress = (((ULONG_PTR)PBaseAddress + PRegionSize - 1) | (PAGE_SIZE - 1));
4830  StartingAddress = (ULONG_PTR)PAGE_ALIGN(PBaseAddress);
4831  PRegionSize = EndingAddress - StartingAddress + 1;
4832 
4833  //
4834  // Lock the address space and make sure the process isn't already dead
4835  //
4838  if (Process->VmDeleted)
4839  {
4840  DPRINT1("Process is dying\n");
4842  goto FailPath;
4843  }
4844 
4845  //
4846  // Get the VAD for this address range, and make sure it exists
4847  //
4848  Result = MiCheckForConflictingNode(StartingAddress >> PAGE_SHIFT,
4849  EndingAddress >> PAGE_SHIFT,
4850  &Process->VadRoot,
4851  (PMMADDRESS_NODE*)&FoundVad);
4852  if (Result != TableFoundNode)
4853  {
4854  DPRINT1("Could not find a VAD for this allocation\n");
4856  goto FailPath;
4857  }
4858 
4859  if ((AllocationType & MEM_RESET) == MEM_RESET)
4860  {
4862  DPRINT("MEM_RESET not supported\n");
4864  goto FailPath;
4865  }
4866 
4867  //
4868  // These kinds of VADs are illegal for this Windows function when trying to
4869  // commit an existing range
4870  //
4871  if ((FoundVad->u.VadFlags.VadType == VadAwe) ||
4872  (FoundVad->u.VadFlags.VadType == VadDevicePhysicalMemory) ||
4873  (FoundVad->u.VadFlags.VadType == VadLargePages))
4874  {
4875  DPRINT1("Illegal VAD for attempting a MEM_COMMIT\n");
4877  goto FailPath;
4878  }
4879 
4880  //
4881  // Make sure that this address range actually fits within the VAD for it
4882  //
4883  if (((StartingAddress >> PAGE_SHIFT) < FoundVad->StartingVpn) ||
4884  ((EndingAddress >> PAGE_SHIFT) > FoundVad->EndingVpn))
4885  {
4886  DPRINT1("Address range does not fit into the VAD\n");
4888  goto FailPath;
4889  }
4890 
4891  //
4892  // Make sure this is an ARM3 section
4893  //
4895  ASSERT(MemoryArea != NULL);
4897  {
4898  DPRINT1("Illegal commit of non-ARM3 section!\n");
4900  goto FailPath;
4901  }
4902 
4903  // Is this a previously reserved section being committed? If so, enter the
4904  // special section path
4905  //
4906  if (FoundVad->u.VadFlags.PrivateMemory == FALSE)
4907  {
4908  //
4909  // You cannot commit large page sections through this API
4910  //
4911  if (FoundVad->u.VadFlags.VadType == VadLargePageSection)
4912  {
4913  DPRINT1("Large page sections cannot be VirtualAlloc'd\n");
4915  goto FailPath;
4916  }
4917 
4918  //
4919  // You can only use caching flags on a rotate VAD
4920  //
4921  if ((Protect & (PAGE_NOCACHE | PAGE_WRITECOMBINE)) &&
4922  (FoundVad->u.VadFlags.VadType != VadRotatePhysical))
4923  {
4924  DPRINT1("Cannot use caching flags with anything but rotate VADs\n");
4926  goto FailPath;
4927  }
4928 
4929  //
4930  // We should make sure that the section's permissions aren't being
4931  // messed with
4932  //
4933  if (FoundVad->u.VadFlags.NoChange)
4934  {
4935  //
4936  // Make sure it's okay to touch it
4937  // Note: The Windows 2003 kernel has a bug here, passing the
4938  // unaligned base address together with the aligned size,
4939  // potentially covering a region larger than the actual allocation.
4940  // Might be exposed through NtGdiCreateDIBSection w/ a section handle;
4941  // for now we keep this behavior.
4942  // TODO: analyze possible implications, create test case
4943  //
4944  Status = MiCheckSecuredVad(FoundVad,
4945  PBaseAddress,
4946  PRegionSize,
4947  ProtectionMask);
4948  if (!NT_SUCCESS(Status))
4949  {
4950  DPRINT1("Secured VAD being messed around with\n");
4951  goto FailPath;
4952  }
4953  }
4954 
4955  //
4956  // ARM3 does not support file-backed sections, only shared memory
4957  //
4958  ASSERT(FoundVad->ControlArea->FilePointer == NULL);
4959 
4960  //
4961  // Rotate VADs cannot be guard pages or inaccessible, nor copy on write
4962  //
4963  if ((FoundVad->u.VadFlags.VadType == VadRotatePhysical) &&
4965  {
4966  DPRINT1("Invalid page protection for rotate VAD\n");
4968  goto FailPath;
4969  }
4970 
4971  //
4972  // Compute PTE addresses and the quota charge, then grab the commit lock
4973  //
4974  PointerPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, StartingAddress >> PAGE_SHIFT);
4975  LastPte = MI_GET_PROTOTYPE_PTE_FOR_VPN(FoundVad, EndingAddress >> PAGE_SHIFT);
4976  QuotaCharge = (ULONG)(LastPte - PointerPte + 1);
4978 
4979  //
4980  // Get the segment template PTE and start looping each page
4981  //
4982  TempPte = FoundVad->ControlArea->Segment->SegmentPteTemplate;
4983  ASSERT(TempPte.u.Long != 0);
4984  while (PointerPte <= LastPte)
4985  {
4986  //
4987  // For each non-already-committed page, write the invalid template PTE
4988  //
4989  if (PointerPte->u.Long == 0)
4990  {
4991  MI_WRITE_INVALID_PTE(PointerPte, TempPte);
4992  }
4993  else
4994  {
4995  QuotaFree++;
4996  }
4997  PointerPte++;
4998  }
4999 
5000  //
5001  // Now do the commit accounting and release the lock
5002  //
5003  ASSERT(QuotaCharge >= QuotaFree);
5004  QuotaCharge -= QuotaFree;
5005  FoundVad->ControlArea->Segment->NumberOfCommittedPages += QuotaCharge;
5007 
5008  //
5009  // We are done with committing the section pages
5010  //
5012  goto FailPath;
5013  }
5014 
5015  //
5016  // This is a specific ReactOS check because we only use normal VADs
5017  //
5018  ASSERT(FoundVad->u.VadFlags.VadType == VadNone);
5019 
5020  //
5021  // While this is an actual Windows check
5022  //
5023  ASSERT(FoundVad->u.VadFlags.VadType != VadRotatePhysical);
5024 
5025  //
5026  // Throw out attempts to use copy-on-write through this API path
5027  //
5028  if ((Protect & PAGE_WRITECOPY) || (Protect & PAGE_EXECUTE_WRITECOPY))
5029  {
5030  DPRINT1("Write copy attempted when not allowed\n");
5031  Status = STATUS_INVALID_PAGE_PROTECTION;
5032  goto FailPath;
5033  }
5034 
5035  //
5036  // Initialize a demand-zero PTE
5037  //
5038  TempPte.u.Long = 0;
5039  TempPte.u.Soft.Protection = ProtectionMask;
5040  ASSERT(TempPte.u.Long != 0);
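  // Only the Soft.Protection field of this software PTE is populated; the
  // backing zero page is materialized by the page fault handler on first access.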
5041 
5042  //
5043  // Get the PTE, PDE and the last PTE for this address range
5044  //
5045  PointerPde = MiAddressToPde(StartingAddress);
5046  PointerPte = MiAddressToPte(StartingAddress);
5047  LastPte = MiAddressToPte(EndingAddress);
5048 
5049  //
5050  // Update the commit charge in the VAD as well as in the process, and check
5051  // if this commit charge was now higher than the last recorded peak, in which
5052  // case we also update the peak
5053  //
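  // For example, a commit spanning 16 PTEs adds 16 pages to both the VAD's and
  // the process' commit charge; the peak only moves if the new total exceeds
  // the previously recorded maximum.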
5054  FoundVad->u.VadFlags.CommitCharge += (1 + LastPte - PointerPte);
5055  Process->CommitCharge += (1 + LastPte - PointerPte);
5056  if (Process->CommitCharge > Process->CommitChargePeak)
5057  {
5058  Process->CommitChargePeak = Process->CommitCharge;
5059  }
5060 
5061  //
5062  // Lock the working set while we play with user pages and page tables
5063  //
5064  MiLockProcessWorkingSetUnsafe(Process, CurrentThread);
5065 
5066  //
5067  // Make the current page table valid, and then loop each page within it
5068  //
5069  MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
5070  while (PointerPte <= LastPte)
5071  {
5072  //
5073  // Have we crossed into a new page table?
5074  //
5075  if (MiIsPteOnPdeBoundary(PointerPte))
5076  {
5077  //
5078  // Get the PDE and now make it valid too
5079  //
5080  PointerPde = MiPteToPde(PointerPte);
5081  MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
5082  }
5083 
5084  //
5085  // Is this a zero PTE as expected?
5086  //
5087  if (PointerPte->u.Long == 0)
5088  {
5089  //
5090  // First increment the count of pages in the page table for this
5091  // process
5092  //
5093  MiIncrementPageTableReferences(MiPteToAddress(PointerPte));
5094 
5095  //
5096  // And now write the invalid demand-zero PTE as requested
5097  //
5098  MI_WRITE_INVALID_PTE(PointerPte, TempPte);
5099  }
5100  else if (PointerPte->u.Long == MmDecommittedPte.u.Long)
5101  {
5102  //
5103  // If the PTE was already decommitted, there is nothing else to do
5104  // but to write the new demand-zero PTE
5105  //
5106  MI_WRITE_INVALID_PTE(PointerPte, TempPte);
5107  }
5108  else if (!(ChangeProtection) && (Protect != MiGetPageProtection(PointerPte)))
5109  {
5110  //
5111  // We don't handle these scenarios yet
5112  //
5113  if (PointerPte->u.Soft.Valid == 0)
5114  {
5115  ASSERT(PointerPte->u.Soft.Prototype == 0);
5116  ASSERT((PointerPte->u.Soft.PageFileHigh == 0) || (PointerPte->u.Soft.Transition == 1));
5117  }
5118 
5119  //
5120  // There's a change in protection, remember this for later, but do
5121  // not yet handle it.
5122  //
5123  ChangeProtection = TRUE;
5124  }
5125 
5126  //
5127  // Move to the next PTE
5128  //
5129  PointerPte++;
5130  }
5131 
5132  //
5133  // Release the working set lock, unlock the address space, and detach from
5134  // the target process if it was not the current process. Also dereference the
5135  // target process if it was referenced by handle.
5136  //
5137  MiUnlockProcessWorkingSetUnsafe(Process, CurrentThread);
5138  Status = STATUS_SUCCESS;
5139 FailPath:
5140  MmUnlockAddressSpace(AddressSpace);
5141 
5142  if (!NT_SUCCESS(Status))
5143  {
5144  if (Vad != NULL)
5145  {
5146  ExFreePoolWithTag(Vad, 'SdaV');
5147  }
5148  }
5149 
5150  //
5151  // Check if we need to update the protection
5152  //
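  // The protection mismatch was only recorded while walking the PTEs under the
  // working set lock; it is applied here in a single MiProtectVirtualMemory
  // call covering the caller's entire region.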
5153  if (ChangeProtection)
5154  {
5155  PVOID ProtectBaseAddress = (PVOID)StartingAddress;
5156  SIZE_T ProtectSize = PRegionSize;
5157  ULONG OldProtection;
5158 
5159  //
5160  // Change the protection of the region
5161  //
5162  MiProtectVirtualMemory(Process,
5163  &ProtectBaseAddress,
5164  &ProtectSize,
5165  Protect,
5166  &OldProtection);
5167  }
5168 
5169 FailPathNoLock:
5170  if (Attached) KeUnstackDetachProcess(&ApcState);
5171  if (ProcessHandle != NtCurrentProcess()) ObDereferenceObject(Process);
5172 
5173  //
5174  // Only write back results on success
5175  //
5176  if (NT_SUCCESS(Status))
5177  {
5178  //
5179  // Use SEH to write back the base address and the region size. In the case
5180  // of an exception, we strangely return the exception code, even
5181  // though the memory *has* been allocated. This mimics Windows behavior and
5182  // there is not much we can do about it.
5183  //
5184  _SEH2_TRY
5185  {
5186  *URegionSize = PRegionSize;
5187  *UBaseAddress = (PVOID)StartingAddress;
5188  }
5189  _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
5190  {
5191  Status = _SEH2_GetExceptionCode();
5192  }
5193  _SEH2_END;
5194  }
5195 
5196  return Status;
5197 }
5198 
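/*
 * Illustrative sketch, not part of virtual.c: a minimal user-mode caller that
 * drives the paths implemented above -- reserve a range, commit one page of
 * it, touch it, then decommit and release it again through NtFreeVirtualMemory.
 * Only the documented native prototypes exported by ntdll are assumed; they
 * are resolved at run time so the snippet stays self-contained, and the helper
 * and typedef names below are made up for the example.
 */
#include <windows.h>

typedef LONG (NTAPI *PFN_NT_ALLOCATE)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
typedef LONG (NTAPI *PFN_NT_FREE)(HANDLE, PVOID*, PSIZE_T, ULONG);

static VOID ReserveCommitFreeDemo(VOID)
{
    HMODULE Ntdll = GetModuleHandleW(L"ntdll.dll");
    PFN_NT_ALLOCATE NtAllocate;
    PFN_NT_FREE NtFree;
    PVOID Base = NULL, CommitBase;
    SIZE_T ReserveSize = 16 * 4096, CommitSize = 4096;

    if (!Ntdll) return;
    NtAllocate = (PFN_NT_ALLOCATE)GetProcAddress(Ntdll, "NtAllocateVirtualMemory");
    NtFree = (PFN_NT_FREE)GetProcAddress(Ntdll, "NtFreeVirtualMemory");
    if (!NtAllocate || !NtFree) return;

    /* Reserve only: this creates the VAD but commits nothing yet */
    if (NtAllocate(GetCurrentProcess(), &Base, 0, &ReserveSize,
                   MEM_RESERVE, PAGE_READWRITE) < 0) return;

    /* Commit the first page: the "previously reserved" private-memory path
       above writes a demand-zero PTE and charges a single page of commit */
    CommitBase = Base;
    if (NtAllocate(GetCurrentProcess(), &CommitBase, 0, &CommitSize,
                   MEM_COMMIT, PAGE_READWRITE) >= 0)
    {
        ((UCHAR *)CommitBase)[0] = 1;  /* first touch faults in a zero page */

        /* Return the page to the merely-reserved state */
        NtFree(GetCurrentProcess(), &CommitBase, &CommitSize, MEM_DECOMMIT);
    }

    /* Release the whole reservation: size 0 plus MEM_RELEASE frees the VAD */
    ReserveSize = 0;
    NtFree(GetCurrentProcess(), &Base, &ReserveSize, MEM_RELEASE);
}
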
5199 /*
5200  * @implemented
5201  */
5202 NTSTATUS
5203 NTAPI
5204 NtFreeVirtualMemory(IN HANDLE ProcessHandle,
5205  IN PVOID* UBaseAddress,
5206  IN PSIZE_T URegionSize,
5207  IN ULONG FreeType)
5208 {
5209  PMEMORY_AREA MemoryArea;
5210  SIZE_T PRegionSize;
5211  PVOID PBaseAddress;
5212  LONG_PTR AlreadyDecommitted, CommitReduction = 0;
5213  ULONG_PTR StartingAddress, EndingAddress;
5214  PMMVAD Vad;
5215  NTSTATUS Status;
5216  PEPROCESS Process;
5217  PMMSUPPORT AddressSpace;
5218  PETHREAD CurrentThread = PsGetCurrentThread();
5219  PEPROCESS CurrentProcess = PsGetCurrentProcess();
5220  KPROCESSOR_MODE PreviousMode = KeGetPreviousMode();
5221  BOOLEAN Attached = FALSE;
5222  KAPC_STATE ApcState;
5223  PAGED_CODE();
5224 
5225  //
5226  // Only two flags are supported, exclusively.
5227  //
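  // MEM_DECOMMIT returns committed pages to the reserved state while keeping
  // the VAD; MEM_RELEASE destroys the reservation itself.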
5228  if ((FreeType != MEM_RELEASE) && (FreeType != MEM_DECOMMIT))
5229  {
5230  DPRINT1("Invalid FreeType (0x%08lx)\n", FreeType);
5231  return STATUS_INVALID_PARAMETER_4;
5232  }
5233 
5234  //
5235  // Enter SEH for the probe and capture. On failure, return the exception
5236  // code back to the caller.
5237  //
5238  _SEH2_TRY
5239  {
5240  //
5241  // Check for user-mode parameters and make sure that they are writeable
5242  //
5243  if (PreviousMode != KernelMode)
5244  {
5245  ProbeForWritePointer(UBaseAddress);
5246  ProbeForWriteUlong(URegionSize);
5247  }
5248 
5249  //
5250  // Capture the current values
5251  //
5252  PBaseAddress = *UBaseAddress;
5253  PRegionSize = *URegionSize;
5254  }
5255  _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
5256  {
5257  _SEH2_YIELD(return _SEH2_GetExceptionCode());
5258  }
5259  _SEH2_END;
5260 
5261  //
5262  // Make sure the allocation isn't past the user area
5263  //
5264  if (PBaseAddress >= MM_HIGHEST_USER_ADDRESS)
5265  {
5266  DPRINT1("Virtual free base above User Space\n");
5268  }
5269 
5270  //
5271  // Make sure the allocation wouldn't overflow past the user area
5272  //
5273  if (((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (ULONG_PTR)PBaseAddress) < PRegionSize)
5274  {
5275  DPRINT1("Region size would overflow into kernel-memory\n");
5277  }
5278 
5279  //
5280  // If this is for the current process, just use PsGetCurrentProcess
5281  //
5282  if (ProcessHandle == NtCurrentProcess())
5283  {
5284  Process = CurrentProcess;
5285  }
5286  else
5287  {
5288  //
5289  // Otherwise, reference the process with VM rights and attach to it if
5290  // this isn't the current process. We must attach because we'll be touching
5291  // PTEs and PDEs that belong to user-mode memory, and also touching the
5292  // Working Set which is stored in Hyperspace.
5293  //
5294  Status = ObReferenceObjectByHandle(ProcessHandle,
5295  PROCESS_VM_OPERATION,
5296  PsProcessType,
5297  PreviousMode,
5298  (PVOID*)&Process,
5299  NULL);
5300  if (!NT_SUCCESS(Status)) return Status;
5301  if (CurrentProcess != Process)
5302  {
5303  KeStackAttachProcess(&Process->Pcb, &ApcState);
5304  Attached = TRUE;
5305  }
5306  }
5307 
5308  DPRINT("NtFreeVirtualMemory: Process 0x%p, Address 0x%p, Size 0x%Ix, FreeType 0x%08lx\n",
5309  Process, PBaseAddress, PRegionSize, FreeType);
5310 
5311  //