/*
 * ReactOS 0.4.15-dev-492-ga1108f6 -- allocsup.c
 * (doxygen page header from the documentation site; not part of the code)
 */
1 /*++
2 
3 Copyright (c) 1990-2000 Microsoft Corporation
4 
5 Module Name:
6 
7  AllocSup.c
8 
9 Abstract:
10 
11  This module implements the Allocation support routines for Fat.
12 
13 
14 --*/
15 
16 #include "fatprocs.h"
17 
18 //
19 // The Bug check file id for this module
20 //
21 
22 #define BugCheckFileId (FAT_BUG_CHECK_ALLOCSUP)
23 
24 //
25 // Local debug trace level
26 //
27 
28 #define Dbg (DEBUG_TRACE_ALLOCSUP)
29 
//
// Classic function-like min macro: both arguments may be evaluated twice,
// so never pass expressions with side effects (e.g. FatMin(i++, j)).
//
30 #define FatMin(a, b) ((a) < (b) ? (a) : (b))
31 
32 //
33 // Define prefetch page count for the FAT
34 //
35 
// Number of pages read ahead when scanning the FAT (0x100 pages).
36 #define FAT_PREFETCH_PAGE_COUNT 0x100
37 
38 //
39 // Local support routine prototypes
40 //
41 
42 VOID
44  IN PIRP_CONTEXT IrpContext,
45  IN PVCB Vcb,
49  );
50 
51 VOID
53  IN PIRP_CONTEXT IrpContext,
54  IN PVCB Vcb,
55  IN ULONG StartingFatIndex,
56  IN ULONG ClusterCount,
57  IN BOOLEAN ChainTogether
58  );
59 
60 UCHAR
61 FatLogOf(
62  IN ULONG Value
63  );
64 
65 //
66 // Note that the KdPrint below will ONLY fire when the assert does. Leave it
67 // alone.
68 //
69 
//
// Checked-build sanity macro: verifies that the cached free-cluster count
// of the current FAT window agrees with the number of clear bits in the
// free cluster bitmap. Expands to nothing on free builds. (Comments must
// stay outside the macro body; it is backslash-continued.)
//
70 #if DBG
71 #define ASSERT_CURRENT_WINDOW_GOOD(VCB) { \
72  ULONG FreeClusterBitMapClear; \
73  NT_ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL ); \
74  FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
75  if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) { \
76  KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n", \
77  (VCB)->CurrentWindow->ClustersFree, \
78  FreeClusterBitMapClear)); \
79  } \
80  NT_ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear ); \
81 }
82 #else
83 #define ASSERT_CURRENT_WINDOW_GOOD(VCB)
84 #endif
85 
86 //
87 // The following macros provide a convenient way of hiding the details
88 // of bitmap allocation schemes.
89 //
90 
91 
92 //
93 // VOID
94 // FatLockFreeClusterBitMap (
95 // IN PVCB Vcb
96 // );
97 //
98 
//
// Acquire the fast mutex guarding the free cluster bitmap; asserts APCs
// are disabled (required for the Unsafe fast-mutex variants) and checks
// window/bitmap consistency on entry.
//
99 #define FatLockFreeClusterBitMap(VCB) { \
100  NT_ASSERT(KeAreApcsDisabled()); \
101  ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
102  ASSERT_CURRENT_WINDOW_GOOD(VCB) \
103 }
104 
105 //
106 // VOID
107 // FatUnlockFreeClusterBitMap (
108 // IN PVCB Vcb
109 // );
110 //
111 
//
// Release counterpart; re-checks consistency before dropping the mutex.
//
112 #define FatUnlockFreeClusterBitMap(VCB) { \
113  ASSERT_CURRENT_WINDOW_GOOD(VCB) \
114  NT_ASSERT(KeAreApcsDisabled()); \
115  ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
116 }
117 
118 //
119 // BOOLEAN
120 // FatIsClusterFree (
121 // IN PIRP_CONTEXT IrpContext,
122 // IN PVCB Vcb,
123 // IN ULONG FatIndex,
124 // IN ULONG FatIndex
125 // );
126 //
127 
//
// Bit (FatIndex - 2) of the bitmap is clear iff the cluster is free;
// the -2 bias accounts for the two reserved FAT entries (0 and 1).
//
129 
130 //
131 // VOID
132 // FatFreeClusters (
133 // IN PIRP_CONTEXT IrpContext,
134 // IN PVCB Vcb,
135 // IN ULONG FatIndex,
136 // IN ULONG ClusterCount
137 // );
138 //
139 
//
// Mark a run of FAT entries available on disk; single-cluster case takes
// the cheaper FatSetFatEntry path instead of FatSetFatRun.
//
140 #define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
141  if ((CLUSTER_COUNT) == 1) { \
142  FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
143  } else { \
144  FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
145  } \
146 }
147 
148 //
149 // VOID
150 // FatAllocateClusters (
151 // IN PIRP_CONTEXT IrpContext,
152 // IN PVCB Vcb,
153 // IN ULONG FatIndex,
154 // IN ULONG ClusterCount
155 // );
156 //
157 
//
// Allocate counterpart: a single cluster becomes an end-of-chain entry,
// a run is chained together on disk by FatSetFatRun.
//
158 #define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
159  if ((CLUSTER_COUNT) == 1) { \
160  FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
161  } else { \
162  FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
163  } \
164 }
165 
166 //
167 // VOID
168 // FatUnreserveClusters (
169 // IN PIRP_CONTEXT IrpContext,
170 // IN PVCB Vcb,
171 // IN ULONG FatIndex,
172 // IN ULONG ClusterCount
173 // );
174 //
175 
//
// Clear bitmap bits for a run (bitmap index = FAT index - 2) and pull the
// allocation hint back if the freed run starts below it.
//
176 #define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
177  NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
178  NT_ASSERT( (FAT_INDEX) >= 2); \
179  RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
180  if ((FAT_INDEX) < (VCB)->ClusterHint) { \
181  (VCB)->ClusterHint = (FAT_INDEX); \
182  } \
183 }
184 
185 //
186 // VOID
187 // FatReserveClusters (
188 // IN PIRP_CONTEXT IrpContext,
189 // IN PVCB Vcb,
190 // IN ULONG FatIndex,
191 // IN ULONG ClusterCount
192 // );
193 //
194 // Handle wrapping the hint back to the front.
195 //
196 
//
// Set bitmap bits for a run, then advance ClusterHint past the run; if
// the cluster after the run is in use, hunt forward for the next clear
// bit (RtlFindClearBits returns -1 on failure, making the +2 yield 1,
// which is reset to the first valid cluster, 2).
//
197 #define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
198  ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
199  NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
200  NT_ASSERT( (FAT_INDEX) >= 2); \
201  RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
202  \
203  if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
204  _AfterRun = 2; \
205  } \
206  if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
207  (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
208  if (1 == (VCB)->ClusterHint) { \
209  (VCB)->ClusterHint = 2; \
210  } \
211  } \
212  else { \
213  (VCB)->ClusterHint = _AfterRun; \
214  } \
215 }
216 
217 //
218 // ULONG
219 // FatFindFreeClusterRun (
220 // IN PIRP_CONTEXT IrpContext,
221 // IN PVCB Vcb,
222 // IN ULONG ClusterCount,
223 // IN ULONG AlternateClusterHint
224 // );
225 //
226 // Do a special check if only one cluster is desired.
227 //
228 
//
// Fast path: a one-cluster request whose hint is already free returns the
// hint directly; otherwise search the bitmap (indices biased by the two
// reserved FAT entries, hence the -2 / +2).
//
229 #define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
230  (CLUSTER_COUNT == 1) && \
231  FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
232  (CLUSTER_HINT) : \
233  RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
234  (CLUSTER_COUNT), \
235  (CLUSTER_HINT) - 2) + 2 \
236 )
237 
238 //
239 // FAT32: Define the maximum size of the FreeClusterBitMap to be the
240 // maximum size of a FAT16 FAT. If there are more clusters on the
241 // volume than can be represented by this many bytes of bitmap, the
242 // FAT will be split into "buckets", each of which does fit.
243 //
244 // Note this count is in clusters/bits of bitmap.
245 //
246 
247 #define MAX_CLUSTER_BITMAP_SIZE (1 << 16)
248 
249 //
250 // Calculate the window a given cluster number is in.
251 //
252 
// Window index for cluster C; the -2 bias skips reserved entries 0 and 1.
253 #define FatWindowOfCluster(C) (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
254 
//
// Place the routines below in pageable code sections; they all run at
// PASSIVE_LEVEL (each asserts via PAGED_CODE()).
//
255 #ifdef ALLOC_PRAGMA
256 #pragma alloc_text(PAGE, FatAddFileAllocation)
257 #pragma alloc_text(PAGE, FatAllocateDiskSpace)
258 #pragma alloc_text(PAGE, FatDeallocateDiskSpace)
259 #pragma alloc_text(PAGE, FatExamineFatEntries)
260 #pragma alloc_text(PAGE, FatInterpretClusterType)
261 #pragma alloc_text(PAGE, FatLogOf)
262 #pragma alloc_text(PAGE, FatLookupFatEntry)
263 #pragma alloc_text(PAGE, FatLookupFileAllocation)
264 #pragma alloc_text(PAGE, FatLookupFileAllocationSize)
265 #pragma alloc_text(PAGE, FatMergeAllocation)
266 #pragma alloc_text(PAGE, FatSetFatEntry)
267 #pragma alloc_text(PAGE, FatSetFatRun)
268 #pragma alloc_text(PAGE, FatSetupAllocationSupport)
269 #pragma alloc_text(PAGE, FatSplitAllocation)
270 #pragma alloc_text(PAGE, FatTearDownAllocationSupport)
271 #endif
273 
274 
275 INLINE
276 ULONG
278  IN PVCB Vcb
279  )
280 /*++
281 
282 Routine Description:
283 
284  Choose a window to allocate clusters from. Order of preference is:
285 
286  1. First window with >50% free clusters
287  2. First empty window
288  3. Window with greatest number of free clusters.
289 
290 Arguments:
291 
292  Vcb - Supplies the Vcb for the volume
293 
294 Return Value:
295 
296  'Best window' number (index into Vcb->Windows[])
297 
298 --*/
299 {
300  ULONG i, Fave = 0;
301  ULONG MaxFree = 0;
302  ULONG FirstEmpty = (ULONG)-1;
303  ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
304 
305  NT_ASSERT( 1 != Vcb->NumberOfWindows);
306 
307  for (i = 0; i < Vcb->NumberOfWindows; i++) {
308 
309  if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
310 
311  if (-1 == FirstEmpty) {
312 
313  //
314  // Keep note of the first empty window on the disc
315  //
316 
317  FirstEmpty = i;
318  }
319  }
320  else if (Vcb->Windows[i].ClustersFree > MaxFree) {
321 
322  //
323  // This window has the most free clusters, so far
324  //
325 
326  MaxFree = Vcb->Windows[i].ClustersFree;
327  Fave = i;
328 
329  //
330  // If this window has >50% free clusters, then we will take it,
331  // so don't bother considering more windows.
332  //
333 
334  if (MaxFree >= (ClustersPerWindow >> 1)) {
335 
336  break;
337  }
338  }
339  }
340 
341  //
342  // If there were no windows with 50% or more freespace, then select the
343  // first empty window on the disc, if any - otherwise we'll just go with
344  // the one with the most free clusters.
345  //
346 
347  if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
348 
349  Fave = FirstEmpty;
350  }
351 
352  return Fave;
353 }
354 
355 
// NOTE(review): the identifier line (presumably "FatSetupAllocationSupport (")
// was lost in extraction -- see the call at the end of this file's teardown
// path and the ALLOC_PRAGMA entry above.
356 VOID
358  IN PIRP_CONTEXT IrpContext,
359  IN PVCB Vcb
360  )
361 
362 /*++
363 
364 Routine Description:
365 
366  This routine fills in the Allocation Support structure in the Vcb.
367  Most entries are computed using fat.h macros supplied with data from
368  the Bios Parameter Block. The free cluster count, however, requires
369  going to the Fat and actually counting free sectors. At the same time
370  the free cluster bit map is initalized.
371 
372 Arguments:
373 
374  Vcb - Supplies the Vcb to fill in.
375 
376 --*/
377 
378 {
379  ULONG BitIndex;
380  ULONG ClustersDescribableByFat;
381 
382  PAGED_CODE();
383 
384  DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
385  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
386 
387  //
388  // Compute a number of fields for Vcb.AllocationSupport
389  //
390 
391  Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
392  Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
393 
394  Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
395 
396  Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
397 
398  Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
399 
400  Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
401  Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
402  Vcb->AllocationSupport.NumberOfFreeClusters = 0;
403 
404 
405  //
406  // Deal with a bug in DOS 5 format, if the Fat is not big enough to
407  // describe all the clusters on the disk, reduce this number. We expect
408  // that fat32 volumes will not have this problem.
409  //
410  // Turns out this was not a good assumption. We have to do this always now.
411  //
412 
413  ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
414  Vcb->Bpb.SectorsPerFat) *
415  Vcb->Bpb.BytesPerSector * 8)
416  / FatIndexBitSize(&Vcb->Bpb) ) - 2;
417 
418  if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
419 
420  Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
421  }
422 
423  //
424  // Extend the virtual volume file to include the Fat
425  //
426 
427  {
// NOTE(review): several lines lost in extraction here -- apparently the
// declaration and initialization of a CC_FILE_SIZES FileSizes local
// (allocation/file size spanning the reserved bytes plus the FAT);
// only the tail of that initializer survives below.  TODO confirm
// against the repository copy of this file.
429 
432  FatBytesPerFat( &Vcb->Bpb ));
434 
435  if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
436 
437  FatInitializeCacheMap( Vcb->VirtualVolumeFile,
438  &FileSizes,
439  TRUE,
// NOTE(review): one argument line lost in extraction here (the callbacks
// parameter of FatInitializeCacheMap) -- TODO confirm.
441  Vcb );
442 
443  } else {
444 
445  CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
446  }
447  }
448 
449  _SEH2_TRY {
450 
451  if (FatIsFat32(Vcb) &&
452  Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
453 
454  Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
// NOTE(review): continuation lines lost in extraction -- presumably the
// rounded-up division by MAX_CLUSTER_BITMAP_SIZE that completes this
// window-count expression.  TODO confirm.
457 
458  } else {
459 
460  Vcb->NumberOfWindows = 1;
461  }
462 
// NOTE(review): line lost in extraction -- presumably the pool allocation
// call (with PagedPool) whose remaining arguments follow.  TODO confirm.
464  Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
465  TAG_FAT_WINDOW );
466 
467  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
468  NULL,
469  0 );
470 
471  //
472  // Chose a FAT window to begin operation in.
473  //
474 
475  if (Vcb->NumberOfWindows > 1) {
476 
477  //
478  // Read the fat and count up free clusters. We bias by the two reserved
479  // entries in the FAT.
480  //
481 
482  FatExamineFatEntries( IrpContext, Vcb,
483  2,
484  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
485  TRUE,
486  NULL,
487  NULL);
488 
489 
490  //
491  // Pick a window to begin allocating from
492  //
493 
494  Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
495 
496  } else {
497 
498  Vcb->CurrentWindow = &Vcb->Windows[0];
499 
500  //
501  // Carefully bias ourselves by the two reserved entries in the FAT.
502  //
503 
504  Vcb->CurrentWindow->FirstCluster = 2;
505  Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
506  }
507 
508  //
509  // Now transition to the FAT window we have chosen.
510  //
511 
512  FatExamineFatEntries( IrpContext, Vcb,
513  0,
514  0,
515  FALSE,
516  Vcb->CurrentWindow,
517  NULL);
518 
519  //
520  // Now set the ClusterHint to the first free bit in our favorite
521  // window (except the ClusterHint is off by two).
522  //
523 
524  Vcb->ClusterHint =
525  (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
526  BitIndex + 2 : 2;
527 
528  } _SEH2_FINALLY {
529 
// NOTE(review): line lost in extraction -- presumably DebugUnwind.
531 
532  //
533  // If we hit an exception, back out.
534  //
535 
// NOTE(review): line lost in extraction -- presumably the abnormal-
// termination test guarding the teardown call below.  TODO confirm.
537 
538  FatTearDownAllocationSupport( IrpContext, Vcb );
539  }
540  } _SEH2_END;
541 
542  return;
543 }
544 
545 
546 VOID
548  IN PIRP_CONTEXT IrpContext,
549  IN PVCB Vcb
550  )
551 
552 /*++
553 
554 Routine Description:
555 
556  This routine prepares the volume for closing. Specifically, we must
557  release the free fat bit map buffer, and uninitialize the dirty fat
558  Mcb.
559 
560 Arguments:
561 
562  Vcb - Supplies the Vcb to fill in.
563 
564 Return Value:
565 
566  VOID
567 
568 --*/
569 
570 {
571  DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
572  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
573 
574  PAGED_CODE();
575 
576  //
577  // If there are FAT buckets, free them.
578  //
579 
580  if ( Vcb->Windows != NULL ) {
581 
582  ExFreePool( Vcb->Windows );
583  Vcb->Windows = NULL;
584  }
585 
586  //
587  // Free the memory associated with the free cluster bitmap.
588  //
589 
590  if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
591 
592  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
593 
594  //
595  // NULL this field as an flag.
596  //
597 
598  Vcb->FreeClusterBitMap.Buffer = NULL;
599  }
600 
601  //
602  // And remove all the runs in the dirty fat Mcb
603  //
604 
605  FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
606 
607  DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
608 
609  UNREFERENCED_PARAMETER( IrpContext );
610 
611  return;
612 }
613 
614 
615 _Requires_lock_held_(_Global_critical_region_)
616 VOID
617 FatLookupFileAllocation (
618  IN PIRP_CONTEXT IrpContext,
619  IN PFCB FcbOrDcb,
620  IN VBO Vbo,
621  OUT PLBO Lbo,
// NOTE(review): parameter lines lost in extraction -- from the Arguments
// block below these are presumably OUT PULONG ByteCount, OUT PBOOLEAN
// Allocated, OUT PBOOLEAN EndOnMax and OUT PULONG Index (optional).
// TODO confirm against fatprocs.h.
626  )
627 
628 /*++
629 
630 Routine Description:
631 
632  This routine looks up the existing mapping of VBO to LBO for a
633  file/directory. The information it queries is either stored in the
634  mcb field of the fcb/dcb or it is stored on in the fat table and
635  needs to be retrieved and decoded, and updated in the mcb.
636 
637 Arguments:
638 
639  FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
640 
641  Vbo - Supplies the VBO whose LBO we want returned
642 
643  Lbo - Receives the LBO corresponding to the input Vbo if one exists
644 
645  ByteCount - Receives the number of bytes within the run the run
646  that correpond between the input vbo and output lbo.
647 
648  Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
649  and FALSE otherwise.
650 
651  EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
652  which results in a fractional bytecount.
653 
654  Index - Receives the Index of the run
655 
656 --*/
657 
658 {
659  VBO CurrentVbo;
660  LBO CurrentLbo;
661  LBO PriorLbo;
662 
663  VBO FirstVboOfCurrentRun = 0;
664  LBO FirstLboOfCurrentRun;
665 
666  BOOLEAN LastCluster;
667  ULONG Runs;
668 
669  PVCB Vcb;
// NOTE(review): declaration lines lost in extraction -- the FatEntry
// local used below, and the FAT enumeration Context whose Bcb field is
// initialized a few lines down.  TODO confirm.
671  ULONG BytesPerCluster;
672  ULARGE_INTEGER BytesOnVolume;
673 
675 
676  PAGED_CODE();
677 
678  Vcb = FcbOrDcb->Vcb;
679 
680 
681  DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
682  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
683  DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
684  DebugTrace( 0, Dbg, " pLbo = %8lx\n", Lbo);
685  DebugTrace( 0, Dbg, " pByteCount = %8lx\n", ByteCount);
686  DebugTrace( 0, Dbg, " pAllocated = %8lx\n", Allocated);
687 
688  Context.Bcb = NULL;
689 
690  *EndOnMax = FALSE;
691 
692  //
693  // Check the trivial case that the mapping is already in our
694  // Mcb.
695  //
696 
// NOTE(review): line lost in extraction -- presumably the Mcb lookup
// "if" whose success body follows (it fills *ByteCount and, optionally,
// *Index).  TODO confirm.
698 
699  *Allocated = TRUE;
700 
701  NT_ASSERT( *ByteCount != 0 );
702 
703  //
704  // Detect the overflow case, trim and claim the condition.
705  //
706 
707  if (Vbo + *ByteCount == 0) {
708 
709  *EndOnMax = TRUE;
710  }
711 
712  DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
713  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
714  return;
715  }
716 
717  //
718  // Initialize the Vcb, the cluster size, LastCluster, and
719  // FirstLboOfCurrentRun (to be used as an indication of the first
720  // iteration through the following while loop).
721  //
722 
723  BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
724 
725  BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
726 
727  LastCluster = FALSE;
728  FirstLboOfCurrentRun = 0;
729 
730  //
731  // Discard the case that the request extends beyond the end of
732  // allocation. Note that if the allocation size if not known
733  // AllocationSize is set to 0xffffffff.
734  //
735 
736  if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
737 
738  *Allocated = FALSE;
739 
740  DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
741  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
742  return;
743  }
744 
745  //
746  // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
747  // and FatEntry to describe the beginning of the last entry in the Mcb.
748  // This is used as initialization for the following loop.
749  //
750  // If the Mcb was empty, we start at the beginning of the file with
751  // CurrentVbo set to 0 to indicate a new run.
752  //
753 
754  if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
755 
756  DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
757 
758  CurrentVbo -= (BytesPerCluster - 1);
759  CurrentLbo -= (BytesPerCluster - 1);
760 
761  //
762  // Convert an index to a count.
763  //
764 
765  Runs += 1;
766 
767  } else {
768 
769  DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
770 
771  //
772  // Check for an FcbOrDcb that has no allocation
773  //
774 
775  if (FcbOrDcb->FirstClusterOfFile == 0) {
776 
777  *Allocated = FALSE;
778 
779  DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
780  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
781  return;
782 
783  } else {
784 
785  CurrentVbo = 0;
// NOTE(review): line lost in extraction -- presumably the assignment of
// CurrentLbo from the file's first cluster, since it is copied into
// FirstLboOfCurrentRun and traced just below.  TODO confirm.
787  FirstVboOfCurrentRun = CurrentVbo;
788  FirstLboOfCurrentRun = CurrentLbo;
789 
790  Runs = 0;
791 
792  DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
793  }
794  }
795 
796  //
797  // Now we know that we are looking up a valid Vbo, but it is
798  // not in the Mcb, which is a monotonically increasing list of
799  // Vbo's. Thus we have to go to the Fat, and update
800  // the Mcb as we go. We use a try-finally to unpin the page
801  // of fat hanging around. Also we mark *Allocated = FALSE, so that
802  // the caller wont try to use the data if we hit an exception.
803  //
804 
805  *Allocated = FALSE;
806 
807  _SEH2_TRY {
808 
809  FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
810 
811  //
812  // ASSERT that CurrentVbo and CurrentLbo are now cluster alligned.
813  // The assumption here, is that only whole clusters of Vbos and Lbos
814  // are mapped in the Mcb.
815  //
816 
817  NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
818  % BytesPerCluster == 0) &&
819  (CurrentVbo % BytesPerCluster == 0) );
820 
821  //
822  // Starting from the first Vbo after the last Mcb entry, scan through
823  // the Fat looking for our Vbo. We continue through the Fat until we
824  // hit a noncontiguity beyond the desired Vbo, or the last cluster.
825  //
826 
827  while ( !LastCluster ) {
828 
829  //
830  // Get the next fat entry, and update our Current variables.
831  //
832 
833  FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );
834 
835  PriorLbo = CurrentLbo;
836  CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
837  CurrentVbo += BytesPerCluster;
838 
839  switch ( FatInterpretClusterType( Vcb, FatEntry )) {
840 
841  //
842  // Check for a break in the Fat allocation chain.
843  //
844 
845  case FatClusterAvailable:
846  case FatClusterReserved:
847  case FatClusterBad:
848 
849  DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
850  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
851 
852  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
// NOTE(review): line lost in extraction -- presumably the raise of a
// file-corrupt status after the popup (mirrors the FatClusterNext
// corruption path below).  TODO confirm.
854  break;
855 
856  //
857  // If this is the last cluster, we must update the Mcb and
858  // exit the loop.
859  //
860 
861  case FatClusterLast:
862 
863  //
864  // Assert we know where the current run started. If the
865  // Mcb was empty when we were called, thenFirstLboOfCurrentRun
866  // was set to the start of the file. If the Mcb contained an
867  // entry, then FirstLboOfCurrentRun was set on the first
868  // iteration through the loop. Thus if FirstLboOfCurrentRun
869  // is 0, then there was an Mcb entry and we are on our first
870  // iteration, meaing that the last cluster in the Mcb was
871  // really the last allocated cluster, but we checked Vbo
872  // against AllocationSize, and found it OK, thus AllocationSize
873  // must be too large.
874  //
875  // Note that, when we finally arrive here, CurrentVbo is actually
876  // the first Vbo beyond the file allocation and CurrentLbo is
877  // meaningless.
878  //
879 
880  DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
881 
882  //
883  // Detect the case of the maximal file. Note that this really isn't
884  // a proper Vbo - those are zero-based, and this is a one-based number.
885  // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
886  // 2^32 - 2.
887  //
888  // Just so we don't get confused here.
889  //
890 
891  if (CurrentVbo == 0) {
892 
893  *EndOnMax = TRUE;
894  CurrentVbo -= 1;
895  }
896 
897  LastCluster = TRUE;
898 
899  if (FirstLboOfCurrentRun != 0 ) {
900 
901  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
902  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
903  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
904  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
905 
// NOTE(review): line lost in extraction -- presumably the opening of
// the Mcb-add call whose remaining arguments follow.  TODO confirm.
907  &FcbOrDcb->Mcb,
908  FirstVboOfCurrentRun,
909  FirstLboOfCurrentRun,
910  CurrentVbo - FirstVboOfCurrentRun );
911 
912  Runs += 1;
913  }
914 
915  //
916  // Being at the end of allocation, make sure we have found
917  // the Vbo. If we haven't, seeing as we checked VBO
918  // against AllocationSize, the real disk allocation is less
919  // than that of AllocationSize. This comes about when the
920  // real allocation is not yet known, and AllocaitonSize
921  // contains MAXULONG.
922  //
923  // KLUDGE! - If we were called by FatLookupFileAllocationSize
924  // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
925  // hint. Thus we merrily go along looking for a match that isn't
926  // there, but in the meantime building an Mcb. If this is
927  // the case, fill in AllocationSize and return.
928  //
929 
930  if ( Vbo == MAXULONG - 1 ) {
931 
932  *Allocated = FALSE;
933 
934  FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
935 
936  DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
937  try_return ( NOTHING );
938  }
939 
940  //
941  // We will lie ever so slightly if we really terminated on the
942  // maximal byte of a file. It is really allocated.
943  //
944 
945  if (Vbo >= CurrentVbo && !*EndOnMax) {
946 
947  *Allocated = FALSE;
948  try_return ( NOTHING );
949  }
950 
951  break;
952 
953  //
954  // This is a continuation in the chain. If the run has a
955  // discontiguity at this point, update the Mcb, and if we are beyond
956  // the desired Vbo, this is the end of the run, so set LastCluster
957  // and exit the loop.
958  //
959 
960  case FatClusterNext:
961 
962  //
963  // This is the loop check. The Vbo must not be bigger than the size of
964  // the volume, and the Vbo must not have a) wrapped and b) not been at the
965  // very last cluster in the chain, for the case of the maximal file.
966  //
967 
968  if ( CurrentVbo == 0 ||
969  (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
970 
971  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
// NOTE(review): line lost in extraction -- presumably the raise of a
// file-corrupt status after the popup.  TODO confirm.
973  }
974 
975  if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
976 
977  //
978  // Note that on the first time through the loop
979  // (FirstLboOfCurrentRun == 0), we don't add the
980  // run to the Mcb since it curresponds to the last
981  // run already stored in the Mcb.
982  //
983 
984  if ( FirstLboOfCurrentRun != 0 ) {
985 
986  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
987  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
988  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
989  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
990 
// NOTE(review): line lost in extraction -- presumably the opening of
// the Mcb-add call whose remaining arguments follow.  TODO confirm.
992  &FcbOrDcb->Mcb,
993  FirstVboOfCurrentRun,
994  FirstLboOfCurrentRun,
995  CurrentVbo - FirstVboOfCurrentRun );
996 
997  Runs += 1;
998  }
999 
1000  //
1001  // Since we are at a run boundry, with CurrentLbo and
1002  // CurrentVbo being the first cluster of the next run,
1003  // we see if the run we just added encompases the desired
1004  // Vbo, and if so exit. Otherwise we set up two new
1005  // First*boOfCurrentRun, and continue.
1006  //
1007 
1008  if (CurrentVbo > Vbo) {
1009 
1010  LastCluster = TRUE;
1011 
1012  } else {
1013 
1014  FirstVboOfCurrentRun = CurrentVbo;
1015  FirstLboOfCurrentRun = CurrentLbo;
1016  }
1017  }
1018  break;
1019 
1020  default:
1021 
1022  DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1023 
1024 #ifdef _MSC_VER
1025 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1026 #endif
1027  FatBugCheck( 0, 0, 0 );
1028 
1029  break;
1030 
1031  } // switch()
1032  } // while()
1033 
1034  //
1035  // Load up the return parameters.
1036  //
1037  // On exit from the loop, Vbo still contains the desired Vbo, and
1038  // CurrentVbo is the first byte after the run that contained the
1039  // desired Vbo.
1040  //
1041 
1042  *Allocated = TRUE;
1043 
1044  *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1045 
1046  *ByteCount = CurrentVbo - Vbo;
1047 
1048  if (ARGUMENT_PRESENT(Index)) {
1049 
1050  //
1051  // Note that Runs only needs to be accurate with respect to where we
1052  // ended. Since partial-lookup cases will occur without exclusive
1053  // synchronization, the Mcb itself may be much bigger by now.
1054  //
1055 
1056  *Index = Runs - 1;
1057  }
1058 
1059  try_exit: NOTHING;
1060 
1061  } _SEH2_FINALLY {
1062 
1063  DebugUnwind( FatLookupFileAllocation );
1064 
1065  //
1066  // We are done reading the Fat, so unpin the last page of fat
1067  // that is hanging around
1068  //
1069 
1070  FatUnpinBcb( IrpContext, Context.Bcb );
1071 
1072  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1073  } _SEH2_END;
1074 
1075  return;
1076 }
1077 
1078 
1079 _Requires_lock_held_(_Global_critical_region_)
1080 VOID
1081 FatAddFileAllocation (
1082  IN PIRP_CONTEXT IrpContext,
1083  IN PFCB FcbOrDcb,
1085  IN ULONG DesiredAllocationSize
1086  )
1087 
1088 /*++
1089 
1090 Routine Description:
1091 
1092  This routine adds additional allocation to the specified file/directory.
1093  Additional allocation is added by appending clusters to the file/directory.
1094 
1095  If the file already has a sufficient allocation then this procedure
1096  is effectively a noop.
1097 
1098 Arguments:
1099 
1100  FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
1101  This parameter must not specify the root dcb.
1102 
1103  FileObject - If supplied inform the cache manager of the change.
1104 
1105  DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
1106  allocated to the file/directory.
1107 
1108 --*/
1109 
1110 {
1111  PVCB Vcb;
1112  LARGE_MCB NewMcb = {0};
1113  PLARGE_MCB McbToCleanup = NULL;
1114  PDIRENT Dirent = NULL;
1115  ULONG NewAllocation = 0;
1116  PBCB Bcb = NULL;
1117  BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;
1118  BOOLEAN UnwindAllocationSizeSet = FALSE;
1119  BOOLEAN UnwindCacheManagerInformed = FALSE;
1120  BOOLEAN UnwindWeInitializedMcb = FALSE;
1121 
1122  PAGED_CODE();
1123 
1124  DebugTrace(+1, Dbg, "FatAddFileAllocation\n", 0);
1125  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1126  DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1127 
1128  Vcb = FcbOrDcb->Vcb;
1129 
1130  //
1131  // If we haven't yet set the correct AllocationSize, do so.
1132  //
1133 
1134  if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1135 
1136  FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1137  }
1138 
1139  //
1140  // Check for the benign case that the desired allocation is already
1141  // within the allocation size.
1142  //
1143 
1144  if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {
1145 
1146  DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1147 
1148  DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1149  return;
1150  }
1151 
1152  DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);
1153 
1154  //
1155  // Get a chunk of disk space that will fullfill our needs. If there
1156  // was no initial allocation, start from the hint in the Vcb, otherwise
1157  // try to allocate from the cluster after the initial allocation.
1158  //
1159  // If there was no initial allocation to the file, we can just use the
1160  // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
1161  // it to the one in the FcbOrDcb.
1162  //
1163 
1164  _SEH2_TRY {
1165 
1166  if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1167 
1168  LBO FirstLboOfFile;
1169 
1171 
1172  FatGetDirentFromFcbOrDcb( IrpContext,
1173  FcbOrDcb,
1174  FALSE,
1175  &Dirent,
1176  &Bcb );
1177  //
1178  // Set this dirty right now since this call can fail.
1179  //
1180 
1181  FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1182 
1183  FatAllocateDiskSpace( IrpContext,
1184  Vcb,
1185  0,
1186  &DesiredAllocationSize,
1187  FALSE,
1188  &FcbOrDcb->Mcb );
1189 
1190  UnwindWeAllocatedDiskSpace = TRUE;
1191  McbToCleanup = &FcbOrDcb->Mcb;
1192 
1193  //
1194  // We have to update the dirent and FcbOrDcb copies of
1195  // FirstClusterOfFile since before it was 0
1196  //
1197 
1199  &FcbOrDcb->Mcb,
1200  0,
1201  &FirstLboOfFile,
1202  (PULONG)NULL,
1203  NULL );
1204 
1205  DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );
1206 
1207  FcbOrDcb->FirstClusterOfFile = FatGetIndexFromLbo( Vcb, FirstLboOfFile );
1208 
1210 
1211  if ( FatIsFat32(Vcb) ) {
1212 
1213  Dirent->FirstClusterOfFileHi = (USHORT)(FcbOrDcb->FirstClusterOfFile >> 16);
1214  }
1215 
1216  //
1217  // Note the size of the allocation we need to tell the cache manager about.
1218  //
1219 
1220  NewAllocation = DesiredAllocationSize;
1221 
1222  } else {
1223 
1224  LBO LastAllocatedLbo;
1225  VBO DontCare;
1226 
1227  //
1228  // Get the first cluster following the current allocation. It is possible
1229  // the Mcb is empty (or short, etc.) so we need to be slightly careful
1230  // about making sure we don't lie with the hint.
1231  //
1232 
1233  (void)FatLookupLastMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo, NULL );
1234 
1235  //
1236  // Try to get some disk space starting from there.
1237  //
1238 
1239  NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;
1240 
1241  FsRtlInitializeLargeMcb( &NewMcb, PagedPool );
1242  UnwindWeInitializedMcb = TRUE;
1243  McbToCleanup = &NewMcb;
1244 
1245  FatAllocateDiskSpace( IrpContext,
1246  Vcb,
1247  (LastAllocatedLbo != ~0 ?
1248  FatGetIndexFromLbo(Vcb,LastAllocatedLbo + 1) :
1249  0),
1250  &NewAllocation,
1251  FALSE,
1252  &NewMcb );
1253 
1254  UnwindWeAllocatedDiskSpace = TRUE;
1255  }
1256 
1257  //
1258  // Now that we increased the allocation of the file, mark it in the
1259  // FcbOrDcb. Carefully prepare to handle an inability to grow the cache
1260  // structures.
1261  //
1262 
1263  FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;
1264 
1265  //
1266  // Handle the maximal file case, where we may have just wrapped. Note
1267  // that this must be the precise boundary case wrap, i.e. by one byte,
1268  // so that the new allocation is actually one byte "less" as far as we're
1269  // concerned. This is important for the extension case.
1270  //
1271 
1272  if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1273 
1274  NewAllocation -= 1;
1275  FcbOrDcb->Header.AllocationSize.LowPart = 0xffffffff;
1276  }
1277 
1278  UnwindAllocationSizeSet = TRUE;
1279 
1280  //
1281  // Inform the cache manager to increase the section size
1282  //
1283 
1285 
1287  (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1288  UnwindCacheManagerInformed = TRUE;
1289  }
1290 
1291  //
1292  // In the extension case, we have held off actually gluing the new
1293  // allocation onto the file. This simplifies exception cleanup since
1294  // if it was already added and the section grow failed, we'd have to
1295  // do extra work to unglue it. This way, we can assume that if we
1296  // raise the only thing we need to do is deallocate the disk space.
1297  //
1298  // Merge the allocation now.
1299  //
1300 
1301  if (FcbOrDcb->Header.AllocationSize.LowPart != NewAllocation) {
1302 
1303  //
1304  // Tack the new Mcb onto the end of the FcbOrDcb one.
1305  //
1306 
1307  FatMergeAllocation( IrpContext,
1308  Vcb,
1309  &FcbOrDcb->Mcb,
1310  &NewMcb );
1311  }
1312 
1313  } _SEH2_FINALLY {
1314 
1315  DebugUnwind( FatAddFileAllocation );
1316 
1317  //
1318  // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail..
1319  //
1320 
1322 
1323  //
1324  // If we were dogged trying to complete this operation, we need to
1325  // back various things out.
1326  //
1327 
1328  if (_SEH2_AbnormalTermination()) {
1329 
1330  //
1331  // Pull off the allocation size we tried to add to this object if
1332  // we failed to grow cache structures or Mcb structures.
1333  //
1334 
1335  if (UnwindAllocationSizeSet) {
1336 
1337  FcbOrDcb->Header.AllocationSize.LowPart -= NewAllocation;
1338  }
1339 
1340  if (UnwindCacheManagerInformed) {
1341 
1343  (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1344  }
1345 
1346  //
1347  // In the case of initial allocation, we used the Fcb's Mcb and have
1348  // to clean that up as well as the FAT chain references.
1349  //
1350 
1351  if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1352 
1353  if (Dirent != NULL) {
1354 
1357 
1358  if ( FatIsFat32(Vcb) ) {
1359 
1360  Dirent->FirstClusterOfFileHi = 0;
1361  }
1362  }
1363  }
1364 
1365  //
1366  // ... and drop the dirent Bcb if we got it. Do it now
1367  // so we can afford to take the exception if we have to.
1368  //
1369 
1370  FatUnpinBcb( IrpContext, Bcb );
1371 
1372  _SEH2_TRY {
1373 
1374  //
1375  // Note this can re-raise.
1376  //
1377 
1378  if ( UnwindWeAllocatedDiskSpace ) {
1379 
1380  FatDeallocateDiskSpace( IrpContext, Vcb, McbToCleanup, FALSE );
1381  }
1382 
1383  } _SEH2_FINALLY {
1384 
1385  //
1386  // We always want to clean up the non-initial allocation temporary Mcb,
1387  // otherwise we have the Fcb's Mcb and we just truncate it away.
1388  //
1389 
1390  if (UnwindWeInitializedMcb == TRUE) {
1391 
1392  //
1393  // Note that we already know a raise is in progress. No danger
1394  // of encountering the normal case code below and doing this again.
1395  //
1396 
1397  FsRtlUninitializeLargeMcb( McbToCleanup );
1398 
1399  } else {
1400 
1401  if (McbToCleanup) {
1402 
1403  FsRtlTruncateLargeMcb( McbToCleanup, 0 );
1404  }
1405  }
1406  } _SEH2_END;
1407  }
1408 
1409  DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1410  } _SEH2_END;
1411 
1412  //
1413  // Non-exceptional cleanup we always want to do. In handling the re-raise possibilities
1414  // during exceptions we had to make sure these two steps always happened there beforehand.
1415  // So now we handle the usual case.
1416  //
1417 
1418  FatUnpinBcb( IrpContext, Bcb );
1419 
1420  if (UnwindWeInitializedMcb == TRUE) {
1421 
1422  FsRtlUninitializeLargeMcb( &NewMcb );
1423  }
1424 }
1425 
1426 _Requires_lock_held_(_Global_critical_region_)
1427 VOID
1428 FatTruncateFileAllocation (
1429  IN PIRP_CONTEXT IrpContext,
1430  IN PFCB FcbOrDcb,
1431  IN ULONG DesiredAllocationSize
1432  )
1433 
1434 /*++
1435 
1436 Routine Description:
1437 
1438  This routine truncates the allocation to the specified file/directory.
1439 
1440  If the file is already smaller than the indicated size then this procedure
1441  is effectively a noop.
1442 
1443 
1444 Arguments:
1445 
1446  FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1447  This parameter must not specify the root dcb.
1448 
1449  DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
1450  allocated to the file/directory. It is rounded
1451  up to the nearest cluster.
1452 
1453 Return Value:
1454 
1455  VOID - This routine returns no value. On failure it raises a status;
1456  the unwind logic in the _SEH2_FINALLY clause below backs out state.
1457 
1458 --*/
1459 
1460 {
1461  PVCB Vcb;
1462  PBCB Bcb = NULL;
1463  LARGE_MCB RemainingMcb = {0};
1464  ULONG BytesPerCluster;
1465  PDIRENT Dirent = NULL;
1466  BOOLEAN UpdatedDirent = FALSE;
1467 
1468  ULONG UnwindInitialAllocationSize;
1469  ULONG UnwindInitialFirstClusterOfFile;
1470  BOOLEAN UnwindWeAllocatedMcb = FALSE;
1471 
1472  PAGED_CODE();
1473 
1474  Vcb = FcbOrDcb->Vcb;
1475 
1476  DebugTrace(+1, Dbg, "FatTruncateFileAllocation\n", 0);
1477  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1478  DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1479 
1480 
1481  //
1482  // If the Fcb isn't in good condition, we have no business whacking around on
1483  // the disk after "its" clusters.
1484  //
1485  // Inspired by a Prefix complaint.
1486  //
1488 
1489  //
1490  // If we haven't yet set the correct AllocationSize, do so.
1491  //
1492 
1493  if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1494 
1495  FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1496  }
1497 
1498  //
1499  // Round up the Desired Allocation Size to the next cluster size
1500  //
1501 
1502  BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
1503 
1504  //
1505  // Note if the desired allocation is zero, to distinguish this from
1506  // the wrap case below.
1507  //
1508 
1509  if (DesiredAllocationSize != 0) {
1510 
1511  DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
1512  ~(BytesPerCluster - 1);
1513  //
1514  // Check for the benign case that the file is already smaller than
1515  // the desired truncation. Note that if it wraps, then a) it was
1516  // specifying an offset in the maximally allocatable cluster and
1517  // b) we're not asking to extend the file, either. So stop.
1518  //
1519 
1520  if (DesiredAllocationSize == 0 ||
1521  DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {
1522 
1523  DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1524 
1525  DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1526  return;
1527  }
1528 
1529  }
1530 
1531  //
1532  // This is a no-op if the allocation size is already what we want.
1533  //
1534 
1535  if (DesiredAllocationSize == FcbOrDcb->Header.AllocationSize.LowPart) {
1536 
1537  DebugTrace(0, Dbg, "Desired size equals current allocation.\n", 0);
1538  DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1539  return;
1540  }
1541 
1542  UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
1543  UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;
1544 
1545  //
1546  // Update the FcbOrDcb allocation size. If it is now zero, we have the
1547  // additional task of modifying the FcbOrDcb and Dirent copies of
1548  // FirstClusterInFile.
1549  //
1550  // Note that we must pin the dirent before actually deallocating the
1551  // disk space since, in unwind, it would not be possible to reallocate
1552  // deallocated disk space as someone else may have reallocated it and
1553  // may cause an exception when you try to get some more disk space.
1554  // Thus FatDeallocateDiskSpace must be the final dangerous operation.
1555  //
1556 
1557  _SEH2_TRY {
1558 
1559  FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;
1560 
1561  //
1562  // Special case 0
1563  //
1564 
1565  if (DesiredAllocationSize == 0) {
1566 
1567  //
1568  // We have to update the dirent and FcbOrDcb copies of
1569  // FirstClusterOfFile since before it was 0
1570  //
1571 
1573 
1574  FatGetDirentFromFcbOrDcb( IrpContext, FcbOrDcb, FALSE, &Dirent, &Bcb );
1575 
1577 
1578  if (FatIsFat32(Vcb)) {
1579 
1580  Dirent->FirstClusterOfFileHi = 0;
1581  }
1582 
1584 
 //
 // Mark the dirent dirty before touching the disk; once this succeeds,
 // UpdatedDirent tells the unwind handler below that the on-disk dirent
 // was already updated.
 //

1585  FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1586  UpdatedDirent = TRUE;
1587 
1588  FatDeallocateDiskSpace( IrpContext, Vcb, &FcbOrDcb->Mcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0));
1589 
 //
 // Empty the Fcb's Mcb (remove the entire run starting at Vbo 0) now
 // that the clusters have been released.
 //

1590  FatRemoveMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1591 
1592  } else {
1593 
1594  //
1595  // Split the existing allocation into two parts, one we will keep, and
1596  // one we will deallocate.
1597  //
1598 
1600  UnwindWeAllocatedMcb = TRUE;
1601 
1602  FatSplitAllocation( IrpContext,
1603  Vcb,
1604  &FcbOrDcb->Mcb,
1605  DesiredAllocationSize,
1606  &RemainingMcb );
1607 
1608  FatDeallocateDiskSpace( IrpContext, Vcb, &RemainingMcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0) );
1609 
1611  }
1612 
1613  } _SEH2_FINALLY {
1614 
1615  DebugUnwind( FatTruncateFileAllocation );
1616 
1617  //
1618  // Is this really the right backout strategy? It would be nice if we could
1619  // pretend the truncate worked if we knew that the file had gotten into
1620  // a consistent state. Leaving dangled clusters is probably quite preferable.
1621  //
1622 
1623  if ( _SEH2_AbnormalTermination() ) {
1624 
1625  FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;
1626 
1627  if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {
1628 
1629  if (UpdatedDirent) {
1630 
1631  //
1632  // If the dirent has been updated ok and marked dirty, then we
1633  // failed in FatDeallocateDiskSpace, and don't know what state
1634  // the on-disk fat chain is in. So we throw away the mcb,
1635  // and potentially lose a few clusters until the next
1636  // chkdsk. The operation has succeeded, but the exception
1637  // will still propagate. 5.1
1638  //
1639 
1640  FatRemoveMcbEntry( Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1641  FcbOrDcb->Header.AllocationSize.QuadPart = 0;
1642  }
1643  else if (FcbOrDcb->FirstClusterOfFile == 0) {
1644 
1645  Dirent->FirstClusterOfFile = (USHORT)UnwindInitialFirstClusterOfFile;
1646 
1647  if ( FatIsFat32(Vcb) ) {
1648 
1649  Dirent->FirstClusterOfFileHi =
1650  (USHORT)(UnwindInitialFirstClusterOfFile >> 16);
1651  }
1652 
1653  FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
1654  }
1655  }
1656 
1657  if ( UnwindWeAllocatedMcb ) {
1658 
1660  }
1661 
1662  //
1663  // Note that in the non-zero truncation case, we will also
1664  // leak clusters. However, apart from this, the in-memory and on-disk
1665  // structures will agree.
1666  }
1667 
1668  FatUnpinBcb( IrpContext, Bcb );
1669 
1670  //
1671  // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail.
1672  //
1673 
1675 
1676  DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1677  } _SEH2_END;
1678 }
1679 
1680 
1681 _Requires_lock_held_(_Global_critical_region_)
1682 VOID
1683 FatLookupFileAllocationSize (
1684  IN PIRP_CONTEXT IrpContext,
1685  IN PFCB FcbOrDcb
1686  )
1687 
1688 /*++
1689 
1690 Routine Description:
1691 
1692  This routine retrieves the current file allocation size for the
1693  specified file/directory.
1694 
1695 Arguments:
1696 
1697  FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1698 
 Return Value:

  VOID - On return FcbOrDcb->Header.AllocationSize reflects the real
  length of the on-disk allocation chain (see the corruption check
  below, which relies on this). Raises on a corrupt file.

1699 --*/
1700 
1701 {
1702  LBO Lbo;
1703  ULONG ByteCount;
1704  BOOLEAN DontCare;
1705 
1706  PAGED_CODE();
1707 
1708  DebugTrace(+1, Dbg, "FatLookupAllocationSize\n", 0);
1709  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1710 
1711  //
1712  // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1 (the
 // largest possible Vbo) so the whole allocation chain is looked up;
 // the out parameters themselves are discarded (hence DontCare).
1713  //
1714 
1715  FatLookupFileAllocation( IrpContext,
1716  FcbOrDcb,
1717  MAXULONG - 1,
1718  &Lbo,
1719  &ByteCount,
1720  &DontCare,
1721  &DontCare,
1722  NULL );
1723 
1724  //
1725  // FileSize was set at Fcb creation time from the contents of the directory entry,
1726  // and we are only now looking up the real length of the allocation chain. If it
1727  // cannot be contained, this is trash. Probably more where that came from.
1728  //
1729 
1730  if (FcbOrDcb->Header.FileSize.LowPart > FcbOrDcb->Header.AllocationSize.LowPart) {
1731 
1732  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1734  }
1735 
1736  DebugTrace(-1, Dbg, "FatLookupFileAllocationSize -> (VOID)\n", 0);
1737  return;
1738 }
1739 
1740 
1741 _Requires_lock_held_(_Global_critical_region_)
1742 VOID
1743 FatAllocateDiskSpace (
1744  IN PIRP_CONTEXT IrpContext,
1745  IN PVCB Vcb,
1750  )
1751 
1752 /*++
1753 
1754 Routine Description:
1755 
1756  This procedure allocates additional disk space and builds an mcb
1757  representing the newly allocated space. If the space cannot be
1758  allocated then this procedure raises an appropriate status.
1759 
1760  Searching starts from the hint index in the Vcb unless an alternative
1761  non-zero hint is given in AlternateClusterHint. If we are using the
1762  hint field in the Vcb, it is set to the cluster following our allocation
1763  when we are done.
1764 
1765  Disk space can only be allocated in cluster units so this procedure
1766  will round up any byte count to the next cluster boundary.
1767 
1768  Pictorially what is done is the following (where ! denotes the end of
1769  the fat chain (i.e., FAT_CLUSTER_LAST)):
1770 
1771 
1772  Mcb (empty)
1773 
1774  becomes
1775 
1776  Mcb |--a--|--b--|--c--!
1777 
1778  ^
1779  ByteCount ----------+
1780 
1781 Arguments:
1782 
1783  Vcb - Supplies the VCB being modified
1784 
1785  AbsoluteClusterHint - Supplies an alternate hint index to start the
1786  search from. If this is zero we use, and update,
1787  the Vcb hint field.
1788 
1789  ByteCount - Supplies the number of bytes that we are requesting, and
1790  receives the number of bytes that we got.
1791 
1792  ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
1793  is acceptable.
1794 
1795  Mcb - Receives the MCB describing the newly allocated disk space. The
1796  caller passes in an initialized Mcb that is filled in by this procedure.
1797 
1798  Return Value:
1799 
1800  TRUE - Allocated ok
1801  FALSE - Failed to allocate exactly as requested (=> ExactMatchRequired was TRUE)
1802 
1803 --*/
1804 
1805 {
1806  UCHAR LogOfBytesPerCluster;
1807  ULONG BytesPerCluster;
1808  ULONG StartingCluster;
1809  ULONG ClusterCount;
1810  ULONG WindowRelativeHint;
1811 #if DBG
1812  ULONG PreviousClear = 0;
1813 #endif
1814 
1816  BOOLEAN Wait = FALSE;
1817  BOOLEAN Result = TRUE;
1818 
1819  PAGED_CODE();
1820 
1821  DebugTrace(+1, Dbg, "FatAllocateDiskSpace\n", 0);
1822  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
1823  DebugTrace( 0, Dbg, " *ByteCount = %8lx\n", *ByteCount);
1824  DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
1825  DebugTrace( 0, Dbg, " Hint = %8lx\n", AbsoluteClusterHint);
1826 
1827  NT_ASSERT((AbsoluteClusterHint <= Vcb->AllocationSupport.NumberOfClusters + 2) && (1 != AbsoluteClusterHint));
1828 
1829  //
1830  // Make sure byte count is not zero
1831  //
1832 
1833  if (*ByteCount == 0) {
1834 
1835  DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);
1836 
1837  DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
1838  return;
1839  }
1840 
1841  //
1842  // Compute the cluster count based on the byte count, rounding up
1843  // to the next cluster if there is any remainder. Note that the
1844  // pathological case ByteCount == 0 has been eliminated above.
1845  //
1846 
1847  LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
1848  BytesPerCluster = 1 << LogOfBytesPerCluster;
1849 
1850  *ByteCount = (*ByteCount + (BytesPerCluster - 1))
1851  & ~(BytesPerCluster - 1);
1852 
1853  //
1854  // If ByteCount is NOW zero, then we were asked for the maximal
1855  // filesize (or at least for bytes in the last allocatable sector).
1856  //
1857 
1858  if (*ByteCount == 0) {
1859 
1860  *ByteCount = 0xffffffff;
1861  ClusterCount = 1 << (32 - LogOfBytesPerCluster);
1862 
1863  } else {
1864 
1865  ClusterCount = (*ByteCount >> LogOfBytesPerCluster);
1866  }
1867 
1868  //
1869  // Analysis tools don't figure out that ClusterCount is not zero because
1870  // of the ByteCount == 0 checks, so give them a hint.
1871  //
1872  _Analysis_assume_(ClusterCount > 0);
1873 
1874  //
1875  // Make sure there are enough free clusters to start with, and
1876  // take them now so that nobody else takes them from us.
1877  //
1878 
1879  ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
1881 
1882  if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {
1883 
1884  Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;
1885 
1886  } else {
1887 
1889  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1890 
1891  DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
1892  FatRaiseStatus( IrpContext, STATUS_DISK_FULL );
1893  }
1894 
1895  //
1896  // Did the caller supply a hint?
1897  //
1898 
1899  if ((0 != AbsoluteClusterHint) && (AbsoluteClusterHint < (Vcb->AllocationSupport.NumberOfClusters + 2))) {
1900 
1901  if (Vcb->NumberOfWindows > 1) {
1902 
1903  //
1904  // If we're being called upon to allocate clusters outside the
1905  // current window (which happens only via MoveFile), it's a problem.
1906  // We address this by changing the current window to be the one which
1907  // contains the alternate cluster hint. Note that if the user's
1908  // request would cross a window boundary, he doesn't really get what
1909  // he wanted.
1910  //
1911 
1912  if (AbsoluteClusterHint < Vcb->CurrentWindow->FirstCluster ||
1913  AbsoluteClusterHint > Vcb->CurrentWindow->LastCluster) {
1914 
1916 
1917  NT_ASSERT( BucketNum < Vcb->NumberOfWindows);
1918 
1919  //
1920  // Drop our shared lock on the ChangeBitMapResource, and pick it up again
1921  // exclusive in preparation for making the window swap.
1922  //
1923 
1925  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1926  ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
1928 
1929  Window = &Vcb->Windows[BucketNum];
1930 
1931  //
1932  // Again, test the current window against the one we want - some other
1933  // thread could have sneaked in behind our backs and kindly set it to the one
1934  // we need, when we dropped and reacquired the ChangeBitMapResource above.
1935  //
1936 
1937  if (Window != Vcb->CurrentWindow) {
1938 
1939  _SEH2_TRY {
1940 
1941  Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1942  SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1943 
1944  //
1945  // Change to the new window (update Vcb->CurrentWindow) and scan it
1946  // to build up a freespace bitmap etc.
1947  //
1948 
1949  FatExamineFatEntries( IrpContext, Vcb,
1950  0,
1951  0,
1952  FALSE,
1953  Window,
1954  NULL);
1955 
1956  } _SEH2_FINALLY {
1957 
1958  if (!Wait) {
1959 
1960  ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1961  }
1962 
1963  if (_SEH2_AbnormalTermination()) {
1964 
1965  //
1966  // We will have raised as a result of failing to pick up the
1967  // chunk of the FAT for this window move. Release our resources
1968  // and return the cluster count to the volume.
1969  //
1970 
1971  Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
1972 
1974  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1975  }
1976  } _SEH2_END;
1977  }
1978  }
1979 
1980  //
1981  // Make the hint cluster number relative to the base of the current window...
1982  //
1983  // Currentwindow->Firstcluster is biased by +2 already, so we will lose the
1984  // bias already in AbsoluteClusterHint. Put it back....
1985  //
1986 
1987  WindowRelativeHint = AbsoluteClusterHint - Vcb->CurrentWindow->FirstCluster + 2;
1988  }
1989  else {
1990 
1991  //
1992  // Only one 'window', ie fat16/12. No modification necessary.
1993  //
1994 
1995  WindowRelativeHint = AbsoluteClusterHint;
1996  }
1997  }
1998  else {
1999 
2000  //
2001  // Either no hint supplied, or it was out of range, so grab one from the Vcb
2002  //
2003  // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
2004  //
2005 
2006  WindowRelativeHint = Vcb->ClusterHint;
2007  AbsoluteClusterHint = 0;
2008 
2009  //
2010  // Vcb hint may not have been initialized yet. Force to valid cluster.
2011  //
2012 
2013  if (-1 == WindowRelativeHint) {
2014 
2015  WindowRelativeHint = 2;
2016  }
2017  }
2018 
2019  NT_ASSERT((WindowRelativeHint >= 2) && (WindowRelativeHint < Vcb->FreeClusterBitMap.SizeOfBitMap + 2));
2020 
2021  //
2022  // Keep track of the window we're allocating from, so we can clean
2023  // up correctly if the current window changes after we unlock the
2024  // bitmap.
2025  //
2026 
2027  Window = Vcb->CurrentWindow;
2028 
2029  //
2030  // Try to find a run of free clusters large enough for us.
2031  //
2032 
2033  StartingCluster = FatFindFreeClusterRun( IrpContext,
2034  Vcb,
2035  ClusterCount,
2036  WindowRelativeHint );
2037  //
2038  // If the above call was successful, we can just update the fat
2039  // and Mcb and exit. Otherwise we have to look for smaller free
2040  // runs.
2041  //
2042  // This test is a bit funky. Note that the error return from
2043  // RtlFindClearBits is -1, and adding two to that is 1.
2044  //
2045 
2046  if ((StartingCluster != 1) &&
2047  ((0 == AbsoluteClusterHint) || (StartingCluster == WindowRelativeHint))
2048  ) {
2049 
2050 #if DBG
2051  PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2052 #endif // DBG
2053 
2054  //
2055  // Take the clusters we found, and unlock the bit map.
2056  //
2057 
2058  FatReserveClusters(IrpContext, Vcb, StartingCluster, ClusterCount);
2059 
2060  Window->ClustersFree -= ClusterCount;
2061 
2062  StartingCluster += Window->FirstCluster;
2063  StartingCluster -= 2;
2064 
2065  NT_ASSERT( PreviousClear - ClusterCount == Window->ClustersFree );
2066 
2068 
2069  //
2070  // Note that this call will never fail since there is always
2071  // room for one entry in an empty Mcb.
2072  //
2073 
2075  0,
2076  FatGetLboFromIndex( Vcb, StartingCluster ),
2077  *ByteCount);
2078  _SEH2_TRY {
2079 
2080  //
2081  // Update the fat.
2082  //
2083 
2084  FatAllocateClusters(IrpContext, Vcb,
2085  StartingCluster,
2086  ClusterCount);
2087 
2088  } _SEH2_FINALLY {
2089 
2090  DebugUnwind( FatAllocateDiskSpace );
2091 
2092  //
2093  // If the allocate clusters failed, remove the run from the Mcb,
2094  // unreserve the clusters, and reset the free cluster count.
2095  //
2096 
2097  if (_SEH2_AbnormalTermination()) {
2098 
2100 
2102 
2103  // Only clear bits if the bitmap window is the same.
2104 
2105  if (Window == Vcb->CurrentWindow) {
2106 
2107  // Both values (startingcluster and window->firstcluster) are
2108  // already biased by 2, so will cancel, so we need to add in the 2 again.
2109 
2110  FatUnreserveClusters( IrpContext, Vcb,
2111  StartingCluster - Window->FirstCluster + 2,
2112  ClusterCount );
2113  }
2114 
2115  Window->ClustersFree += ClusterCount;
2116  Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
2117 
2119  }
2120 
2121  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2122  } _SEH2_END;
2123 
2124  } else {
2125 
2126  //
2127  // Note that Index is a zero-based window-relative number. When appropriate
2128  // it'll get converted into a true cluster number and put in Cluster, which
2129  // will be a volume relative true cluster number.
2130  //
2131 
2132  ULONG Index = 0;
2133  ULONG Cluster = 0;
2134  ULONG CurrentVbo = 0;
2135  ULONG PriorLastCluster = 0;
2136  ULONG BytesFound = 0;
2137 
2138  ULONG ClustersFound = 0;
2139  ULONG ClustersRemaining = 0;
2140 
2141  BOOLEAN LockedBitMap = FALSE;
2142  BOOLEAN SelectNextContigWindow = FALSE;
2143 
2144  //
2145  // Drop our shared lock on the ChangeBitMapResource, and pick it up again
2146  // exclusive in preparation for making a window swap.
2147  //
2148 
2150  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2151  ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
2153  LockedBitMap = TRUE;
2154 
2155  _SEH2_TRY {
2156 
2157  if ( ExactMatchRequired && (1 == Vcb->NumberOfWindows)) {
2158 
2159  //
2160  // Give up right now, there are no more windows to search! RtlFindClearBits
2161  // searches the whole bitmap, so we would have found any contiguous run
2162  // large enough.
2163  //
2164 
2165  try_leave( Result = FALSE);
2166  }
2167 
2168  //
2169  // While the request is still incomplete, look for the largest
2170  // run of free clusters, mark them taken, allocate the run in
2171  // the Mcb and Fat, and if this isn't the first time through
2172  // the loop link it to prior run on the fat. The Mcb will
2173  // coalesce automatically.
2174  //
2175 
2176  ClustersRemaining = ClusterCount;
2177  CurrentVbo = 0;
2178  PriorLastCluster = 0;
2179 
2180  while (ClustersRemaining != 0) {
2181 
2182  //
2183  // If we just entered the loop, the bit map is already locked
2184  //
2185 
2186  if ( !LockedBitMap ) {
2187 
2189  LockedBitMap = TRUE;
2190  }
2191 
2192  //
2193  // Find the largest run of free clusters. If the run is
2194  // bigger than we need, only use what we need. Note that
2195  // this will then be the last while() iteration.
2196  //
2197 
2198  // 12/3/95: need to bias bitmap by 2 bits for the defrag
2199  // hooks and the below macro became impossible to do without in-line
2200  // procedures.
2201  //
2202  // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
2203 
2204  ClustersFound = 0;
2205 
2206  if (!SelectNextContigWindow) {
2207 
2208  if ( 0 != WindowRelativeHint) {
2209 
2210  ULONG Desired = Vcb->FreeClusterBitMap.SizeOfBitMap - (WindowRelativeHint - 2);
2211 
2212  //
2213  // We will try to allocate contiguously. Try from the current hint the to
2214  // end of current window. Don't try for more than we actually need.
2215  //
2216 
2217  if (Desired > ClustersRemaining) {
2218 
2219  Desired = ClustersRemaining;
2220  }
2221 
2222  if (RtlAreBitsClear( &Vcb->FreeClusterBitMap,
2223  WindowRelativeHint - 2,
2224  Desired))
2225  {
2226  //
2227  // Clusters from hint->...windowend are free. Take them.
2228  //
2229 
2230  Index = WindowRelativeHint - 2;
2231  ClustersFound = Desired;
2232 
2233  if (FatIsFat32(Vcb)) {
2234 
2235  //
2236  // We're now up against the end of the current window, so indicate that we
2237  // want the next window in the sequence next time around. (If we're not up
2238  // against the end of the window, then we got what we needed and won't be
2239  // coming around again anyway).
2240  //
2241 
2242  SelectNextContigWindow = TRUE;
2243  WindowRelativeHint = 2;
2244  }
2245  else {
2246 
2247  //
2248  // FAT 12/16 - we've run up against the end of the volume. Clear the
2249  // hint, since we now have no idea where to look.
2250  //
2251 
2252  WindowRelativeHint = 0;
2253  }
2254 #if DBG
2255  PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2256 #endif // DBG
2257  }
2258  else {
2259 
2260  if (ExactMatchRequired) {
2261 
2262  //
2263  // If our caller required an exact match, then we're hosed. Bail out now.
2264  //
2265 
2266  try_leave( Result = FALSE);
2267  }
2268 
2269  //
2270  // Hint failed, drop back to pot luck
2271  //
2272 
2273  WindowRelativeHint = 0;
2274  }
2275  }
2276 
2277  if ((0 == WindowRelativeHint) && (0 == ClustersFound)) {
2278 
2279  if (ClustersRemaining <= Vcb->CurrentWindow->ClustersFree) {
2280 
2281  //
2282  // The remaining allocation could be satisfied entirely from this
2283  // window. We will ask only for what we need, to try and avoid
2284  // unnecessarily fragmenting large runs of space by always using
2285  // (part of) the largest run we can find. This call will return the
2286  // first run large enough.
2287  //
2288 
2289  Index = RtlFindClearBits( &Vcb->FreeClusterBitMap, ClustersRemaining, 0);
2290 
2291  if (-1 != Index) {
2292 
2293  ClustersFound = ClustersRemaining;
2294  }
2295  }
2296 
2297  if (0 == ClustersFound) {
2298 
2299  //
2300  // Still nothing, so just take the largest free run we can find.
2301  //
2302 
2303  ClustersFound = RtlFindLongestRunClear( &Vcb->FreeClusterBitMap, &Index );
2304 
2305  }
2306 #if DBG
2307  PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2308 #endif // DBG
2309  if (ClustersFound >= ClustersRemaining) {
2310 
2311  ClustersFound = ClustersRemaining;
2312  }
2313  else {
2314 
2315  //
2316  // If we just ran up to the end of a window, set up a hint that
2317  // we'd like the next consecutive window after this one. (FAT32 only)
2318  //
2319 
2320  if ( ((Index + ClustersFound) == Vcb->FreeClusterBitMap.SizeOfBitMap) &&
2321  FatIsFat32( Vcb)
2322  ) {
2323 
2324  SelectNextContigWindow = TRUE;
2325  WindowRelativeHint = 2;
2326  }
2327  }
2328  }
2329  }
2330 
2331  if (ClustersFound == 0) {
2332 
2333  ULONG FaveWindow = 0;
2334  BOOLEAN SelectedWindow;
2335 
2336  //
2337  // If we found no free clusters on a single-window FAT,
2338  // there was a bad problem with the free cluster count.
2339  //
2340 
2341  if (1 == Vcb->NumberOfWindows) {
2342 
2343 #ifdef _MSC_VER
2344 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2345 #endif
2346  FatBugCheck( 0, 5, 0 );
2347  }
2348 
2349  //
2350  // Switch to a new bucket. Possibly the next one if we're
2351  // currently on a roll (allocating contiguously)
2352  //
2353 
2354  SelectedWindow = FALSE;
2355 
2356  if ( SelectNextContigWindow) {
2357 
2358  ULONG NextWindow;
2359 
2360  NextWindow = (((ULONG)((PUCHAR)Vcb->CurrentWindow - (PUCHAR)Vcb->Windows)) / sizeof( FAT_WINDOW)) + 1;
2361 
2362  if ((NextWindow < Vcb->NumberOfWindows) &&
2363  ( Vcb->Windows[ NextWindow].ClustersFree > 0)
2364  ) {
2365 
2366  FaveWindow = NextWindow;
2367  SelectedWindow = TRUE;
2368  }
2369  else {
2370 
2371  if (ExactMatchRequired) {
2372 
2373  //
2374  // Some dope tried to allocate a run past the end of the volume...
2375  //
2376 
2377  try_leave( Result = FALSE);
2378  }
2379 
2380  //
2381  // Give up on the contiguous allocation attempts
2382  //
2383 
2384  WindowRelativeHint = 0;
2385  }
2386 
2387  SelectNextContigWindow = FALSE;
2388  }
2389 
2390  if (!SelectedWindow) {
2391 
2392  //
2393  // Select a new window to begin allocating from
2394  //
2395 
2396  FaveWindow = FatSelectBestWindow( Vcb);
2397  }
2398 
2399  //
2400  // By now we'd better have found a window with some free clusters
2401  //
2402 
2403  if (0 == Vcb->Windows[ FaveWindow].ClustersFree) {
2404 
2405 #ifdef _MSC_VER
2406 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2407 #endif
2408  FatBugCheck( 0, 5, 1 );
2409  }
2410 
2411  Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2412  SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2413 
2414  FatExamineFatEntries( IrpContext, Vcb,
2415  0,
2416  0,
2417  FALSE,
2418  &Vcb->Windows[FaveWindow],
2419  NULL);
2420 
2421  if (!Wait) {
2422 
2423  ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2424  }
2425 
2426  //
2427  // Now we'll just go around the loop again, having switched windows,
2428  // and allocate....
2429  //
2430 #if DBG
2431  PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2432 #endif //DBG
2433  } // if (clustersfound == 0)
2434  else {
2435 
2436  //
2437  // Take the clusters we found, convert our index to a cluster number
2438  // and unlock the bit map.
2439  //
2440 
2441  Window = Vcb->CurrentWindow;
2442 
2443  FatReserveClusters( IrpContext, Vcb, (Index + 2), ClustersFound );
2444 
2445  Cluster = Index + Window->FirstCluster;
2446 
2447  Window->ClustersFree -= ClustersFound;
2448  NT_ASSERT( PreviousClear - ClustersFound == Window->ClustersFree );
2449 
2451  LockedBitMap = FALSE;
2452 
2453  //
2454  // Add the newly alloced run to the Mcb.
2455  //
2456 
2457  BytesFound = ClustersFound << LogOfBytesPerCluster;
2458 
2460  CurrentVbo,
2461  FatGetLboFromIndex( Vcb, Cluster ),
2462  BytesFound );
2463 
2464  //
2465  // Connect the last allocated run with this one, and allocate
2466  // this run on the Fat.
2467  //
2468 
2469  if (PriorLastCluster != 0) {
2470 
2471  FatSetFatEntry( IrpContext,
2472  Vcb,
2473  PriorLastCluster,
2474  (FAT_ENTRY)Cluster );
2475  }
2476 
2477  //
2478  // Update the fat
2479  //
2480 
2481  FatAllocateClusters( IrpContext, Vcb, Cluster, ClustersFound );
2482 
2483  //
2484  // Prepare for the next iteration.
2485  //
2486 
2487  CurrentVbo += BytesFound;
2488  ClustersRemaining -= ClustersFound;
2489  PriorLastCluster = Cluster + ClustersFound - 1;
2490  }
2491  } // while (clustersremaining)
2492 
2493  } _SEH2_FINALLY {
2494 
2495  DebugUnwind( FatAllocateDiskSpace );
2496 
2497  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2498 
2499  //
2500  // Is there any unwinding to do?
2501  //
2502 
2503  if ( _SEH2_AbnormalTermination() || (FALSE == Result)) {
2504 
2505  //
2506  // Flag to the caller that they're getting nothing
2507  //
2508 
2509  *ByteCount = 0;
2510 
2511  //
2512  // There are three places we could have taken this exception:
2513  // when switching the window (FatExamineFatEntries), adding
2514  // a found run to the Mcb (FatAddMcbEntry), or when writing
2515  // the changes to the FAT (FatSetFatEntry). In the first case
2516  // we don't have anything to unwind before deallocation, and
2517  // can detect this by seeing if we have the ClusterBitmap
2518  // mutex out.
2519 
2520  if (!LockedBitMap) {
2521 
2523 
2524  //
2525  // In these cases, we have the possiblity that the FAT
2526  // window is still in place and we need to clear the bits.
2527  // If the Mcb entry isn't there (we raised trying to add
2528  // it), the effect of trying to remove it is a noop.
2529  //
2530 
2531  if (Window == Vcb->CurrentWindow) {
2532 
2533  //
2534  // Cluster reservation works on cluster 2 based window-relative
2535  // numbers, so we must convert. The subtraction will lose the
2536  // cluster 2 base, so bias the result.
2537  //
2538 
2539  FatUnreserveClusters( IrpContext, Vcb,
2540  (Cluster - Window->FirstCluster) + 2,
2541  ClustersFound );
2542  }
2543 
2544  //
2545  // Note that FatDeallocateDiskSpace will take care of adjusting
2546  // to account for the entries in the Mcb. All we have to account
2547  // for is the last run that didn't make it.
2548  //
2549 
2550  Window->ClustersFree += ClustersFound;
2551  Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound;
2552 
2554 
2555  FatRemoveMcbEntry( Vcb, Mcb, CurrentVbo, BytesFound );
2556 
2557  } else {
2558 
2559  //
2560  // Just drop the mutex now - we didn't manage to do anything
2561  // that needs to be backed out.
2562  //
2563 
2565  }
2566 
2567  _SEH2_TRY {
2568 
2569  //
2570  // Now we have tidied up, we are ready to just send the Mcb
2571  // off to deallocate disk space
2572  //
2573 
2574  FatDeallocateDiskSpace( IrpContext, Vcb, Mcb, FALSE );
2575 
2576  } _SEH2_FINALLY {
2577 
2578  //
2579  // Now finally (really), remove all the entries from the mcb
2580  //
2581 
2582  FatRemoveMcbEntry( Vcb, Mcb, 0, 0xFFFFFFFF );
2583  } _SEH2_END;
2584  }
2585 
2586  DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
2587 
2588  } _SEH2_END; // finally
2589  }
2590 
2591  return;
2592 }
2593 
2594 
2595 
2596 //
2597 // Limit our zeroing writes to 1 MB.
2598 //
2599 
2600 #define MAX_ZERO_MDL_SIZE (1*1024*1024)
2601 
2602 _Requires_lock_held_(_Global_critical_region_)
2603 VOID
2604 FatDeallocateDiskSpace (
2605  IN PIRP_CONTEXT IrpContext,
2606  IN PVCB Vcb,
2607  IN PLARGE_MCB Mcb,
2609  )
2610 
2611 /*++
2612 
2613 Routine Description:
2614 
2615  This procedure deallocates the disk space denoted by an input
2616  mcb. Note that the input MCB does not need to necessarily describe
2617  a chain that ends with a FAT_CLUSTER_LAST entry.
2618 
2619  Pictorially what is done is the following
2620 
2621  Fat |--a--|--b--|--c--|
2622  Mcb |--a--|--b--|--c--|
2623 
2624  becomes
2625 
2626  Fat |--0--|--0--|--0--|
2627  Mcb |--a--|--b--|--c--|
2628 
2629 Arguments:
2630 
2631  Vcb - Supplies the VCB being modified
2632 
2633  Mcb - Supplies the MCB describing the disk space to deallocate. Note
2634  that Mcb is unchanged by this procedure.
2635 
2636 
2637 Return Value:
2638 
2639  None.
2640 
2641 --*/
2642 
2643 {
2644  LBO Lbo;
2645  VBO Vbo;
2646 
2647  ULONG RunsInMcb;
2648  ULONG ByteCount;
2649  ULONG ClusterCount = 0;
2650  ULONG ClusterIndex = 0;
2651  ULONG McbIndex = 0;
2652 
2653  UCHAR LogOfBytesPerCluster;
2654 
2656 
2657  NTSTATUS ZeroingStatus = STATUS_SUCCESS;
2658 
2659  PAGED_CODE();
2660 
2661  DebugTrace(+1, Dbg, "FatDeallocateDiskSpace\n", 0);
2662  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
2663  DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
2664 
2665  LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
2666 
2667  RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2668 
2669  if ( RunsInMcb == 0 ) {
2670 
2671  DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2672  return;
2673  }
2674 
2675  //
2676  // If we are supposed to zero out the allocation before freeing it, do so.
2677  //
2678 
2679  if (ZeroOnDeallocate) {
2680 
2681  _SEH2_TRY {
2682 
2683  PIRP IoIrp;
2684  KEVENT IoEvent;
2686  PVOID Buffer = NULL;
2687  PMDL Mdl;
2688  ULONG ByteCountToZero;
2689  ULONG MdlSizeMapped;
2690 
2691  //
2692  // Issue the writes down for each run in the Mcb
2693  //
2694 
2695  KeInitializeEvent( &IoEvent,
2697  FALSE );
2698 
2699  for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2700 
2701  FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2702 
2703  //
2704  // Assert that Fat files have no holes.
2705  //
2706 
2707  NT_ASSERT( Lbo != 0 );
2708 
2709  //
2710  // Setup our MDL for the this run.
2711  //
2712 
2713  if (ByteCount > MAX_ZERO_MDL_SIZE) {
2714  Mdl = FatBuildZeroMdl( IrpContext, MAX_ZERO_MDL_SIZE);
2715  } else {
2716  Mdl = FatBuildZeroMdl( IrpContext, ByteCount);
2717  }
2718 
2719  if (!Mdl) {
2720  ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2721  goto try_exit;
2722  }
2723 
2724  _SEH2_TRY {
2725 
2726  //
2727  // Map the MDL.
2728  //
2729 
2730  Buffer = MmGetSystemAddressForMdlSafe(Mdl, HighPagePriority|MdlMappingNoExecute);
2731  if (!Buffer) {
2732  NT_ASSERT( FALSE );
2733  ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2734  goto try_exit2;
2735  }
2736 
2737  //
2738  // We might not have not been able to get an MDL big enough to map the whole
2739  // run. In this case, break up the write.
2740  //
2741 
2742  MdlSizeMapped = min( ByteCount, Mdl->ByteCount );
2743  ByteCountToZero = ByteCount;
2744 
2745  //
2746  // Loop until there are no bytes left to write
2747  //
2748 
2749  while (ByteCountToZero != 0) {
2750 
2751  //
2752  // Write zeros to each run.
2753  //
2754 
2755  KeClearEvent( &IoEvent );
2756 
2758  Vcb->TargetDeviceObject,
2759  Buffer,
2760  MdlSizeMapped,
2761  (PLARGE_INTEGER)&Lbo,
2762  &IoEvent,
2763  &Iosb );
2764 
2765  if (IoIrp == NULL) {
2766  NT_ASSERT( FALSE );
2767  ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2768  goto try_exit2;
2769  }
2770 
2771  //
2772  // Set a flag indicating that we want to write through any
2773  // cache on the controller. This eliminates the need for
2774  // an explicit flush-device after the write.
2775  //
2776 
2778 
2779  ZeroingStatus = IoCallDriver( Vcb->TargetDeviceObject, IoIrp );
2780 
2781  if (ZeroingStatus == STATUS_PENDING) {
2782 
2783  (VOID)KeWaitForSingleObject( &IoEvent,
2784  Executive,
2785  KernelMode,
2786  FALSE,
2787  (PLARGE_INTEGER)NULL );
2788 
2789  ZeroingStatus = Iosb.Status;
2790  }
2791 
2792  if (!NT_SUCCESS( ZeroingStatus )) {
2793  NT_ASSERT( FALSE );
2794  goto try_exit2;
2795  }
2796 
2797  //
2798  // Increment the starting offset where we will zero.
2799  //
2800 
2801  Lbo += MdlSizeMapped;
2802 
2803  //
2804  // Decrement ByteCount
2805  //
2806 
2807  ByteCountToZero -= MdlSizeMapped;
2808 
2809  if (ByteCountToZero < MdlSizeMapped) {
2810  MdlSizeMapped = ByteCountToZero;
2811  }
2812 
2813  }
2814 
2815  try_exit2:
2816 
2817  NOTHING;
2818 
2819  } _SEH2_FINALLY {
2820 
2821  if (!FlagOn( Mdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL) &&
2822  FlagOn( Mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA )) {
2823 
2824  MmUnmapLockedPages( Mdl->MappedSystemVa, Mdl );
2825  }
2826  IoFreeMdl( Mdl );
2827  } _SEH2_END;
2828 
2829  }
2830 
2831  try_exit:
2832 
2833  NOTHING;
2834 
2836 
2837  //
2838  // If we failed to zero for some reason, still go ahead and deallocate
2839  // the clusters. Otherwise we'll leak space from the volume.
2840  //
2841 
2842  ZeroingStatus = _SEH2_GetExceptionCode();
2843 
2844  } _SEH2_END;
2845 
2846  }
2847 
2848  NT_ASSERT( NT_SUCCESS(ZeroingStatus) );
2849 
2850  _SEH2_TRY {
2851 
2852  //
2853  // Run though the Mcb, freeing all the runs in the fat.
2854  //
2855  // We do this in two steps (first update the fat, then the bitmap
2856  // (which can't fail)) to prevent other people from taking clusters
2857  // that we need to re-allocate in the event of unwind.
2858  //
2859 
2860  ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
2861 
2862  RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2863 
2864  for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2865 
2866  FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2867 
2868  //
2869  // Assert that Fat files have no holes.
2870  //
2871 
2872  NT_ASSERT( Lbo != 0 );
2873 
2874  //
2875  // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
2876  //
2877 
2878  if (ByteCount == 0xFFFFFFFF) {
2879 
2880  //
2881  // Special case the computation of ClusterCout
2882  // when file is of max size (4GiB - 1).
2883  //
2884 
2885  ClusterCount = (1 << (32 - LogOfBytesPerCluster));
2886 
2887  } else {
2888 
2889  ClusterCount = ByteCount >> LogOfBytesPerCluster;
2890  }
2891 
2892  ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2893 
2894  FatFreeClusters( IrpContext, Vcb, ClusterIndex, ClusterCount );
2895  }
2896 
2897  //
2898  // From now on, nothing can go wrong .... (as in raise)
2899  //
2900 
2902 
2903  for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2904 
2905  ULONG ClusterEnd;
2906  ULONG MyStart, MyLength, count;
2907 #if DBG
2908 #ifndef __REACTOS__
2909  ULONG PreviousClear = 0;
2910 #endif
2911  ULONG i = 0;
2912 #endif
2913 
2914  FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2915 
2916  //
2917  // Mark the bits clear in the FreeClusterBitMap.
2918  //
2919 
2920  if (ByteCount == 0xFFFFFFFF) {
2921 
2922  //
2923  // Special case the computation of ClusterCout
2924  // when file is of max size (2^32 - 1).
2925  //
2926 
2927  ClusterCount = (1 << (32 - LogOfBytesPerCluster));
2928 
2929  } else {
2930 
2931  ClusterCount = ByteCount >> LogOfBytesPerCluster;
2932  }
2933 
2934  ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2935 
2936  Window = Vcb->CurrentWindow;
2937 
2938  //
2939  // If we've divided the bitmap, elide bitmap manipulation for
2940  // runs that are outside the current bucket.
2941  //
2942 
2943  ClusterEnd = ClusterIndex + ClusterCount - 1;
2944 
2945  if (!(ClusterIndex > Window->LastCluster ||
2946  ClusterEnd < Window->FirstCluster)) {
2947 
2948  //
2949  // The run being freed overlaps the current bucket, so we'll
2950  // have to clear some bits.
2951  //
2952 
2953  if (ClusterIndex < Window->FirstCluster &&
2954  ClusterEnd > Window->LastCluster) {
2955 
2956  MyStart = Window->FirstCluster;
2957  MyLength = Window->LastCluster - Window->FirstCluster + 1;
2958 
2959  } else if (ClusterIndex < Window->FirstCluster) {
2960 
2961  MyStart = Window->FirstCluster;
2962  MyLength = ClusterEnd - Window->FirstCluster + 1;
2963 
2964  } else {
2965 
2966  //
2967  // The range being freed starts in the bucket, and may possibly
2968  // extend beyond the bucket.
2969  //
2970 
2971  MyStart = ClusterIndex;
2972 
2973  if (ClusterEnd <= Window->LastCluster) {
2974 
2975  MyLength = ClusterCount;
2976 
2977  } else {
2978 
2979  MyLength = Window->LastCluster - ClusterIndex + 1;
2980  }
2981  }
2982 
2983  if (MyLength == 0) {
2984 
2985  continue;
2986  }
2987 
2988 #if DBG
2989 #ifndef __REACTOS__
2990 #ifdef _MSC_VER
2991 #pragma prefast( suppress:28931, "this is DBG build only" )
2992 #endif
2993  PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2994 #endif
2995 
2996 
2997  //
2998  // Verify that the Bits are all really set.
2999  //
3000 
3001  NT_ASSERT( MyStart + MyLength - Window->FirstCluster <= Vcb->FreeClusterBitMap.SizeOfBitMap );
3002 
3003  for (i = 0; i < MyLength; i++) {
3004 
3005  NT_ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
3006  MyStart - Window->FirstCluster + i) == 1 );
3007  }
3008 #endif // DBG
3009 
3010  FatUnreserveClusters( IrpContext, Vcb,
3011  MyStart - Window->FirstCluster + 2,
3012  MyLength );
3013  }
3014 
3015  //
3016  // Adjust the ClustersFree count for each bitmap window, even the ones
3017  // that are not the current window.
3018  //
3019 
3020  if (FatIsFat32(Vcb)) {
3021 
3022  Window = &Vcb->Windows[FatWindowOfCluster( ClusterIndex )];
3023 
3024  } else {
3025 
3026  Window = &Vcb->Windows[0];
3027  }
3028 
3029  MyStart = ClusterIndex;
3030 
3031  for (MyLength = ClusterCount; MyLength > 0; MyLength -= count) {
3032 
3033  count = FatMin(Window->LastCluster - MyStart + 1, MyLength);
3034  Window->ClustersFree += count;
3035 
3036  //
3037  // If this was not the last window this allocation spanned,
3038  // advance to the next.
3039  //
3040 
3041  if (MyLength != count) {
3042 
3043  Window++;
3044  MyStart = Window->FirstCluster;
3045  }
3046  }
3047 
3048  //
3049  // Deallocation is now complete. Adjust the free cluster count.
3050  //
3051 
3052  Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
3053  }
3054 
3055 #if DBG
3056  if (Vcb->CurrentWindow->ClustersFree !=
3057  RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)) {
3058 
3059  DbgPrint("%x vs %x\n", Vcb->CurrentWindow->ClustersFree,
3060  RtlNumberOfClearBits(&Vcb->FreeClusterBitMap));
3061 
3062  DbgPrint("%x for %x\n", ClusterIndex, ClusterCount);
3063  }
3064 #endif
3065 
3067 
3068 
3069  } _SEH2_FINALLY {
3070 
3071  DebugUnwind( FatDeallocateDiskSpace );
3072 
3073  //
3074  // Is there any unwinding to do?
3075  //
3076 
3077  ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
3078 
3079  if ( _SEH2_AbnormalTermination() ) {
3080 
3081  LBO LocalLbo;
3082  VBO LocalVbo;
3083 
3084  ULONG Index;
3085  ULONG Clusters;
3086  ULONG FatIndex;
3087  ULONG PriorLastIndex;
3088 
3089  //
3090  // For each entry we already deallocated, reallocate it,
3091  // chaining together as nessecary. Note that we continue
3092  // up to and including the last "for" iteration even though
3093  // the SetFatRun could not have been successful. This
3094  // allows us a convienent way to re-link the final successful
3095  // SetFatRun.
3096  //
3097  // It is possible that the reason we got here will prevent us
3098  // from succeeding in this operation.
3099  //
3100 
3101  PriorLastIndex = 0;
3102 
3103  for (Index = 0; Index <= McbIndex; Index++) {
3104 
3105  FatGetNextMcbEntry(Vcb, Mcb, Index, &LocalVbo, &LocalLbo, &ByteCount);
3106 
3107  if (ByteCount == 0xFFFFFFFF) {
3108 
3109  //
3110  // Special case the computation of ClusterCout
3111  // when file is of max size (2^32 - 1).
3112  //
3113 
3114  Clusters = (1 << (32 - LogOfBytesPerCluster));
3115 
3116  } else {
3117 
3118  Clusters = ByteCount >> LogOfBytesPerCluster;
3119  }
3120 
3121  FatIndex = FatGetIndexFromLbo( Vcb, LocalLbo );
3122 
3123  //
3124  // We must always restore the prior iteration's last
3125  // entry, pointing it to the first cluster of this run.
3126  //
3127 
3128  if (PriorLastIndex != 0) {
3129 
3130  FatSetFatEntry( IrpContext,
3131  Vcb,
3132  PriorLastIndex,
3133  (FAT_ENTRY)FatIndex );
3134  }
3135 
3136  //
3137  // If this is not the last entry (the one that failed)
3138  // then reallocate the disk space on the fat.
3139  //
3140 
3141  if ( Index < McbIndex ) {
3142 
3143  FatAllocateClusters(IrpContext, Vcb, FatIndex, Clusters);
3144 
3145  PriorLastIndex = FatIndex + Clusters - 1;
3146  }
3147  }
3148  }
3149 
3150  DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
3151  } _SEH2_END;
3152 
3153  return;
3154 }
3155 
3156 
3157 _Requires_lock_held_(_Global_critical_region_)
3158 VOID
3159 FatSplitAllocation (
3160  IN PIRP_CONTEXT IrpContext,
3161  IN PVCB Vcb,
3162  IN OUT PLARGE_MCB Mcb,
3163  IN VBO SplitAtVbo,
3165  )
3166 
3167 /*++
3168 
3169 Routine Description:
3170 
3171  This procedure takes a single mcb and splits its allocation into
3172  two separate allocation units. The separation must only be done
3173  on cluster boundaries, otherwise we bugcheck.
3174 
3175  On the disk this actually works by inserting a FAT_CLUSTER_LAST into
3176  the last index of the first part being split out.
3177 
3178  Pictorially what is done is the following (where ! denotes the end of
3179  the fat chain (i.e., FAT_CLUSTER_LAST)):
3180 
3181 
3182  Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3183 
3184  ^
3185  SplitAtVbo ---------------------+
3186 
3187  RemainingMcb (empty)
3188 
3189  becomes
3190 
3191  Mcb |--a--|--b--|--c--!
3192 
3193 
3194  RemainingMcb |--d--|--e--|--f--|
3195 
3196 Arguments:
3197 
3198  Vcb - Supplies the VCB being modified
3199 
3200  Mcb - Supplies the MCB describing the allocation being split into
3201  two parts. Upon return this Mcb now contains the first chain.
3202 
3203  SplitAtVbo - Supplies the VBO of the first byte for the second chain
3204  that we creating.
3205 
3206  RemainingMcb - Receives the MCB describing the second chain of allocated
3207  disk space. The caller passes in an initialized Mcb that
3208  is filled in by this procedure STARTING AT VBO 0.
3209 
3210 Return Value:
3211 
3212  VOID - TRUE if the operation completed and FALSE if it had to
3213  block but could not.
3214 
3215 --*/
3216 
3217 {
3218  VBO SourceVbo;
3219  VBO TargetVbo;
3220  VBO DontCare;
3221 
3222  LBO Lbo;
3223 
3224  ULONG ByteCount;
3225 
3226 #if DBG
3227  ULONG BytesPerCluster;
3228 #endif
3229 
3230  PAGED_CODE();
3231 
3232  DebugTrace(+1, Dbg, "FatSplitAllocation\n", 0);
3233  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3234  DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
3235  DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo);
3236  DebugTrace( 0, Dbg, " RemainingMcb = %p\n", RemainingMcb);
3237 
3238 #if DBG
3239  BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
3240 #endif
3241 
3242  //
3243  // Assert that the split point is cluster alligned
3244  //
3245 
3246  NT_ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 );
3247 
3248  //
3249  // We should never be handed an empty source MCB and asked to split
3250  // at a non zero point.
3251  //
3252 
3253  NT_ASSERT( !((0 != SplitAtVbo) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb))));
3254 
3255  //
3256  // Assert we were given an empty target Mcb.
3257  //
3258 
3259  //
3260  // This assert is commented out to avoid hitting in the Ea error
3261  // path. In that case we will be using the same Mcb's to split the
3262  // allocation that we used to merge them. The target Mcb will contain
3263  // the runs that the split will attempt to insert.
3264  //
3265  //
3266  // NT_ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
3267  //
3268 
3269  _SEH2_TRY {
3270 
3271  //
3272  // Move the runs after SplitAtVbo from the souce to the target
3273  //
3274 
3275  SourceVbo = SplitAtVbo;
3276  TargetVbo = 0;
3277 
3278  while (FatLookupMcbEntry(Vcb, Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3279 
3280  FatAddMcbEntry( Vcb, RemainingMcb, TargetVbo, Lbo, ByteCount );
3281 
3282  FatRemoveMcbEntry( Vcb, Mcb, SourceVbo, ByteCount );
3283 
3284  TargetVbo += ByteCount;
3285  SourceVbo += ByteCount;
3286 
3287  //
3288  // If SourceVbo overflows, we were actually snipping off the end
3289  // of the maximal file ... and are now done.
3290  //
3291 
3292  if (SourceVbo == 0) {
3293 
3294  break;
3295  }
3296  }
3297 
3298  //
3299  // Mark the last pre-split cluster as a FAT_LAST_CLUSTER
3300  //
3301 
3302  if ( SplitAtVbo != 0 ) {
3303 
3304  FatLookupLastMcbEntry( Vcb, Mcb, &DontCare, &Lbo, NULL );
3305 
3306  FatSetFatEntry( IrpContext,
3307  Vcb,
3309  FAT_CLUSTER_LAST );
3310  }
3311 
3312  } _SEH2_FINALLY {
3313 
3314  DebugUnwind( FatSplitAllocation );
3315 
3316  //
3317  // If we got an exception, we must glue back together the Mcbs
3318  //
3319 
3320  if ( _SEH2_AbnormalTermination() ) {
3321 
3322  TargetVbo = SplitAtVbo;
3323  SourceVbo = 0;
3324 
3325  while (FatLookupMcbEntry(Vcb, RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3326 
3327  FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3328 
3329  FatRemoveMcbEntry( Vcb, RemainingMcb, SourceVbo, ByteCount );
3330 
3331  TargetVbo += ByteCount;
3332  SourceVbo += ByteCount;
3333  }
3334  }
3335 
3336  DebugTrace(-1, Dbg, "FatSplitAllocation -> (VOID)\n", 0);
3337  } _SEH2_END;
3338 
3339  return;
3340 }
3341 
3342 
3343 _Requires_lock_held_(_Global_critical_region_)
3344 VOID
3345 FatMergeAllocation (
3346  IN PIRP_CONTEXT IrpContext,
3347  IN PVCB Vcb,
3348  IN OUT PLARGE_MCB Mcb,
3350  )
3351 
3352 /*++
3353 
3354 Routine Description:
3355 
3356  This routine takes two separate allocations described by two MCBs and
3357  joins them together into one allocation.
3358 
3359  Pictorially what is done is the following (where ! denotes the end of
3360  the fat chain (i.e., FAT_CLUSTER_LAST)):
3361 
3362 
3363  Mcb |--a--|--b--|--c--!
3364 
3365  SecondMcb |--d--|--e--|--f--|
3366 
3367  becomes
3368 
3369  Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3370 
3371  SecondMcb |--d--|--e--|--f--|
3372 
3373 
3374 Arguments:
3375 
3376  Vcb - Supplies the VCB being modified
3377 
3378  Mcb - Supplies the MCB of the first allocation that is being modified.
3379  Upon return this Mcb will also describe the newly enlarged
3380  allocation
3381 
3382  SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
3383  that is being appended to the first allocation. This
3384  procedure leaves SecondMcb unchanged.
3385 
3386 Return Value:
3387 
3388  VOID - TRUE if the operation completed and FALSE if it had to
3389  block but could not.
3390 
3391 --*/
3392 
3393 {
3394  VBO SpliceVbo = 0;
3395  LBO SpliceLbo;
3396 
3397  VBO SourceVbo;
3398  VBO TargetVbo = 0;
3399 
3400  LBO Lbo;
3401 
3402  ULONG ByteCount;
3403 
3404  PAGED_CODE();
3405 
3406  DebugTrace(+1, Dbg, "FatMergeAllocation\n", 0);
3407  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3408  DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
3409  DebugTrace( 0, Dbg, " SecondMcb = %p\n", SecondMcb);
3410 
3411  _SEH2_TRY {
3412 
3413  //
3414  // Append the runs from SecondMcb to Mcb
3415  //
3416 
3417  (void)FatLookupLastMcbEntry( Vcb, Mcb, &SpliceVbo, &SpliceLbo, NULL );
3418 
3419  SourceVbo = 0;
3420  TargetVbo = SpliceVbo + 1;
3421 
3422  while (FatLookupMcbEntry(Vcb, SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3423 
3424  FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3425 
3426  SourceVbo += ByteCount;
3427  TargetVbo += ByteCount;
3428  }
3429 
3430  //
3431  // Link the last pre-merge cluster to the first cluster of SecondMcb
3432  //
3433 
3435 
3436  FatSetFatEntry( IrpContext,
3437  Vcb,
3438  FatGetIndexFromLbo( Vcb, SpliceLbo ),
3440 
3441  } _SEH2_FINALLY {
3442 
3443  DebugUnwind( FatMergeAllocation );
3444 
3445  //
3446  // If we got an exception, we must remove the runs added to Mcb
3447  //
3448 
3449  if ( _SEH2_AbnormalTermination() ) {
3450 
3451  ULONG CutLength;
3452 
3453  if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) {
3454 
3455  FatRemoveMcbEntry( Vcb, Mcb, SpliceVbo + 1, CutLength);
3456  }
3457  }
3458 
3459  DebugTrace(-1, Dbg, "FatMergeAllocation -> (VOID)\n", 0);
3460  } _SEH2_END;
3461 
3462  return;
3463 }
3464 
3465 
3466 //
3467 // Internal support routine
3468 //
3469 
3472  IN PVCB Vcb,
3474  )
3475 
3476 /*++
3477 
3478 Routine Description:
3479 
3480  This procedure tells the caller how to interpret the input fat table
3481  entry. It will indicate if the fat cluster is available, resereved,
3482  bad, the last one, or the another fat index. This procedure can deal
3483  with both 12 and 16 bit fat.
3484 
3485 Arguments:
3486 
3487  Vcb - Supplies the Vcb to examine, yields 12/16 bit info
3488 
3489  Entry - Supplies the fat entry to examine
3490 
3491 Return Value:
3492 
3493  CLUSTER_TYPE - Is the type of the input Fat entry
3494 
3495 --*/
3496 
3497 {
3498  DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
3499  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3500  DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
3501 
3502  PAGED_CODE();
3503 
3504  switch(Vcb->AllocationSupport.FatIndexBitSize ) {
3505  case 32:
3507  break;
3508 
3509  case 12:
3510  NT_ASSERT( Entry <= 0xfff );
3511  if (Entry >= 0x0ff0) {
3512  Entry |= 0x0FFFF000;
3513  }
3514  break;
3515 
3516  default:
3517  case 16:
3518  NT_ASSERT( Entry <= 0xffff );
3519  if (Entry >= 0x0fff0) {
3520  Entry |= 0x0FFF0000;
3521  }
3522  break;
3523  }
3524 
3525  if (Entry == FAT_CLUSTER_AVAILABLE) {
3526 
3527  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3528 
3529  return FatClusterAvailable;
3530 
3531  } else if (Entry < FAT_CLUSTER_RESERVED) {
3532 
3533  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
3534 
3535  return FatClusterNext;
3536 
3537  } else if (Entry < FAT_CLUSTER_BAD) {
3538 
3539  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3540 
3541  return FatClusterReserved;
3542 
3543  } else if (Entry == FAT_CLUSTER_BAD) {
3544 
3545  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
3546 
3547  return FatClusterBad;
3548 
3549  } else {
3550 
3551  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
3552 
3553  return FatClusterLast;
3554  }
3555 }
3556 
3557 
3558 //
3559 // Internal support routine
3560 //
3561 
3562 VOID
3564  IN PIRP_CONTEXT IrpContext,
3565  IN PVCB Vcb,
3566  IN ULONG FatIndex,
3569  )
3570 
3571 /*++
3572 
3573 Routine Description:
3574 
3575  This routine takes an index into the fat and gives back the value
3576  in the Fat at this index. At any given time, for a 16 bit fat, this
3577  routine allows only one page per volume of the fat to be pinned in
3578  memory. For a 12 bit bit fat, the entire fat (max 6k) is pinned. This
3579  extra layer of caching makes the vast majority of requests very
3580  fast. The context for this caching stored in a structure in the Vcb.
3581 
3582 Arguments:
3583 
3584  Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
3585  fat access context, etc.
3586 
3587  FatIndex - Supplies the fat index to examine.
3588 
3589  FatEntry - Receives the fat entry pointed to by FatIndex. Note that
3590  it must point to non-paged pool.
3591 
3592  Context - This structure keeps track of a page of pinned fat between calls.
3593 
3594 --*/
3595 
3596 {
3597  PAGED_CODE();
3598 
3599  DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
3600  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3601  DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3602  DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);
3603 
3604  //
3605  // Make sure they gave us a valid fat index.
3606  //
3607 
3608  FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3609 
3610  //
3611  // Case on 12 or 16 bit fats.
3612  //
3613  // In the 12 bit case (mostly floppies) we always have the whole fat
3614  // (max 6k bytes) pinned during allocation operations. This is possibly
3615  // a wee bit slower, but saves headaches over fat entries with 8 bits
3616  // on one page, and 4 bits on the next.
3617  //
3618  // The 16 bit case always keeps the last used page pinned until all
3619  // operations are done and it is unpinned.
3620  //
3621 
3622  //
3623  // DEAL WITH 12 BIT CASE
3624  //
3625 
3626  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3627 
3628  //
3629  // Check to see if the fat is already pinned, otherwise pin it.
3630  //
3631 
3632  if (Context->Bcb == NULL) {
3633 
3634  FatReadVolumeFile( IrpContext,
3635  Vcb,
3636  FatReservedBytes( &Vcb->Bpb ),
3637  FatBytesPerFat( &Vcb->Bpb ),
3638  &Context->Bcb,
3639  &Context->PinnedPage );
3640  }
3641 
3642  //
3643  // Load the return value.
3644  //
3645 
3646 
3647  FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );
3648 
3649  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3650 
3651  //
3652  // DEAL WITH 32 BIT CASE
3653  //
3654 
3655  ULONG PageEntryOffset;
3656  ULONG OffsetIntoVolumeFile;
3657 
3658  //
3659  // Initialize two local variables that help us.
3660  //
3661  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
3662  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);
3663 
3664  //
3665  // Check to see if we need to read in a new page of fat
3666  //
3667 
3668  if ((Context->Bcb == NULL) ||
3669  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3670 
3671  //
3672  // The entry wasn't in the pinned page, so must we unpin the current
3673  // page (if any) and read in a new page.
3674  //
3675 
3676  FatUnpinBcb( IrpContext, Context->Bcb );
3677 
3678  FatReadVolumeFile( IrpContext,
3679  Vcb,
3680  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3681  PAGE_SIZE,
3682  &Context->Bcb,
3683  &Context->PinnedPage );
3684 
3685  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3686  }
3687 
3688  //
3689  // Grab the fat entry from the pinned page, and return
3690  //
3691 
3692  *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;
3693 
3694  } else {
3695 
3696  //
3697  // DEAL WITH 16 BIT CASE
3698  //
3699 
3700  ULONG PageEntryOffset;
3701  ULONG OffsetIntoVolumeFile;
3702 
3703  //
3704  // Initialize two local variables that help us.
3705  //
3706 
3707  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
3708  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);
3709 
3710  //
3711  // Check to see if we need to read in a new page of fat
3712  //
3713 
3714  if ((Context->Bcb == NULL) ||
3715  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3716 
3717  //
3718  // The entry wasn't in the pinned page, so must we unpin the current
3719  // page (if any) and read in a new page.
3720  //
3721 
3722  FatUnpinBcb( IrpContext, Context->Bcb );
3723 
3724  FatReadVolumeFile( IrpContext,
3725  Vcb,
3726  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3727  PAGE_SIZE,
3728  &Context->Bcb,
3729  &Context->PinnedPage );
3730 
3731  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3732  }
3733 
3734  //
3735  // Grab the fat entry from the pinned page, and return
3736  //
3737 
3738  *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
3739  }
3740 
3741  DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
3742  return;
3743 }
3744 
3745 
3746 _Requires_lock_held_(_Global_critical_region_)
3747 VOID
3748 FatSetFatEntry (
3749  IN PIRP_CONTEXT IrpContext,
3750  IN PVCB Vcb,
3751  IN ULONG FatIndex,
3753  )
3754 
3755 /*++
3756 
3757 Routine Description:
3758 
3759  This routine takes an index into the fat and puts a value in the Fat
3760  at this index. The routine special cases 12, 16 and 32 bit fats. In
3761  all cases we go to the cache manager for a piece of the fat.
3762 
3763  We have a special form of this call for setting the DOS-style dirty bit.
3764  Unlike the dirty bit in the boot sector, we do not go to special effort
3765  to make sure that this hits the disk synchronously - if the system goes
3766  down in the window between the dirty bit being set in the boot sector
3767  and the FAT index zero dirty bit being lazy written, then life is tough.
3768 
3769  The only possible scenario is that Win9x may see what it thinks is a clean
3770  volume that really isn't (hopefully Memphis will pay attention to our dirty
3771  bit as well). The dirty bit will get out quickly, and if heavy activity is
3772  occurring, then the dirty bit should actually be there virtually all of the
3773  time since the act of cleaning the volume is the "rare" occurance.
3774 
3775  There are synchronization concerns that would crop up if we tried to make
3776  this synchronous. This thread may already own the Bcb shared for the first
3777  sector of the FAT (so we can't get it exclusive for a writethrough). This
3778  would require some more serious replumbing to work around than I want to
3779  consider at this time.
3780 
3781  We can and do, however, synchronously set the bit clean.
3782 
3783  At this point the reader should understand why the NT dirty bit is where it is.
3784 
3785 Arguments:
3786 
3787  Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.
3788 
3789  FatIndex - Supplies the destination fat index.
3790 
3791  FatEntry - Supplies the source fat entry.
3792 
3793 --*/
3794 
3795 {
3796  LBO Lbo;
3797  PBCB Bcb = NULL;
3798  ULONG SectorSize;
3799  ULONG OffsetIntoVolumeFile;
3800  ULONG WasWait = TRUE;
3801  BOOLEAN RegularOperation = TRUE;
3802  BOOLEAN CleaningOperation = FALSE;
3804 
3805  PAGED_CODE();
3806 
3807  DebugTrace(+1, Dbg, "FatSetFatEntry\n", 0);
3808  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3809  DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3810  DebugTrace( 0, Dbg, " FatEntry = %4x\n", FatEntry);
3811 
3812  //
3813  // Make sure they gave us a valid fat index if this isn't the special
3814  // clean-bit modifying call.
3815  //
3816 
3817  if (FatIndex == FAT_DIRTY_BIT_INDEX) {
3818 
3819  //
3820  // We are setting the clean bit state. Of course, we could
3821  // have corruption that would cause us to try to fiddle the
3822  // reserved index - we guard against this by having the
3823  // special entry values use the reserved high 4 bits that
3824  // we know that we'll never try to set.
3825  //
3826 
3827  //
3828  // We don't want to repin the FAT pages involved here. Just
3829  // let the lazy writer hit them when it can.
3830  //
3831 
3832  RegularOperation = FALSE;
3833 
3834  switch (FatEntry) {
3835  case FAT_CLEAN_VOLUME:
3837  CleaningOperation = TRUE;
3838  break;
3839 
3840  case FAT_DIRTY_VOLUME:
3841  switch (Vcb->AllocationSupport.FatIndexBitSize) {
3842  case 12:
3844  break;
3845 
3846  case 32:
3848  break;
3849 
3850  default:
3852  break;
3853  }
3854  break;
3855 
3856  default:
3858  break;
3859  }
3860 
3861  //
3862  // Disable dirtying semantics for the duration of this operation. Force this
3863  // operation to wait for the duration.
3864  //
3865 
3866  WasWait = FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
3868 
3869  } else {
3870 
3872  FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3873  }
3874 
3875  //
3876  // Set Sector Size
3877  //
3878 
3879  SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
3880 
3881  //
3882  // Case on 12 or 16 bit fats.
3883  //
3884  // In the 12 bit case (mostly floppies) we always have the whole fat
3885  // (max 6k bytes) pinned during allocation operations. This is possibly
3886  // a wee bit slower, but saves headaches over fat entries with 8 bits
3887  // on one page, and 4 bits on the next.
3888  //
3889  // In the 16 bit case we only read the page that we need to set the fat
3890  // entry.
3891  //
3892 
3893  //
3894  // DEAL WITH 12 BIT CASE
3895  //
3896 
3897  _SEH2_TRY {
3898 
3899  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3900 
3901  PVOID PinnedFat;
3902 
3903  //
3904  // Make sure we have a valid entry
3905  //
3906 
3907  FatEntry &= 0xfff;
3908 
3909  //
3910  // We read in the entire fat. Note that using prepare write marks
3911  // the bcb pre-dirty, so we don't have to do it explicitly.
3912  //
3913 
3914  OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + FatIndex * 3 / 2;
3915 
3916  FatPrepareWriteVolumeFile( IrpContext,
3917  Vcb,
3918  FatReservedBytes( &Vcb->Bpb ),
3919  FatBytesPerFat( &Vcb->Bpb ),
3920  &Bcb,
3921  &PinnedFat,
3922  RegularOperation,
3923  FALSE );
3924 
3925  //
3926  // Mark the sector(s) dirty in the DirtyFatMcb. This call is
3927  // complicated somewhat for the 12 bit case since a single
3928  // entry write can span two sectors (and pages).
3929  //
3930  // Get the Lbo for the sector where the entry starts, and add it to
3931  // the dirty fat Mcb.
3932  //
3933 
3934  Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3935 
3936  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3937 
3938  //
3939  // If the entry started on the last byte of the sector, it continues
3940  // to the next sector, so mark the next sector dirty as well.
3941  //
3942  // Note that this entry will simply coalese with the last entry,
3943  // so this operation cannot fail. Also if we get this far, we have
3944  // made it, so no unwinding will be needed.
3945  //
3946 
3947  if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) {
3948 
3949  Lbo += SectorSize;
3950 
3951  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
3952  }
3953 
3954  //
3955  // Store the entry into the fat; we need a little synchonization
3956  // here and can't use a spinlock since the bytes might not be
3957  // resident.
3958  //
3959 
3961  ReleaseMutex = TRUE;
3962 
3963  FatSet12BitEntry( PinnedFat, FatIndex, FatEntry );
3964 
3966  ReleaseMutex = FALSE;
3967 
3968  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3969 
3970  //
3971  // DEAL WITH 32 BIT CASE
3972  //
3973 
3974  PULONG PinnedFatEntry32;
3975 
3976  //
3977  // Read in a new page of fat
3978  //
3979 
3980  OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
3981  FatIndex * sizeof( FAT_ENTRY );
3982 
3983  FatPrepareWriteVolumeFile( IrpContext,
3984  Vcb,
3985  OffsetIntoVolumeFile,
3986  sizeof(FAT_ENTRY),
3987  &Bcb,
3988  (PVOID *)&PinnedFatEntry32,
3989  RegularOperation,
3990  FALSE );
3991  //
3992  // Mark the sector dirty in the DirtyFatMcb
3993  //
3994 
3995  Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3996 
3997  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3998 
3999  //
4000  // Store the FatEntry to the pinned page.
4001  //
4002  // Preserve the reserved bits in FAT32 entries in the file heap.
4003  //
4004 
4005 #ifdef ALPHA
4007  ReleaseMutex = TRUE;
4008 #endif // ALPHA
4009 
4010  if (FatIndex != FAT_DIRTY_BIT_INDEX) {
4011 
4012  *PinnedFatEntry32 = ((*PinnedFatEntry32 & ~FAT32_ENTRY_MASK) | FatEntry);
4013 
4014  } else {
4015 
4016  *PinnedFatEntry32 = FatEntry;
4017  }
4018 
4019 #ifdef ALPHA
4021  ReleaseMutex = FALSE;
4022 #endif // ALPHA
4023 
4024  } else {
4025 
4026  //
4027  // DEAL WITH 16 BIT CASE
4028  //
4029 
4030  PUSHORT PinnedFatEntry;
4031 
4032  //
4033  // Read in a new page of fat
4034  //
4035 
4036  OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
4037  FatIndex * sizeof(USHORT);
4038 
4039  FatPrepareWriteVolumeFile( IrpContext,
4040  Vcb,
4041  OffsetIntoVolumeFile,
4042  sizeof(USHORT),
4043  &Bcb,
4044  (PVOID *)&PinnedFatEntry,
4045  RegularOperation,
4046  FALSE );
4047  //
4048  // Mark the sector dirty in the DirtyFatMcb
4049  //
4050 
4051  Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
4052 
4053  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
4054 
4055  //
4056  // Store the FatEntry to the pinned page.
4057  //
4058  // We need extra synchronization here for broken architectures
4059  // like the ALPHA that don't support atomic 16 bit writes.
4060  //
4061 
4062 #ifdef ALPHA
4064  ReleaseMutex = TRUE;
4065 #endif // ALPHA
4066 
4067  *PinnedFatEntry = (USHORT)FatEntry;
4068 
4069 #ifdef ALPHA
4071  ReleaseMutex = FALSE;
4072 #endif // ALPHA
4073  }
4074 
4075  } _SEH2_FINALLY {
4076 
4077  DebugUnwind( FatSetFatEntry );
4078 
4079  //
4080  // Re-enable volume dirtying in case this was a dirty bit operation.
4081  //
4082 
4083  ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_DIRTY );
4084 
4085  //
4086  // Make this operation asynchronous again if needed.
4087  //
4088 
4089  if (!WasWait) {
4090 
4091  ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
4092  }
4093 
4094  //
4095  // If we still somehow have the Mutex, release it.
4096  //
4097 
4098  if (ReleaseMutex) {
4099 
4101 
4103  }
4104 
4105  //
4106  // Unpin the Bcb. For cleaning operations or if the corruption was detected while mounting we make this write-through.
4107  //
4108 
4109  if ((CleaningOperation ||
4111  Bcb) {
4112 
4113  IO_STATUS_BLOCK IgnoreStatus;
4114 
4115  CcRepinBcb( Bcb );
4116  CcUnpinData( Bcb );
4117  DbgDoit( IrpContext->PinCount -= 1 );
4118  CcUnpinRepinnedBcb( Bcb, TRUE, &IgnoreStatus );
4119 
4120  } else {
4121 
4122  FatUnpinBcb(IrpContext, Bcb);
4123  }
4124 
4125  DebugTrace(-1, Dbg, "FatSetFatEntry -> (VOID)\n", 0);
4126  } _SEH2_END;
4127 
4128  return;
4129 }
4130 
4131 
4132 //
4133 // Internal support routine
4134 //
4135 
4136 VOID
4138  IN PIRP_CONTEXT IrpContext,
4139  IN PVCB Vcb,
4140  IN ULONG StartingFatIndex,
4141  IN ULONG ClusterCount,
4142  IN BOOLEAN ChainTogether
4143  )
4144 
4145 /*++
4146 
4147 Routine Description:
4148 
4149  This routine sets a continuous run of clusters in the fat. If ChainTogether
4150  is TRUE, then the clusters are linked together as in normal Fat fasion,
4151  with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is
4152  FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
4153  freeing all the clusters in the run.
4154 
4155 Arguments:
4156 
4157  Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.
4158 
4159  StartingFatIndex - Supplies the destination fat index.
4160 
4161  ClusterCount - Supplies the number of contiguous clusters to work on.
4162 
4163  ChainTogether - Tells us whether to fill the entries with links, or
4164  FAT_CLUSTER_AVAILABLE
4165 
4166 
4167 Return Value:
4168 
4169  VOID
4170 
4171 --*/
4172 
4173 {
4174 #define MAXCOUNTCLUS 0x10000
4175 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
4176  PBCB SavedBcbs[COUNTSAVEDBCBS][2];
4177 
4178  ULONG SectorSize;
4179  ULONG Cluster;
4180 
4181  LBO StartSectorLbo;
4182  LBO FinalSectorLbo;
4183  LBO Lbo;
4184 
4185  PVOID PinnedFat;
4186 
4188 
4189  ULONG SavedStartingFatIndex = StartingFatIndex;
4190 
4191  PAGED_CODE();
4192 
4193  DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
4194  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
4195  DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
4196  DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
4197  DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");
4198 
4199  //
4200  // Make sure they gave us a valid fat run.
4201  //
4202 
4203  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
4204  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);
4205 
4206  //
4207  // Check special case
4208  //
4209 
4210  if (ClusterCount == 0) {
4211 
4212  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4213  return;
4214  }
4215 
4216  //
4217  // Set Sector Size
4218  //
4219 
4220  SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
4221 
4222  //
4223  // Case on 12 or 16 bit fats.
4224  //
4225  // In the 12 bit case (mostly floppies) we always have the whole fat
4226  // (max 6k bytes) pinned during allocation operations. This is possibly
4227  // a wee bit slower, but saves headaches over fat entries with 8 bits
4228  // on one page, and 4 bits on the next.
4229  //
4230  // In the 16 bit case we only read one page at a time, as needed.
4231  //
4232 
4233  //
4234  // DEAL WITH 12 BIT CASE
4235  //
4236 
4237  _SEH2_TRY {
4238 
4239  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
4240 
4241  //
4242  // We read in the entire fat. Note that using prepare write marks
4243  // the bcb pre-dirty, so we don't have to do it explicitly.
4244  //
4245 
4246  RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);
4247 
4248  FatPrepareWriteVolumeFile( IrpContext,
4249  Vcb,
4250  FatReservedBytes( &Vcb->Bpb ),
4251  FatBytesPerFat( &Vcb->Bpb ),
4252  &SavedBcbs[0][0],
4253  &PinnedFat,
4254  TRUE,
4255  FALSE );
4256 
4257  //
4258  // Mark the affected sectors dirty. Note that FinalSectorLbo is
4259  // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4260  // we catch the case of a dirty fat entry straddling a sector boundry.
4261  //
4262  // Note that if the first AddMcbEntry succeeds, all following ones
4263  // will simply coalese, and thus also succeed.
4264  //
4265 
4266  StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
4267  & ~(SectorSize - 1);
4268 
4269  FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
4270  ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);
4271 
4272  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4273 
4274  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4275  }
4276 
4277  //
4278  // Store the entries into the fat; we need a little
4279  // synchonization here and can't use a spinlock since the bytes
4280  // might not be resident.
4281  //
4282 
4284  ReleaseMutex = TRUE;
4285 
4286  for (Cluster = StartingFatIndex;
4287  Cluster < StartingFatIndex + ClusterCount - 1;
4288  Cluster++) {
4289 
4290  FatSet12BitEntry( PinnedFat,
4291  Cluster,
4292  ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
4293  }
4294 
4295  //
4296  // Save the last entry
4297  //
4298 
4299  FatSet12BitEntry( PinnedFat,
4300  Cluster,
4301  ChainTogether ?
4303 
4305  ReleaseMutex = FALSE;
4306 
4307  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
4308 
4309  //
4310  // DEAL WITH 32 BIT CASE
4311  //
4312 
4313  for (;;) {
4314 
4315  VBO StartOffsetInVolume;
4316  VBO FinalOffsetInVolume;
4317 
4318  ULONG Page;
4319  ULONG FinalCluster;
4320  PULONG FatEntry = NULL;
4321  ULONG ClusterCountThisRun;
4322 
4323  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4324  StartingFatIndex * sizeof(FAT_ENTRY);
4325 
4326  if (ClusterCount > MAXCOUNTCLUS) {
4327  ClusterCountThisRun = MAXCOUNTCLUS;
4328  } else {
4329  ClusterCountThisRun = ClusterCount;
4330  }
4331 
4332  FinalOffsetInVolume = StartOffsetInVolume +
4333  (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);
4334 
4335  {
4336  ULONG NumberOfPages;
4337  ULONG Offset;
4338 
4339  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4340  (StartOffsetInVolume / PAGE_SIZE) + 1;
4341 
4342  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4343 
4344  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4345  Page < NumberOfPages;
4346  Page++, Offset += PAGE_SIZE ) {
4347 
4348  FatPrepareWriteVolumeFile( IrpContext,
4349  Vcb,
4350  Offset,
4351  PAGE_SIZE,
4352  &SavedBcbs[Page][0],
4353  (PVOID *)&SavedBcbs[Page][1],
4354  TRUE,
4355  FALSE );
4356 
4357  if (Page == 0) {
4358 
4359  FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
4360  (StartOffsetInVolume % PAGE_SIZE));
4361  }
4362  }
4363  }
4364 
4365  //
4366  // Mark the run dirty
4367  //
4368 
4369  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4370  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4371 
4372  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4373 
4374  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
4375  }
4376 
4377  //
4378  // Store the entries
4379  //
4380  // We need extra synchronization here for broken architectures
4381  // like the ALPHA that don't support atomic 16 bit writes.
4382  //
4383 
4384 #ifdef ALPHA
4386  ReleaseMutex = TRUE;
4387 #endif // ALPHA
4388 
4389  FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
4390  Page = 0;
4391 
4392  for (Cluster = StartingFatIndex;
4393  Cluster <= FinalCluster;
4394  Cluster++, FatEntry++) {
4395 
4396  //
4397  // If we just crossed a page boundry (as opposed to starting
4398  // on one), update our idea of FatEntry.
4399 
4400  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4401  (Cluster != StartingFatIndex) ) {
4402 
4403  Page += 1;
4404  FatEntry = (PULONG)SavedBcbs[Page][1];
4405  }
4406 
4407  *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4409  }
4410 
4411  //
4412  // Fix up the last entry if we were chaining together
4413  //
4414 
4415  if ((ClusterCount <= MAXCOUNTCLUS) &&
4416  ChainTogether ) {
4417 
4418  *(FatEntry-1) = FAT_CLUSTER_LAST;
4419  }
4420 
4421 #ifdef ALPHA
4423  ReleaseMutex = FALSE;
4424 #endif // ALPHA
4425 
4426  {
4427  ULONG i;
4428 
4429  //
4430  // Unpin the Bcbs
4431  //
4432 
4433  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4434 
4435  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4436  SavedBcbs[i][0] = NULL;
4437  }
4438  }
4439 
4440  if (ClusterCount <= MAXCOUNTCLUS) {
4441 
4442  break;
4443 
4444  } else {
4445 
4446  StartingFatIndex += MAXCOUNTCLUS;
4447  ClusterCount -= MAXCOUNTCLUS;
4448  }
4449  }
4450 
4451  } else {
4452 
4453  //
4454  // DEAL WITH 16 BIT CASE
4455  //
4456 
4457  VBO StartOffsetInVolume;
4458  VBO FinalOffsetInVolume;
4459 
4460  ULONG Page;
4461  ULONG FinalCluster;
4462  PUSHORT FatEntry = NULL;
4463 
4464  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4465  StartingFatIndex * sizeof(USHORT);
4466 
4467  FinalOffsetInVolume = StartOffsetInVolume +
4468  (ClusterCount - 1) * sizeof(USHORT);
4469 
4470  //
4471  // Read in one page of fat at a time. We cannot read in the
4472  // all of the fat we need because of cache manager limitations.
4473  //
4474  // SavedBcb was initialized to be able to hold the largest
4475  // possible number of pages in a fat plus and extra one to
4476  // accomadate the boot sector, plus one more to make sure there
4477  // is enough room for the RtlZeroMemory below that needs the mark
4478  // the first Bcb after all the ones we will use as an end marker.
4479  //
4480 
4481  {
4482  ULONG NumberOfPages;
4483  ULONG Offset;
4484 
4485  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4486  (StartOffsetInVolume / PAGE_SIZE) + 1;
4487 
4488  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4489 
4490  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4491  Page < NumberOfPages;
4492  Page++, Offset += PAGE_SIZE ) {
4493 
4494  FatPrepareWriteVolumeFile( IrpContext,
4495  Vcb,
4496  Offset,
4497  PAGE_SIZE,
4498  &SavedBcbs[Page][0],
4499  (PVOID *)&SavedBcbs[Page][1],
4500  TRUE,
4501  FALSE );
4502 
4503  if (Page == 0) {
4504 
4505  FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
4506  (StartOffsetInVolume % PAGE_SIZE));
4507  }
4508  }
4509  }
4510 
4511  //
4512  // Mark the run dirty
4513  //
4514 
4515  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4516  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4517 
4518  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4519 
4520  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4521  }
4522 
4523  //
4524  // Store the entries
4525  //
4526  // We need extra synchronization here for broken architectures
4527  // like the ALPHA that don't support atomic 16 bit writes.
4528  //
4529 
4530 #ifdef ALPHA
4532  ReleaseMutex = TRUE;
4533 #endif // ALPHA
4534 
4535  FinalCluster = StartingFatIndex + ClusterCount - 1;
4536  Page = 0;
4537 
4538  for (Cluster = StartingFatIndex;
4539  Cluster <= FinalCluster;
4540  Cluster++, FatEntry++) {
4541 
4542  //
4543  // If we just crossed a page boundry (as opposed to starting
4544  // on one), update our idea of FatEntry.
4545 
4546  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4547  (Cluster != StartingFatIndex) ) {
4548 
4549  Page += 1;
4550  FatEntry = (PUSHORT)SavedBcbs[Page][1];
4551  }
4552 
4553  *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4555  }
4556 
4557  //
4558  // Fix up the last entry if we were chaining together
4559  //
4560 
4561  if ( ChainTogether ) {
4562 
4563 #ifdef _MSC_VER
4564 #pragma warning( suppress: 4310 )
4565 #endif
4567 
4568  }
4569 #ifdef ALPHA
4571  ReleaseMutex = FALSE;
4572 #endif // ALPHA
4573  }
4574 
4575  } _SEH2_FINALLY {
4576 
4577  ULONG i;
4578 
4580 
4581  //
4582  // If we still somehow have the Mutex, release it.
4583  //
4584 
4585  if (ReleaseMutex) {
4586 
4588 
4590  }
4591 
4592  //
4593  // Unpin the Bcbs
4594  //
4595 
4596  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4597 
4598  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4599  }
4600 
4601  //
4602  // At this point nothing in this finally clause should have raised.
4603  // So, now comes the unsafe (sigh) stuff.
4604  //
4605 
4606  if ( _SEH2_AbnormalTermination() &&
4607  (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {
4608 
4609  //
4610  // Fat32 unwind
4611  //
4612  // This case is more complex because the FAT12 and FAT16 cases
4613  // pin all the needed FAT pages (128K max), after which it
4614  // can't fail, before changing any FAT entries. In the Fat32
4615  // case, it may not be practical to pin all the needed FAT
4616  // pages, because that could span many megabytes. So Fat32
4617  // attacks in chunks, and if a failure occurs once the first
4618  // chunk has been updated, we have to back out the updates.
4619  //
4620  // The unwind consists of walking back over each FAT entry we
4621  // have changed, setting it back to the previous value. Note
4622  // that the previous value with either be FAT_CLUSTER_AVAILABLE
4623  // (if ChainTogether==TRUE) or a simple link to the successor
4624  // (if ChainTogether==FALSE).
4625  //
4626  // We concede that any one of these calls could fail too; our
4627  // objective is to make this case no more likely than the case
4628  // for a file consisting of multiple disjoint runs.
4629  //
4630 
4631  while ( StartingFatIndex > SavedStartingFatIndex ) {
4632 
4633  StartingFatIndex--;
4634 
4635  FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
4636  ChainTogether ?
4637  StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
4638  }
4639  }
4640 
4641  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4642  } _SEH2_END;
4643 
4644  return;
4645 }
4646 
4647 
4648 //
4649 // Internal support routine
4650 //
4651 
4652 UCHAR
4654  IN ULONG Value
4655  )
4656 
4657 /*++
4658 
4659 Routine Description:
4660 
4661  This routine just computes the base 2 log of an integer. It is only used
4662  on objects that are know to be powers of two.
4663 
4664 Arguments:
4665 
4666  Value - The value to take the base 2 log of.
4667 
4668 Return Value:
4669 
4670  UCHAR - The base 2 log of Value.
4671 
4672 --*/
4673 
4674 {
4675  UCHAR Log = 0;
4676 
4677 #if FASTFATDBG
4678  ULONG OrigValue = Value;
4679 #endif
4680 
4681  PAGED_CODE();
4682 
4683  //
4684  // Knock bits off until we we get a one at position 0
4685  //
4686 
4687  while ( (Value & 0xfffffffe) != 0 ) {
4688 
4689  Log++;
4690  Value >>= 1;
4691  }
4692 
4693  //
4694  // If there was more than one bit set, the file system messed up,
4695  // Bug Check.
4696  //
4697 
4698  if (Value != 0x1) {
4699 
4700  DebugTrace(+1, Dbg, "LogOf\n", 0);
4701  DebugTrace( 0, Dbg, " Value = %8lx\n", OrigValue);
4702 
4703  DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
4704 
4705  DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
4706 
4707 #ifdef _MSC_VER
4708 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
4709 #endif
4710  FatBugCheck( Value, Log, 0 );
4711  }
4712 
4713  return Log;
4714 }
4715 
4716 
4717 VOID
4719  IN PIRP_CONTEXT IrpContext,
4720  IN PVCB Vcb,
4721  IN ULONG StartIndex OPTIONAL,
4722  IN ULONG EndIndex OPTIONAL,
4723  IN BOOLEAN SetupWindows,
4724  IN PFAT_WINDOW SwitchToWindow OPTIONAL,
4725  IN PULONG BitMapBuffer OPTIONAL
4726  )
4727 /*++
4728 
4729 Routine Description:
4730 
4731  This routine handles scanning a segment of the FAT into in-memory structures.
4732 
4733  There are three fundamental cases, with variations depending on the FAT type:
4734 
4735  1) During volume setup, FatSetupAllocations
4736 
4737  1a) for FAT12/16, read the FAT into our free clusterbitmap
4738  1b) for FAT32, perform the initial scan for window free cluster counts
4739 
4740  2) Switching FAT32 windows on the fly during system operation
4741 
4742  3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
4743  call (only for FAT32)
4744 
4745  There really is too much going on in here. At some point this should be
4746  substantially rewritten.
4747 
4748 Arguments:
4749 
4750  Vcb - Supplies the volume involved
4751 
4752  StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
4753 
4754  EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
4755 
4756  SetupWindows - Indicates if we are doing the initial FAT32 scan
4757 
4758  SwitchToWindow - Supplies the FAT window we are examining and will switch to
4759 
4760  BitMapBuffer - Supplies a specific bitmap to fill in, if not supplied we fill
4761  in the volume free cluster bitmap if !SetupWindows
4762 
4763 Return Value:
4764 
4765  None. Lots of side effects.
4766 
4767 --*/
4768 {
4770  ULONG Page = 0;
4771  ULONG Offset = 0;
4772  ULONG FatIndex;
4774  FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4775  PUSHORT FatBuffer;
4776  PVOID pv;
4777  PBCB Bcb = NULL;
4778  ULONG EntriesPerWindow;
4779 
4780  ULONG ClustersThisRun;
4781  ULONG StartIndexOfThisRun;
4782 
4783  PULONG FreeClusterCount = NULL;
4784 
4785  PFAT_WINDOW CurrentWindow = NULL;
4786 
4787  PVOID NewBitMapBuffer = NULL;
4788  PRTL_BITMAP BitMap = NULL;
4789  RTL_BITMAP PrivateBitMap;
4790 
4791  ULONG ClusterSize = 0;
4792  ULONG PrefetchPages = 0;
4793  ULONG FatPages = 0;
4794 
4795  VBO BadClusterVbo = 0;
4796  LBO Lbo = 0;
4797 
4798  enum RunType {
4799  FreeClusters,
4800  AllocatedClusters,
4801  UnknownClusters
4802  } CurrentRun;
4803 
4804  PAGED_CODE();
4805 
4806  //
4807  // Now assert correct usage.
4808  //
4809 
4810  FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4811 
4812  NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4813  NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4814 
4815  if (Vcb->NumberOfWindows > 1) {
4816 
4817  //
4818  // FAT32: Calculate the number of FAT entries covered by a window. This is
4819  // equal to the number of bits in the freespace bitmap, the size of which
4820  // is hardcoded.
4821  //
4822 
4823  EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4824 
4825  } else {
4826 
4827  EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4828  }
4829 
4830  //
4831  // We will also fill in the cumulative count of free clusters for
4832  // the entire volume. If this is not appropriate, NULL it out
4833  // shortly.
4834  //
4835 
4836  FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4837 
4838  if (SetupWindows) {
4839 
4840  NT_ASSERT(BitMapBuffer == NULL);
4841 
4842  //
4843  // In this case we're just supposed to scan the fat and set up
4844  // the information regarding where the buckets fall and how many
4845  // free clusters are in each.
4846  //
4847  // It is fine to monkey with the real windows, we must be able
4848  // to do this to activate the volume.
4849  //
4850 
4851  BitMap = NULL;
4852 
4853  CurrentWindow = &Vcb->Windows[0];
4854  CurrentWindow->FirstCluster = StartIndex;
4855  CurrentWindow->ClustersFree = 0;
4856 
4857  //
4858  // We always wish to calculate total free clusters when
4859  // setting up the FAT windows.
4860  //
4861 
4862  } else if (BitMapBuffer == NULL) {
4863 
4864  //
4865  // We will be filling in the free cluster bitmap for the volume.
4866  // Careful, we can raise out of here and be hopelessly hosed if
4867  // we built this up in the main bitmap/window itself.
4868  //
4869  // For simplicity's sake, we'll do the swap for everyone. FAT32
4870  // provokes the need since we can't tolerate partial results
4871  // when switching windows.
4872  //
4873 
4874  NT_ASSERT( SwitchToWindow );
4875 
4876  CurrentWindow = SwitchToWindow;
4877  StartIndex = CurrentWindow->FirstCluster;
4878  EndIndex = CurrentWindow->LastCluster;
4879 
4880  BitMap = &PrivateBitMap;
4881  NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4882  (EntriesPerWindow + 7) / 8,
4883  TAG_FAT_BITMAP );
4884 
4885  RtlInitializeBitMap( &PrivateBitMap,
4886  NewBitMapBuffer,
4887  EndIndex - StartIndex + 1);
4888 
4889  if ((FatIndexBitSize == 32) &&
4890  (Vcb->NumberOfWindows > 1)) {
4891 
4892  //
4893  // We do not wish count total clusters here.
4894  //
4895 
4896  FreeClusterCount = NULL;
4897 
4898  }
4899 
4900  } else {
4901 
4902  BitMap = &PrivateBitMap;
4903  RtlInitializeBitMap(&PrivateBitMap,
4904  BitMapBuffer,
4905  EndIndex - StartIndex + 1);
4906 
4907  //
4908  // We do not count total clusters here.
4909  //
4910 
4911  FreeClusterCount = NULL;
4912  }
4913 
4914  //
4915  // Now, our start index better be in the file heap.
4916  //
4917 
4918  NT_ASSERT( StartIndex >= 2 );
4919 
4920  _SEH2_TRY {
4921 
4922  //
4923  // Pick up the initial chunk of the FAT and first entry.
4924  //
4925 
4926  if (FatIndexBitSize == 12) {
4927 
4928  //
4929  // We read in the entire fat in the 12 bit case.
4930  //
4931 
4932  FatReadVolumeFile( IrpContext,
4933  Vcb,
4934  FatReservedBytes( &Vcb->Bpb ),
4935  FatBytesPerFat( &Vcb->Bpb ),
4936  &Bcb,
4937  (PVOID *)&FatBuffer );
4938 
4939  FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4940 
4941  } else {
4942 
4943  //
4944  // Read in one page of fat at a time. We cannot read in the
4945  // all of the fat we need because of cache manager limitations.
4946  //
4947 
4948  ULONG BytesPerEntry = FatIndexBitSize >> 3;
4949 
4950  FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE;
4951  Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4952 
4953  Offset = Page * PAGE_SIZE;
4954 
4955  //
4956  // Prefetch the FAT entries in memory for optimal performance.
4957  //
4958 
4959  PrefetchPages = FatPages - Page;
4960 
4961  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
4962 
4963  PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page;
4964  }
4965 
4966 #if (NTDDI_VERSION >= NTDDI_WIN8)
4967  FatPrefetchPages( IrpContext,
4968  Vcb->VirtualVolumeFile,
4969  Page,
4970  PrefetchPages );
4971 #endif
4972 
4973  FatReadVolumeFile( IrpContext,
4974  Vcb,
4975  Offset,
4976  PAGE_SIZE,
4977  &Bcb,
4978  &pv);
4979 
4980  if (FatIndexBitSize == 32) {
4981 
4982  FatBuffer = (PUSHORT)((PUCHAR)pv +
4983  (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4984  PAGE_SIZE);
4985 
4986  FirstFatEntry = *((PULONG)FatBuffer);
4987  FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4988 
4989  } else {
4990 
4991  FatBuffer = (PUSHORT)((PUCHAR)pv +
4992  FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4993 
4994  FirstFatEntry = *FatBuffer;
4995  }
4996 
4997  }
4998 
4999  ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster);
5000 
5001  CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
5002  FreeClusters : AllocatedClusters;
5003 
5004  StartIndexOfThisRun = StartIndex;
5005 
5006  for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
5007 
5008  if (FatIndexBitSize == 12) {
5009 
5010  FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
5011 
5012  } else {
5013 
5014  //
5015  // If we are setting up the FAT32 windows and have stepped into a new
5016  // bucket, finalize this one and move forward.
5017  //
5018 
5019  if (SetupWindows &&
5020  FatIndex > StartIndex &&
5021  (FatIndex - 2) % EntriesPerWindow == 0) {
5022 
5023  CurrentWindow->LastCluster = FatIndex - 1;
5024 
5025  if (CurrentRun == FreeClusters) {
5026 
5027  //
5028  // We must be counting clusters in order to modify the
5029  // contents of the window.
5030  //
5031 
5032  NT_ASSERT( FreeClusterCount );
5033 
5034  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5035  CurrentWindow->ClustersFree += ClustersThisRun;
5036 
5037  if (FreeClusterCount) {
5038  *FreeClusterCount += ClustersThisRun;
5039  }
5040 
5041  } else {
5042 
5043  NT_ASSERT(CurrentRun == AllocatedClusters);
5044 
5045  }
5046 
5047  StartIndexOfThisRun = FatIndex;
5048  CurrentRun = UnknownClusters;
5049 
5050  CurrentWindow++;
5051  CurrentWindow->ClustersFree = 0;
5052  CurrentWindow->FirstCluster = FatIndex;
5053  }
5054 
5055  //
5056  // If we just stepped onto a new page, grab a new pointer.
5057  //
5058 
5059  if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
5060 
5061  FatUnpinBcb( IrpContext, Bcb );
5062 
5063  Page++;
5064  Offset += PAGE_SIZE;
5065 
5066 #if (NTDDI_VERSION >= NTDDI_WIN8)
5067  //
5068  // If we have exhausted all the prefetch pages, prefetch the next chunk.
5069  //
5070 
5071  if (--PrefetchPages == 0) {
5072 
5073  PrefetchPages = FatPages - Page;
5074 
5075  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
5076 
5077  PrefetchPages = FAT_PREFETCH_PAGE_COUNT;
5078  }
5079 
5080  FatPrefetchPages( IrpContext,
5081  Vcb->VirtualVolumeFile,
5082  Page,
5083  PrefetchPages );
5084  }
5085 #endif
5086 
5087  FatReadVolumeFile( IrpContext,
5088  Vcb,
5089  Offset,
5090  PAGE_SIZE,
5091  &Bcb,
5092  &pv );
5093 
5094  FatBuffer = (PUSHORT)pv;
5095  }
5096 
5097  if (FatIndexBitSize == 32) {
5098 
5099 #ifndef __REACTOS__
5100 #ifdef _MSC_VER
5101 #pragma warning( suppress: 4213 )
5102 #endif
5103  FatEntry = *((PULONG)FatBuffer)++;
5105 #else
5106  FatEntry = *FatBuffer;
5107  FatBuffer += 1;
5109 #endif
5110 
5111  } else {
5112 
5113  FatEntry = *FatBuffer;
5114  FatBuffer += 1;
5115  }
5116  }
5117 
5118  if (CurrentRun == UnknownClusters) {
5119 
5120  CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
5121  FreeClusters : AllocatedClusters;
5122  }
5123 
5124  //
5125  // Are we switching from a free run to an allocated run?
5126  //
5127 
5128  if (CurrentRun == FreeClusters &&
5130 
5131  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5132 
5133  if (FreeClusterCount) {
5134 
5135  *FreeClusterCount += ClustersThisRun;
5136  CurrentWindow->ClustersFree += ClustersThisRun;
5137  }
5138 
5139  if (BitMap) {
5140 
5141  RtlClearBits( BitMap,
5142  StartIndexOfThisRun - StartIndex,
5143  ClustersThisRun );
5144  }
5145 
5146  CurrentRun = AllocatedClusters;
5147  StartIndexOfThisRun = FatIndex;
5148  }
5149 
5150  //
5151  // Are we switching from an allocated run to a free run?
5152  //
5153 
5154  if (CurrentRun == AllocatedClusters &&
5156 
5157  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5158 
5159  if (BitMap) {
5160 
5161  RtlSetBits( BitMap,
5162  StartIndexOfThisRun - StartIndex,
5163  ClustersThisRun );
5164  }
5165 
5166  CurrentRun = FreeClusters;
5167  StartIndexOfThisRun = FatIndex;
5168  }
5169 
5170  //
5171  // If the entry is marked bad, add it to the bad block MCB
5172  //
5173 
5174  if ((SetupWindows || (Vcb->NumberOfWindows == 1)) &&
5176 
5177  //
5178  // This cluster is marked bad.
5179  // Add it to the BadBlockMcb.
5180  //
5181 
5183  FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize );
5184  BadClusterVbo += ClusterSize;
5185  }
5186  }
5187 
5188  //
5189  // If we finished the scan, then we know about all the possible bad clusters.
5190  //
5191 
5193 
5194  //
5195  // Now we have to record the final run we encountered
5196  //
5197 
5198  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5199 
5200  if (CurrentRun == FreeClusters) {
5201 
5202  if (FreeClusterCount) {
5203 
5204  *FreeClusterCount += ClustersThisRun;
5205  CurrentWindow->ClustersFree += ClustersThisRun;
5206  }
5207 
5208  if (BitMap) {
5209 
5210  RtlClearBits( BitMap,
5211  StartIndexOfThisRun - StartIndex,
5212  ClustersThisRun );
5213  }
5214 
5215  } else {
5216 
5217  if (BitMap) {
5218 
5219  RtlSetBits( BitMap,
5220  StartIndexOfThisRun - StartIndex,
5221  ClustersThisRun );
5222  }
5223  }
5224 
5225  //
5226  // And finish the last window if we are in setup.
5227  //
5228 
5229  if (SetupWindows) {
5230 
5231  CurrentWindow->LastCluster = FatIndex - 1;
5232  }
5233 
5234  //
5235  // Now switch the active window if required. We've succesfully gotten everything
5236  // nailed down.
5237  //
5238  // If we were tracking the free cluster count, this means we should update the
5239  // window. This is the case of FAT12/16 initialization.
5240  //
5241 
5242  if (SwitchToWindow) {
5243 
5244  if (Vcb->FreeClusterBitMap.Buffer) {
5245 
5246  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
5247  }
5248 
5249  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
5250  NewBitMapBuffer,
5251  EndIndex - StartIndex + 1 );
5252 
5253  NewBitMapBuffer = NULL;
5254 
5255  Vcb->CurrentWindow = SwitchToWindow;
5256  Vcb->ClusterHint = (ULONG)-1;
5257 
5258  if (FreeClusterCount) {
5259 
5260  NT_ASSERT( !SetupWindows );
5261 
5262  Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
5263  }
5264  }
5265 
5266  //
5267  // Make sure plausible things occured ...
5268  //
5269 
5270  if (!SetupWindows && BitMapBuffer == NULL) {
5271 
5273  }
5274 
5275  NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
5276 
5277  } _SEH2_FINALLY {
5278 
5279  //
5280  // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5281  //
5282 
5283  FatUnpinBcb( IrpContext, Bcb);
5284 
5285  if (NewBitMapBuffer) {
5286 
5287  ExFreePool( NewBitMapBuffer );
5288  }
5289  } _SEH2_END;
5290 }
5291 
#define FAT_CLUSTER_LAST
Definition: fat.h:258
#define DbgDoit(X)
Definition: fatdata.h:336
IN PVCB IN ULONG AbsoluteClusterHint
Definition: fatprocs.h:343
#define FatFileAreaLbo(B)
Definition: fat.h:458
#define IN
Definition: typedefs.h:39
CLUSTER_TYPE FatInterpretClusterType(IN PVCB Vcb, IN FAT_ENTRY Entry)
Definition: allocsup.c:3471
#define FatLockFreeClusterBitMap(VCB)
Definition: allocsup.c:99
#define TRUE
Definition: types.h:120
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
#define IRP_CONTEXT_FLAG_WAIT
Definition: cdstruc.h:1221
#define FatGetLboFromIndex(VCB, FAT_INDEX)
Definition: fat.h:559
#define STATUS_INSUFFICIENT_RESOURCES
Definition: udferr_usr.h:158
ULONG ClustersFree
Definition: fatstruc.h:174
struct png_info_def **typedef void(__cdecl typeof(png_destroy_read_struct))(struct png_struct_def **
Definition: typeof.h:49
IN PVCB IN VBO IN ULONG OUT PBCB * Bcb
Definition: fatprocs.h:411
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
Definition: allocsup.c:83
#define FCB_LOOKUP_ALLOCATIONSIZE_HINT
Definition: fatstruc.h:1240
PVOID NTAPI FsRtlAllocatePoolWithTag(IN POOL_TYPE PoolType, IN ULONG NumberOfBytes, IN ULONG Tag)
Definition: filter.c:229
#define FCB_STATE_ZERO_ON_DEALLOCATION
Definition: fatstruc.h:1223
#define DbgPrint
Definition: loader.c:25
#define FatMin(a, b)
Definition: allocsup.c:30
FSRTL_ADVANCED_FCB_HEADER Header
Definition: cdstruc.h:931
PIRP NTAPI IoBuildSynchronousFsdRequest(IN ULONG MajorFunction, IN PDEVICE_OBJECT DeviceObject, IN PVOID Buffer, IN ULONG Length, IN PLARGE_INTEGER StartingOffset, IN PKEVENT Event, IN PIO_STATUS_BLOCK IoStatusBlock)
Definition: irp.c:1069
#define FatIndexBitSize(B)
Definition: fat.h:515
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:323
Definition: cdstruc.h:908
#define BooleanFlagOn(F, SF)
Definition: ext2fs.h:183
NTSTATUS FatPrefetchPages(IN PIRP_CONTEXT IrpContext, IN PFILE_OBJECT FileObject, IN ULONG StartingPage, IN ULONG PageCount)
Definition: cachesup.c:1929
PMDL FatBuildZeroMdl(__in PIRP_CONTEXT IrpContext, __in ULONG Length)
Definition: deviosup.c:3734
NTSTATUS FreeClusters(PNTFS_VCB Vcb, PNTFS_ATTR_CONTEXT AttrContext, ULONG AttrOffset, PFILE_RECORD_HEADER FileRecord, ULONG ClustersToFree)
Definition: attrib.c:1057
GLuint GLuint GLsizei count
Definition: gl.h:1545
ULONG32 VBO
Definition: fat.h:38
unsigned char * PUCHAR
Definition: retypes.h:3
IN PVCB IN OUT PLARGE_MCB IN PLARGE_MCB SecondMcb
Definition: fatprocs.h:373
#define FCB_STATE_FLUSH_FAT
Definition: fatstruc.h:1196
_In_ CLIPOBJ _In_ BRUSHOBJ _In_ LONG x1
Definition: winddi.h:3706
LONG NTSTATUS
Definition: precomp.h:26
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:306
#define TAG_FAT_WINDOW
Definition: nodetype.h:166
VOID FatReadVolumeFile(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN VBO StartingVbo, IN ULONG ByteCount, OUT PBCB *Bcb, OUT PVOID *Buffer)
Definition: cachesup.c:102
VOID NTAPI CcUnpinData(IN PVOID Bcb)
Definition: pinsup.c:955
#define FAT_DIRTY_VOLUME
Definition: fat.h:235
$ULONG LowPart
Definition: ntbasedef.h:576
ULONG FirstCluster
Definition: fatstruc.h:172
Definition: cdstruc.h:504
LBO * PLBO
Definition: fat.h:36
#define RtlCheckBit(BMH, BP)
Definition: rtlfuncs.h:3154
#define FAT_CLUSTER_BAD
Definition: fat.h:257
VOID FatExamineFatEntries(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
Definition: allocsup.c:4718
static CC_FILE_SIZES FileSizes
BOOLEAN FatLookupLastMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, OUT PVBO Vbo, OUT PLBO Lbo, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:494
#define FatRaiseStatus(IRPCONTEXT, STATUS)
Definition: fatprocs.h:2974
#define MmGetSystemAddressForMdlSafe(_Mdl, _Priority)
NTSTATUS NTAPI KeWaitForSingleObject(IN PVOID Object, IN KWAIT_REASON WaitReason, IN KPROCESSOR_MODE WaitMode, IN BOOLEAN Alertable, IN PLARGE_INTEGER Timeout OPTIONAL)
Definition: wait.c:416
ULONG NTAPI FsRtlNumberOfRunsInLargeMcb(IN PLARGE_MCB Mcb)
Definition: largemcb.c:769
#define MAXCOUNTCLUS
#define MDL_MAPPED_TO_SYSTEM_VA
Definition: mmtypes.h:18
VOID FatSetupAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:357
VOID NTAPI FsRtlTruncateLargeMcb(IN PLARGE_MCB Mcb, IN LONGLONG Vbn)
Definition: largemcb.c:1016
#define FatVerifyIndexIsValid(IC, V, I)
Definition: fat.h:532
IN UCHAR Value
Definition: halp.h:394
LARGE_INTEGER FileSize
Definition: cctypes.h:16
_SEH2_TRY
Definition: create.c:4226
uint32_t ULONG_PTR
Definition: typedefs.h:64
BOOLEAN NTAPI ExAcquireResourceExclusiveLite(IN PERESOURCE Resource, IN BOOLEAN Wait)
Definition: resource.c:770
#define FatReservedBytes(B)
Definition: fat.h:414
INLINE ULONG FatSelectBestWindow(IN PVCB Vcb)
Definition: allocsup.c:277
#define FatUnreserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
Definition: allocsup.c:176
IN PFCB FcbOrDcb
Definition: fatprocs.h:306
_Must_inspect_result_ _In_ ULONG Flags
Definition: wsk.h:170
Definition: window.c:28
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN OUT PBOOLEAN EndOnMax
Definition: fatprocs.h:306
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
#define STATUS_FILE_CORRUPT_ERROR
Definition: udferr_usr.h:168
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
ULONGLONG QuadPart
Definition: ms-dtyp.idl:185
#define VCB_STATE_FLAG_BAD_BLOCKS_POPULATED
Definition: fatstruc.h:575
BOOLEAN FatLookupMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, OUT PLBO Lbo, OUT PULONG ByteCount OPTIONAL, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:418
Definition: Header.h:8
LONGLONG LBO
Definition: fat.h:34
VOID NTAPI MmUnmapLockedPages(IN PVOID BaseAddress, IN PMDL Mdl)
Definition: mdlsup.c:841
#define CcIsFileCached(FO)
VOID FatRemoveMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN ULONG SectorCount)
Definition: fsctrl.c:599
#define FatBugCheck(A, B, C)
Definition: nodetype.h:104
#define FatBytesPerFat(B)
Definition: fat.h:410
#define _SEH2_GetExceptionInformation()
Definition: pseh2_64.h:11
VOID NTAPI FsRtlInitializeLargeMcb(IN PLARGE_MCB Mcb, IN POOL_TYPE PoolType)
Definition: largemcb.c:450
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
UCHAR FatLogOf(IN ULONG Value)
Definition: allocsup.c:4653
unsigned char BOOLEAN
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN Allocated
Definition: fatprocs.h:306
#define FatWindowOfCluster(C)
Definition: allocsup.c:253
smooth NULL
Definition: ftsmooth.c:416
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:13
#define FatRootDirectorySize(B)
Definition: fat.h:427
#define FAT_CLEAN_VOLUME
Definition: fat.h:234
_At_(*)(_In_ PWSK_CLIENT Client, _In_opt_ PUNICODE_STRING NodeName, _In_opt_ PUNICODE_STRING ServiceName, _In_opt_ ULONG NameSpace, _In_opt_ GUID *Provider, _In_opt_ PADDRINFOEXW Hints, _Outptr_ PADDRINFOEXW *Result, _In_opt_ PEPROCESS OwningProcess, _In_opt_ PETHREAD OwningThread, _Inout_ PIRP Irp Result)(Mem)) NTSTATUS(WSKAPI *PFN_WSK_GET_ADDRESS_INFO
Definition: wsk.h:426
#define MDL_SOURCE_IS_NONPAGED_POOL
Definition: mmtypes.h:20
#define FatLookup12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:584
#define FatReserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
Definition: allocsup.c:197
Definition: bufpool.h:45
NTSYSAPI ULONG WINAPI RtlFindClearBits(PCRTL_BITMAP, ULONG, ULONG)
VOID FatSetFatRun(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
Definition: allocsup.c:4137
VOID FatTearDownAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:547
_Inout_ PFILE_OBJECT FileObject
Definition: cdprocs.h:588
#define try_leave(S)
Definition: cdprocs.h:2180
#define FatGetIndexFromLbo(VCB, LBO)
Definition: fat.h:566
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255
#define DebugUnwind(X)
Definition: fatdata.h:315
#define FatAllocateClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
Definition: allocsup.c:158
FAT_DATA FatData
Definition: fatdata.c:56
$ULONG HighPart
Definition: ntbasedef.h:577
#define Dbg
Definition: allocsup.c:28
#define INLINE
Definition: rosdhcp.h:56
if(!(yy_init))
Definition: macro.lex.yy.c:714
VOID NTAPI CcUnpinRepinnedBcb(IN PVOID Bcb, IN BOOLEAN WriteThrough, OUT PIO_STATUS_BLOCK IoStatus)
Definition: cachesub.c:343
return Iosb
Definition: create.c:4402
enum _CLUSTER_TYPE CLUSTER_TYPE
#define NT_SUCCESS(StatCode)
Definition: apphelp.c:32
#define STATUS_PENDING
Definition: ntstatus.h:82
#define try_return(S)
Definition: cdprocs.h:2179
#define COUNTSAVEDBCBS
#define ARGUMENT_PRESENT(ArgumentPointer)
#define FAT16_DIRTY_ENTRY
Definition: fat.h:248
_Requires_lock_held_(_Global_critical_region_)
Definition: allocsup.c:87
#define Vcb
Definition: cdprocs.h:1415
DWORD ClusterSize
Definition: format.c:67
static const UCHAR Index[8]
Definition: usbohci.c:18
IN PVCB IN OUT PLARGE_MCB IN VBO OUT PLARGE_MCB RemainingMcb
Definition: fatprocs.h:363
BOOLEAN FatGetNextMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN ULONG RunIndex, OUT PVBO Vbo, OUT PLBO Lbo, OUT PULONG ByteCount)
Definition: fsctrl.c:541
VOID FASTCALL ExReleaseResourceLite(IN PERESOURCE Resource)
Definition: resource.c:1817
* PFILE_OBJECT
Definition: iotypes.h:1957
ULONG LastCluster
Definition: fatstruc.h:173
VOID NTAPI IoFreeMdl(PMDL Mdl)
Definition: iomdl.c:146
int Window
Definition: x11stubs.h:26
BOOL WINAPI DECLSPEC_HOTPATCH ReleaseMutex(IN HANDLE hMutex)
Definition: synch.c:618
CD_MCB Mcb
Definition: cdstruc.h:1022
unsigned char UCHAR
Definition: xmlstorage.h:181
#define FAT32_DIRTY_ENTRY
Definition: fat.h:249
char * PBOOLEAN
Definition: retypes.h:11
IN PVCB IN ULONG IN OUT PULONG IN BOOLEAN ExactMatchRequired
Definition: fatprocs.h:343
Definition: fsck.fat.h:192
#define VOID
Definition: acefi.h:82
#define FatUnlockFreeClusterBitMap(VCB)
Definition: allocsup.c:112
LARGE_INTEGER ValidDataLength
Definition: cctypes.h:17
IN PVCB IN ULONG FatIndex
Definition: fatprocs.h:382
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define NOTHING
Definition: env_spec_w32.h:461
#define FlagOn(_F, _SF)
Definition: ext2fs.h:179
#define TAG_FAT_BITMAP
Definition: nodetype.h:163
__drv_aliasesMem FORCEINLINE PIO_STACK_LOCATION IoGetNextIrpStackLocation(_In_ PIRP Irp)
Definition: iofuncs.h:2647
#define FatIsFat32(VCB)
Definition: fatprocs.h:1446
IN PVCB IN PLARGE_MCB IN BOOLEAN ZeroOnDeallocate
Definition: fatprocs.h:354
ClearFlag(Dirent->Flags, DIRENT_FLAG_NOT_PERSISTENT)
#define VCB_STATE_FLAG_MOUNT_IN_PROGRESS
Definition: fatstruc.h:577
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _In_ LARGE_INTEGER ByteCount
Definition: iotypes.h:1063
ULONG Runs
Definition: symtest.c:7
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
#define MAXULONG
Definition: typedefs.h:250
IN PVCB IN OUT PLARGE_MCB IN VBO SplitAtVbo
Definition: fatprocs.h:363
NTSYSAPI void WINAPI RtlClearBits(PRTL_BITMAP, ULONG, ULONG)
#define MAX_ZERO_MDL_SIZE
Definition: allocsup.c:2600
#define FatNumberOfClusters(B)
Definition: fat.h:482
VOID FatInitializeCacheMap(_In_ PFILE_OBJECT FileObject, _In_ PCC_FILE_SIZES FileSizes, _In_ BOOLEAN PinAccess, _In_ PCACHE_MANAGER_CALLBACKS Callbacks, _In_ PVOID LazyWriteContext)
Definition: cachesup.c:62
#define SetFlag(_F, _SF)
Definition: ext2fs.h:187
_SEH2_END
Definition: create.c:4400
#define FAT_DIRTY_BIT_INDEX
Definition: fat.h:237
#define KeInitializeEvent(pEvt, foo, foo2)
Definition: env_spec_w32.h:477
LARGE_INTEGER AllocationSize
Definition: cctypes.h:15
unsigned short USHORT
Definition: pedump.c:61
LARGE_INTEGER FatMaxLarge
Definition: fatdata.c:63
ULONG FatExceptionFilter(IN PIRP_CONTEXT IrpContext, IN PEXCEPTION_POINTERS ExceptionPointer)
Definition: fatdata.c:204
#define FAT12_DIRTY_ENTRY
Definition: fat.h:247
VOID NTAPI CcSetFileSizes(IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
Definition: fssup.c:356
NTSYSAPI ULONG WINAPI RtlFindLongestRunClear(PCRTL_BITMAP, PULONG)
IN PVCB IN ULONG IN OUT PULONG IN BOOLEAN OUT PLARGE_MCB Mcb
Definition: fatprocs.h:343
NTSYSAPI void WINAPI RtlSetBits(PRTL_BITMAP, ULONG, ULONG)
_SEH2_FINALLY
Definition: create.c:4371
VOID NTAPI FsRtlUninitializeLargeMcb(IN PLARGE_MCB Mcb)
Definition: largemcb.c:1053
unsigned int * PULONG
Definition: retypes.h:1
#define min(a, b)
Definition: monoChain.cc:55
NTSTATUS NTAPI IoCallDriver(IN PDEVICE_OBJECT DeviceObject, IN PIRP Irp)
Definition: irp.c:1218
BOOLEAN NTAPI ExAcquireResourceSharedLite(IN PERESOURCE Resource, IN BOOLEAN Wait)
Definition: resource.c:885
#define FatSet12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:603
#define SL_WRITE_THROUGH
Definition: iotypes.h:1783
ULONG FirstClusterOfFile
Definition: fatstruc.h:817
ULONG32 FAT_ENTRY
Definition: fat.h:225
#define FAT_CLUSTER_RESERVED
Definition: fat.h:256
#define OUT
Definition: typedefs.h:40
#define STATUS_DISK_FULL
Definition: udferr_usr.h:155
#define IRP_CONTEXT_FLAG_DISABLE_DIRTY
Definition: fatstruc.h:1557
#define FAT_CLEAN_ENTRY
Definition: fat.h:245
#define FatFindFreeClusterRun(IRPCONTEXT, VCB, CLUSTER_COUNT, CLUSTER_HINT)
Definition: allocsup.c:229
struct tagContext Context
Definition: acpixf.h:1034
unsigned int ULONG
Definition: retypes.h:1
_In_ PFCB _In_ PDIRENT_ENUM_CONTEXT _Inout_ PDIRENT Dirent
Definition: cdprocs.h:424
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:261
#define ALIGN_UP_BY(size, align)
PVCB Vcb
Definition: cdstruc.h:939
#define FatRootDirectoryLbo(B)
Definition: fat.h:445
VOID NTAPI CcRepinBcb(IN PVOID Bcb)
Definition: cachesub.c:331
#define IRP_MJ_WRITE
Definition: rdpdr.c:47
NTSYSAPI BOOLEAN WINAPI RtlAreBitsClear(PCRTL_BITMAP, ULONG, ULONG)
#define FAT32_ENTRY_MASK
Definition: fat.h:227
#define _SEH2_EXCEPT(...)
Definition: pseh2_64.h:6
#define _SEH2_GetExceptionCode()
Definition: pseh2_64.h:12
_In_ ULONG SectorSize
Definition: halfuncs.h:291
VOID NTAPI KeClearEvent(IN PKEVENT Event)
Definition: eventobj.c:22
return STATUS_SUCCESS
Definition: btrfs.c:3014
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:382
NTSYSAPI ULONG WINAPI RtlNumberOfClearBits(PCRTL_BITMAP)
#define UInt32x32To64(a, b)
Definition: intsafe.h:258
ULONG FcbState
Definition: cdstruc.h:977
unsigned short * PUSHORT
Definition: retypes.h:2
IN PFCB IN VBO Vbo
Definition: fatprocs.h:306
base of all file and directory entries
Definition: entries.h:82
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
LONGLONG QuadPart
Definition: typedefs.h:113
#define _Analysis_assume_(expr)
Definition: no_sal2.h:10
VOID FatLookupFatEntry(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
Definition: allocsup.c:3563
#define FAT_PREFETCH_PAGE_COUNT
Definition: allocsup.c:36
#define FatBytesPerCluster(B)
Definition: fat.h:408
#define PAGED_CODE()
IN BOOLEAN Wait
Definition: fatprocs.h:1538
#define FatFreeClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
Definition: allocsup.c:140
#define NT_ASSERT
Definition: rtlfuncs.h:3312
PULONG MinorVersion OPTIONAL
Definition: CrossNt.h:68
FCB_CONDITION FcbCondition
Definition: fatstruc.h:849
CACHE_MANAGER_CALLBACKS CacheManagerNoOpCallbacks
Definition: fatstruc.h:159