ReactOS  0.4.15-dev-1033-gd7d716a
allocsup.c File Reference
#include "fatprocs.h"
Include dependency graph for allocsup.c:

Go to the source code of this file.

Macros

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)
 
#define Dbg   (DEBUG_TRACE_ALLOCSUP)
 
#define FatMin(a, b)   ((a) < (b) ? (a) : (b))
 
#define FAT_PREFETCH_PAGE_COUNT   0x100
 
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
 
#define FatLockFreeClusterBitMap(VCB)
 
#define FatUnlockFreeClusterBitMap(VCB)
 
#define FatIsClusterFree(IRPCONTEXT, VCB, FAT_INDEX)   (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
 
#define FatFreeClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatAllocateClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatUnreserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatReserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatFindFreeClusterRun(IRPCONTEXT, VCB, CLUSTER_COUNT, CLUSTER_HINT)
 
#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)
 
#define FatWindowOfCluster(C)   (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
 
#define MAX_ZERO_MDL_SIZE   (1*1024*1024)
 
#define MAXCOUNTCLUS   0x10000
 
#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
 

Functions

VOID FatLookupFatEntry (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
 
VOID FatSetFatRun (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
 
UCHAR FatLogOf (IN ULONG Value)
 
INLINE ULONG FatSelectBestWindow (IN PVCB Vcb)
 
VOID FatSetupAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
VOID FatTearDownAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
 _Requires_lock_held_ (_Global_critical_region_)
 
CLUSTER_TYPE FatInterpretClusterType (IN PVCB Vcb, IN FAT_ENTRY Entry)
 
VOID FatExamineFatEntries (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
 

Macro Definition Documentation

◆ ASSERT_CURRENT_WINDOW_GOOD

#define ASSERT_CURRENT_WINDOW_GOOD (   VCB)

Definition at line 83 of file allocsup.c.

◆ BugCheckFileId

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)

Definition at line 22 of file allocsup.c.

◆ COUNTSAVEDBCBS

#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)

◆ Dbg

#define Dbg   (DEBUG_TRACE_ALLOCSUP)

Definition at line 28 of file allocsup.c.

◆ FAT_PREFETCH_PAGE_COUNT

#define FAT_PREFETCH_PAGE_COUNT   0x100

Definition at line 36 of file allocsup.c.

◆ FatAllocateClusters

#define FatAllocateClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
} \
}
#define FAT_CLUSTER_LAST
Definition: fat.h:258
#define TRUE
Definition: types.h:120
Definition: cdstruc.h:504

Definition at line 158 of file allocsup.c.

◆ FatFindFreeClusterRun

#define FatFindFreeClusterRun (   IRPCONTEXT,
  VCB,
  CLUSTER_COUNT,
  CLUSTER_HINT 
)
Value:
( \
(CLUSTER_COUNT == 1) && \
FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
(CLUSTER_HINT) : \
RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
(CLUSTER_COUNT), \
(CLUSTER_HINT) - 2) + 2 \
)
Definition: cdstruc.h:504
#define FatIsClusterFree(IRPCONTEXT, VCB, FAT_INDEX)
Definition: allocsup.c:127
struct _VCB VCB

Definition at line 229 of file allocsup.c.

◆ FatFreeClusters

#define FatFreeClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
} \
}
Definition: cdstruc.h:504
#define FALSE
Definition: types.h:117
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255

Definition at line 140 of file allocsup.c.

◆ FatIsClusterFree

#define FatIsClusterFree (   IRPCONTEXT,
  VCB,
  FAT_INDEX 
)    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)

Definition at line 127 of file allocsup.c.

◆ FatLockFreeClusterBitMap

#define FatLockFreeClusterBitMap (   VCB)
Value:
{ \
NT_ASSERT(KeAreApcsDisabled()); \
ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
}
Definition: cdstruc.h:504
BOOLEAN NTAPI KeAreApcsDisabled(VOID)
Definition: apc.c:958

Definition at line 99 of file allocsup.c.

◆ FatMin

#define FatMin (   a,
  b 
)    ((a) < (b) ? (a) : (b))

Definition at line 30 of file allocsup.c.

◆ FatReserveClusters

#define FatReserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
_AfterRun = 2; \
} \
if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
(VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
if (1 == (VCB)->ClusterHint) { \
(VCB)->ClusterHint = 2; \
} \
} \
else { \
(VCB)->ClusterHint = _AfterRun; \
} \
}
Definition: cdstruc.h:504
#define RtlCheckBit(BMH, BP)
Definition: rtlfuncs.h:3154
struct _VCB VCB
NTSYSAPI ULONG WINAPI RtlFindClearBits(PCRTL_BITMAP, ULONG, ULONG)
if(!(yy_init))
Definition: macro.lex.yy.c:714

Definition at line 197 of file allocsup.c.

◆ FatUnlockFreeClusterBitMap

#define FatUnlockFreeClusterBitMap (   VCB)
Value:
{ \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
NT_ASSERT(KeAreApcsDisabled()); \
ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
}
Definition: cdstruc.h:504
BOOLEAN NTAPI KeAreApcsDisabled(VOID)
Definition: apc.c:958

Definition at line 112 of file allocsup.c.

◆ FatUnreserveClusters

#define FatUnreserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
if ((FAT_INDEX) < (VCB)->ClusterHint) { \
(VCB)->ClusterHint = (FAT_INDEX); \
} \
}
Definition: cdstruc.h:504
struct _VCB VCB

Definition at line 176 of file allocsup.c.

◆ FatWindowOfCluster

#define FatWindowOfCluster (   C)    (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)

Definition at line 253 of file allocsup.c.

◆ MAX_CLUSTER_BITMAP_SIZE

#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)

Definition at line 247 of file allocsup.c.

◆ MAX_ZERO_MDL_SIZE

#define MAX_ZERO_MDL_SIZE   (1*1024*1024)

Definition at line 2600 of file allocsup.c.

◆ MAXCOUNTCLUS

#define MAXCOUNTCLUS   0x10000

Function Documentation

◆ _Requires_lock_held_()

_Requires_lock_held_ ( _Global_critical_region_  )

Definition at line 615 of file allocsup.c.

658 {
659  VBO CurrentVbo;
660  LBO CurrentLbo;
661  LBO PriorLbo;
662 
663  VBO FirstVboOfCurrentRun = 0;
664  LBO FirstLboOfCurrentRun;
665 
666  BOOLEAN LastCluster;
667  ULONG Runs;
668 
669  PVCB Vcb;
671  ULONG BytesPerCluster;
672  ULARGE_INTEGER BytesOnVolume;
673 
675 
676  PAGED_CODE();
677 
678  Vcb = FcbOrDcb->Vcb;
679 
680 
681  DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
682  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
683  DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
684  DebugTrace( 0, Dbg, " pLbo = %8lx\n", Lbo);
685  DebugTrace( 0, Dbg, " pByteCount = %8lx\n", ByteCount);
686  DebugTrace( 0, Dbg, " pAllocated = %8lx\n", Allocated);
687 
688  Context.Bcb = NULL;
689 
690  *EndOnMax = FALSE;
691 
692  //
693  // Check the trivial case that the mapping is already in our
694  // Mcb.
695  //
696 
698 
699  *Allocated = TRUE;
700 
701  NT_ASSERT( *ByteCount != 0 );
702 
703  //
704  // Detect the overflow case, trim and claim the condition.
705  //
706 
707  if (Vbo + *ByteCount == 0) {
708 
709  *EndOnMax = TRUE;
710  }
711 
712  DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
713  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
714  return;
715  }
716 
717  //
718  // Initialize the Vcb, the cluster size, LastCluster, and
719  // FirstLboOfCurrentRun (to be used as an indication of the first
720  // iteration through the following while loop).
721  //
722 
723  BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
724 
725  BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
726 
727  LastCluster = FALSE;
728  FirstLboOfCurrentRun = 0;
729 
730  //
731  // Discard the case that the request extends beyond the end of
732  // allocation. Note that if the allocation size is not known
733  // AllocationSize is set to 0xffffffff.
734  //
735 
736  if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
737 
738  *Allocated = FALSE;
739 
740  DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
741  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
742  return;
743  }
744 
745  //
746  // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
747  // and FatEntry to describe the beginning of the last entry in the Mcb.
748  // This is used as initialization for the following loop.
749  //
750  // If the Mcb was empty, we start at the beginning of the file with
751  // CurrentVbo set to 0 to indicate a new run.
752  //
753 
754  if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
755 
756  DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
757 
758  CurrentVbo -= (BytesPerCluster - 1);
759  CurrentLbo -= (BytesPerCluster - 1);
760 
761  //
762  // Convert an index to a count.
763  //
764 
765  Runs += 1;
766 
767  } else {
768 
769  DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
770 
771  //
772  // Check for an FcbOrDcb that has no allocation
773  //
774 
775  if (FcbOrDcb->FirstClusterOfFile == 0) {
776 
777  *Allocated = FALSE;
778 
779  DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
780  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
781  return;
782 
783  } else {
784 
785  CurrentVbo = 0;
787  FirstVboOfCurrentRun = CurrentVbo;
788  FirstLboOfCurrentRun = CurrentLbo;
789 
790  Runs = 0;
791 
792  DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
793  }
794  }
795 
796  //
797  // Now we know that we are looking up a valid Vbo, but it is
798  // not in the Mcb, which is a monotonically increasing list of
799  // Vbo's. Thus we have to go to the Fat, and update
800  // the Mcb as we go. We use a try-finally to unpin the page
801  // of fat hanging around. Also we mark *Allocated = FALSE, so that
802  // the caller won't try to use the data if we hit an exception.
803  //
804 
805  *Allocated = FALSE;
806 
807  _SEH2_TRY {
808 
809  FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
810 
811  //
812  // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
813  // The assumption here, is that only whole clusters of Vbos and Lbos
814  // are mapped in the Mcb.
815  //
816 
817  NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
818  % BytesPerCluster == 0) &&
819  (CurrentVbo % BytesPerCluster == 0) );
820 
821  //
822  // Starting from the first Vbo after the last Mcb entry, scan through
823  // the Fat looking for our Vbo. We continue through the Fat until we
824  // hit a noncontiguity beyond the desired Vbo, or the last cluster.
825  //
826 
827  while ( !LastCluster ) {
828 
829  //
830  // Get the next fat entry, and update our Current variables.
831  //
832 
833  FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );
834 
835  PriorLbo = CurrentLbo;
836  CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
837  CurrentVbo += BytesPerCluster;
838 
839  switch ( FatInterpretClusterType( Vcb, FatEntry )) {
840 
841  //
842  // Check for a break in the Fat allocation chain.
843  //
844 
845  case FatClusterAvailable:
846  case FatClusterReserved:
847  case FatClusterBad:
848 
849  DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
850  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
851 
852  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
854  break;
855 
856  //
857  // If this is the last cluster, we must update the Mcb and
858  // exit the loop.
859  //
860 
861  case FatClusterLast:
862 
863  //
864  // Assert we know where the current run started. If the
865  // Mcb was empty when we were called, then FirstLboOfCurrentRun
866  // was set to the start of the file. If the Mcb contained an
867  // entry, then FirstLboOfCurrentRun was set on the first
868  // iteration through the loop. Thus if FirstLboOfCurrentRun
869  // is 0, then there was an Mcb entry and we are on our first
870  // iteration, meaning that the last cluster in the Mcb was
871  // really the last allocated cluster, but we checked Vbo
872  // against AllocationSize, and found it OK, thus AllocationSize
873  // must be too large.
874  //
875  // Note that, when we finally arrive here, CurrentVbo is actually
876  // the first Vbo beyond the file allocation and CurrentLbo is
877  // meaningless.
878  //
879 
880  DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
881 
882  //
883  // Detect the case of the maximal file. Note that this really isn't
884  // a proper Vbo - those are zero-based, and this is a one-based number.
885  // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
886  // 2^32 - 2.
887  //
888  // Just so we don't get confused here.
889  //
890 
891  if (CurrentVbo == 0) {
892 
893  *EndOnMax = TRUE;
894  CurrentVbo -= 1;
895  }
896 
897  LastCluster = TRUE;
898 
899  if (FirstLboOfCurrentRun != 0 ) {
900 
901  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
902  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
903  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
904  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
905 
907  &FcbOrDcb->Mcb,
908  FirstVboOfCurrentRun,
909  FirstLboOfCurrentRun,
910  CurrentVbo - FirstVboOfCurrentRun );
911 
912  Runs += 1;
913  }
914 
915  //
916  // Being at the end of allocation, make sure we have found
917  // the Vbo. If we haven't, seeing as we checked VBO
918  // against AllocationSize, the real disk allocation is less
919  // than that of AllocationSize. This comes about when the
920  // real allocation is not yet known, and AllocationSize
921  // contains MAXULONG.
922  //
923  // KLUDGE! - If we were called by FatLookupFileAllocationSize
924  // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
925  // hint. Thus we merrily go along looking for a match that isn't
926  // there, but in the meantime building an Mcb. If this is
927  // the case, fill in AllocationSize and return.
928  //
929 
930  if ( Vbo == MAXULONG - 1 ) {
931 
932  *Allocated = FALSE;
933 
934  FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
935 
936  DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
937  try_return ( NOTHING );
938  }
939 
940  //
941  // We will lie ever so slightly if we really terminated on the
942  // maximal byte of a file. It is really allocated.
943  //
944 
945  if (Vbo >= CurrentVbo && !*EndOnMax) {
946 
947  *Allocated = FALSE;
948  try_return ( NOTHING );
949  }
950 
951  break;
952 
953  //
954  // This is a continuation in the chain. If the run has a
955  // discontiguity at this point, update the Mcb, and if we are beyond
956  // the desired Vbo, this is the end of the run, so set LastCluster
957  // and exit the loop.
958  //
959 
960  case FatClusterNext:
961 
962  //
963  // This is the loop check. The Vbo must not be bigger than the size of
964  // the volume, and the Vbo must not have a) wrapped and b) not been at the
965  // very last cluster in the chain, for the case of the maximal file.
966  //
967 
968  if ( CurrentVbo == 0 ||
969  (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
970 
971  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
973  }
974 
975  if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
976 
977  //
978  // Note that on the first time through the loop
979  // (FirstLboOfCurrentRun == 0), we don't add the
980  // run to the Mcb since it corresponds to the last
981  // run already stored in the Mcb.
982  //
983 
984  if ( FirstLboOfCurrentRun != 0 ) {
985 
986  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
987  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
988  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
989  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
990 
992  &FcbOrDcb->Mcb,
993  FirstVboOfCurrentRun,
994  FirstLboOfCurrentRun,
995  CurrentVbo - FirstVboOfCurrentRun );
996 
997  Runs += 1;
998  }
999 
1000  //
1001  // Since we are at a run boundary, with CurrentLbo and
1002  // CurrentVbo being the first cluster of the next run,
1003  // we see if the run we just added encompasses the desired
1004  // Vbo, and if so exit. Otherwise we set up two new
1005  // First*boOfCurrentRun, and continue.
1006  //
1007 
1008  if (CurrentVbo > Vbo) {
1009 
1010  LastCluster = TRUE;
1011 
1012  } else {
1013 
1014  FirstVboOfCurrentRun = CurrentVbo;
1015  FirstLboOfCurrentRun = CurrentLbo;
1016  }
1017  }
1018  break;
1019 
1020  default:
1021 
1022  DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1023 
1024 #ifdef _MSC_VER
1025 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1026 #endif
1027  FatBugCheck( 0, 0, 0 );
1028 
1029  break;
1030 
1031  } // switch()
1032  } // while()
1033 
1034  //
1035  // Load up the return parameters.
1036  //
1037  // On exit from the loop, Vbo still contains the desired Vbo, and
1038  // CurrentVbo is the first byte after the run that contained the
1039  // desired Vbo.
1040  //
1041 
1042  *Allocated = TRUE;
1043 
1044  *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1045 
1046  *ByteCount = CurrentVbo - Vbo;
1047 
1048  if (ARGUMENT_PRESENT(Index)) {
1049 
1050  //
1051  // Note that Runs only needs to be accurate with respect to where we
1052  // ended. Since partial-lookup cases will occur without exclusive
1053  // synchronization, the Mcb itself may be much bigger by now.
1054  //
1055 
1056  *Index = Runs - 1;
1057  }
1058 
1059  try_exit: NOTHING;
1060 
1061  } _SEH2_FINALLY {
1062 
1063  DebugUnwind( FatLookupFileAllocation );
1064 
1065  //
1066  // We are done reading the Fat, so unpin the last page of fat
1067  // that is hanging around
1068  //
1069 
1070  FatUnpinBcb( IrpContext, Context.Bcb );
1071 
1072  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1073  } _SEH2_END;
1074 
1075  return;
1076 }
CLUSTER_TYPE FatInterpretClusterType(IN PVCB Vcb, IN FAT_ENTRY Entry)
Definition: allocsup.c:3471
#define FatGetLboFromIndex(VCB, FAT_INDEX)
Definition: fat.h:559
FSRTL_ADVANCED_FCB_HEADER Header
Definition: cdstruc.h:931
#define TRUE
Definition: types.h:120
ULONG32 VBO
Definition: fat.h:38
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:306
$ULONG LowPart
Definition: ntbasedef.h:570
Definition: cdstruc.h:504
BOOLEAN FatLookupLastMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, OUT PVBO Vbo, OUT PLBO Lbo, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:494
#define FatRaiseStatus(IRPCONTEXT, STATUS)
Definition: fatprocs.h:2974
_SEH2_TRY
Definition: create.c:4226
IN PFCB FcbOrDcb
Definition: fatprocs.h:306
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN OUT PBOOLEAN EndOnMax
Definition: fatprocs.h:306
#define STATUS_FILE_CORRUPT_ERROR
Definition: udferr_usr.h:168
ULONGLONG QuadPart
Definition: ms-dtyp.idl:185
BOOLEAN FatLookupMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, OUT PLBO Lbo, OUT PULONG ByteCount OPTIONAL, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:418
#define FALSE
Definition: types.h:117
LONGLONG LBO
Definition: fat.h:34
#define FatBugCheck(A, B, C)
Definition: nodetype.h:104
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
unsigned char BOOLEAN
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN Allocated
Definition: fatprocs.h:306
smooth NULL
Definition: ftsmooth.c:416
#define FatGetIndexFromLbo(VCB, LBO)
Definition: fat.h:566
#define DebugUnwind(X)
Definition: fatdata.h:315
$ULONG HighPart
Definition: ntbasedef.h:571
#define Dbg
Definition: allocsup.c:28
#define try_return(S)
Definition: cdprocs.h:2179
#define ARGUMENT_PRESENT(ArgumentPointer)
#define Vcb
Definition: cdprocs.h:1415
static const UCHAR Index[8]
Definition: usbohci.c:18
CD_MCB Mcb
Definition: cdstruc.h:1022
Definition: fsck.fat.h:192
#define VOID
Definition: acefi.h:82
#define NOTHING
Definition: env_spec_w32.h:461
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _In_ LARGE_INTEGER ByteCount
Definition: iotypes.h:1065
ULONG Runs
Definition: symtest.c:7
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
#define MAXULONG
Definition: typedefs.h:251
_SEH2_END
Definition: create.c:4400
_SEH2_FINALLY
Definition: create.c:4371
unsigned int * PULONG
Definition: retypes.h:1
ULONG FirstClusterOfFile
Definition: fatstruc.h:817
ULONG32 FAT_ENTRY
Definition: fat.h:225
struct tagContext Context
Definition: acpixf.h:1034
unsigned int ULONG
Definition: retypes.h:1
PVCB Vcb
Definition: cdstruc.h:939
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:382
#define UInt32x32To64(a, b)
Definition: intsafe.h:250
IN PFCB IN VBO Vbo
Definition: fatprocs.h:306
VOID FatLookupFatEntry(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
Definition: allocsup.c:3563
#define PAGED_CODE()
#define NT_ASSERT
Definition: rtlfuncs.h:3312

◆ FatExamineFatEntries()

VOID FatExamineFatEntries ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG StartIndex  OPTIONAL,
IN ULONG EndIndex  OPTIONAL,
IN BOOLEAN  SetupWindows,
IN PFAT_WINDOW SwitchToWindow  OPTIONAL,
IN PULONG BitMapBuffer  OPTIONAL 
)

Definition at line 4718 of file allocsup.c.

4768 {
4770  ULONG Page = 0;
4771  ULONG Offset = 0;
4772  ULONG FatIndex;
4774  FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4775  PUSHORT FatBuffer;
4776  PVOID pv;
4777  PBCB Bcb = NULL;
4778  ULONG EntriesPerWindow;
4779 
4780  ULONG ClustersThisRun;
4781  ULONG StartIndexOfThisRun;
4782 
4783  PULONG FreeClusterCount = NULL;
4784 
4785  PFAT_WINDOW CurrentWindow = NULL;
4786 
4787  PVOID NewBitMapBuffer = NULL;
4788  PRTL_BITMAP BitMap = NULL;
4789  RTL_BITMAP PrivateBitMap;
4790 
4791  ULONG ClusterSize = 0;
4792  ULONG PrefetchPages = 0;
4793  ULONG FatPages = 0;
4794 
4795  VBO BadClusterVbo = 0;
4796  LBO Lbo = 0;
4797 
4798  enum RunType {
4799  FreeClusters,
4800  AllocatedClusters,
4801  UnknownClusters
4802  } CurrentRun;
4803 
4804  PAGED_CODE();
4805 
4806  //
4807  // Now assert correct usage.
4808  //
4809 
4810  FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4811 
4812  NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4813  NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4814 
4815  if (Vcb->NumberOfWindows > 1) {
4816 
4817  //
4818  // FAT32: Calculate the number of FAT entries covered by a window. This is
4819  // equal to the number of bits in the freespace bitmap, the size of which
4820  // is hardcoded.
4821  //
4822 
4823  EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4824 
4825  } else {
4826 
4827  EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4828  }
4829 
4830  //
4831  // We will also fill in the cumulative count of free clusters for
4832  // the entire volume. If this is not appropriate, NULL it out
4833  // shortly.
4834  //
4835 
4836  FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4837 
4838  if (SetupWindows) {
4839 
4840  NT_ASSERT(BitMapBuffer == NULL);
4841 
4842  //
4843  // In this case we're just supposed to scan the fat and set up
4844  // the information regarding where the buckets fall and how many
4845  // free clusters are in each.
4846  //
4847  // It is fine to monkey with the real windows, we must be able
4848  // to do this to activate the volume.
4849  //
4850 
4851  BitMap = NULL;
4852 
4853  CurrentWindow = &Vcb->Windows[0];
4854  CurrentWindow->FirstCluster = StartIndex;
4855  CurrentWindow->ClustersFree = 0;
4856 
4857  //
4858  // We always wish to calculate total free clusters when
4859  // setting up the FAT windows.
4860  //
4861 
4862  } else if (BitMapBuffer == NULL) {
4863 
4864  //
4865  // We will be filling in the free cluster bitmap for the volume.
4866  // Careful, we can raise out of here and be hopelessly hosed if
4867  // we built this up in the main bitmap/window itself.
4868  //
4869  // For simplicity's sake, we'll do the swap for everyone. FAT32
4870  // provokes the need since we can't tolerate partial results
4871  // when switching windows.
4872  //
4873 
4874  NT_ASSERT( SwitchToWindow );
4875 
4876  CurrentWindow = SwitchToWindow;
4877  StartIndex = CurrentWindow->FirstCluster;
4878  EndIndex = CurrentWindow->LastCluster;
4879 
4880  BitMap = &PrivateBitMap;
4881  NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4882  (EntriesPerWindow + 7) / 8,
4883  TAG_FAT_BITMAP );
4884 
4885  RtlInitializeBitMap( &PrivateBitMap,
4886  NewBitMapBuffer,
4887  EndIndex - StartIndex + 1);
4888 
4889  if ((FatIndexBitSize == 32) &&
4890  (Vcb->NumberOfWindows > 1)) {
4891 
4892  //
4893  // We do not wish to count total clusters here.
4894  //
4895 
4896  FreeClusterCount = NULL;
4897 
4898  }
4899 
4900  } else {
4901 
4902  BitMap = &PrivateBitMap;
4903  RtlInitializeBitMap(&PrivateBitMap,
4904  BitMapBuffer,
4905  EndIndex - StartIndex + 1);
4906 
4907  //
4908  // We do not count total clusters here.
4909  //
4910 
4911  FreeClusterCount = NULL;
4912  }
4913 
4914  //
4915  // Now, our start index better be in the file heap.
4916  //
4917 
4918  NT_ASSERT( StartIndex >= 2 );
4919 
4920  _SEH2_TRY {
4921 
4922  //
4923  // Pick up the initial chunk of the FAT and first entry.
4924  //
4925 
4926  if (FatIndexBitSize == 12) {
4927 
4928  //
4929  // We read in the entire fat in the 12 bit case.
4930  //
4931 
4932  FatReadVolumeFile( IrpContext,
4933  Vcb,
4934  FatReservedBytes( &Vcb->Bpb ),
4935  FatBytesPerFat( &Vcb->Bpb ),
4936  &Bcb,
4937  (PVOID *)&FatBuffer );
4938 
4939  FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4940 
4941  } else {
4942 
4943  //
4944  // Read in one page of fat at a time. We cannot read in the
4945  // all of the fat we need because of cache manager limitations.
4946  //
4947 
4948  ULONG BytesPerEntry = FatIndexBitSize >> 3;
4949 
4950  FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE;
4951  Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4952 
4953  Offset = Page * PAGE_SIZE;
4954 
4955  //
4956  // Prefetch the FAT entries in memory for optimal performance.
4957  //
4958 
4959  PrefetchPages = FatPages - Page;
4960 
4961  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
4962 
4963  PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page;
4964  }
4965 
4966 #if (NTDDI_VERSION >= NTDDI_WIN8)
4967  FatPrefetchPages( IrpContext,
4968  Vcb->VirtualVolumeFile,
4969  Page,
4970  PrefetchPages );
4971 #endif
4972 
4973  FatReadVolumeFile( IrpContext,
4974  Vcb,
4975  Offset,
4976  PAGE_SIZE,
4977  &Bcb,
4978  &pv);
4979 
4980  if (FatIndexBitSize == 32) {
4981 
4982  FatBuffer = (PUSHORT)((PUCHAR)pv +
4983  (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4984  PAGE_SIZE);
4985 
4986  FirstFatEntry = *((PULONG)FatBuffer);
4987  FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4988 
4989  } else {
4990 
4991  FatBuffer = (PUSHORT)((PUCHAR)pv +
4992  FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4993 
4994  FirstFatEntry = *FatBuffer;
4995  }
4996 
4997  }
4998 
4999  ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster);
5000 
5001  CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
5002  FreeClusters : AllocatedClusters;
5003 
5004  StartIndexOfThisRun = StartIndex;
5005 
5006  for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
5007 
5008  if (FatIndexBitSize == 12) {
5009 
5010  FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
5011 
5012  } else {
5013 
5014  //
5015  // If we are setting up the FAT32 windows and have stepped into a new
5016  // bucket, finalize this one and move forward.
5017  //
5018 
5019  if (SetupWindows &&
5020  FatIndex > StartIndex &&
5021  (FatIndex - 2) % EntriesPerWindow == 0) {
5022 
5023  CurrentWindow->LastCluster = FatIndex - 1;
5024 
5025  if (CurrentRun == FreeClusters) {
5026 
5027  //
5028  // We must be counting clusters in order to modify the
5029  // contents of the window.
5030  //
5031 
5032  NT_ASSERT( FreeClusterCount );
5033 
5034  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5035  CurrentWindow->ClustersFree += ClustersThisRun;
5036 
5037  if (FreeClusterCount) {
5038  *FreeClusterCount += ClustersThisRun;
5039  }
5040 
5041  } else {
5042 
5043  NT_ASSERT(CurrentRun == AllocatedClusters);
5044 
5045  }
5046 
5047  StartIndexOfThisRun = FatIndex;
5048  CurrentRun = UnknownClusters;
5049 
5050  CurrentWindow++;
5051  CurrentWindow->ClustersFree = 0;
5052  CurrentWindow->FirstCluster = FatIndex;
5053  }
5054 
5055  //
5056  // If we just stepped onto a new page, grab a new pointer.
5057  //
5058 
5059  if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
5060 
5061  FatUnpinBcb( IrpContext, Bcb );
5062 
5063  Page++;
5064  Offset += PAGE_SIZE;
5065 
5066 #if (NTDDI_VERSION >= NTDDI_WIN8)
5067  //
5068  // If we have exhausted all the prefetch pages, prefetch the next chunk.
5069  //
5070 
5071  if (--PrefetchPages == 0) {
5072 
5073  PrefetchPages = FatPages - Page;
5074 
5075  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
5076 
5077  PrefetchPages = FAT_PREFETCH_PAGE_COUNT;
5078  }
5079 
5080  FatPrefetchPages( IrpContext,
5081  Vcb->VirtualVolumeFile,
5082  Page,
5083  PrefetchPages );
5084  }
5085 #endif
5086 
5087  FatReadVolumeFile( IrpContext,
5088  Vcb,
5089  Offset,
5090  PAGE_SIZE,
5091  &Bcb,
5092  &pv );
5093 
5094  FatBuffer = (PUSHORT)pv;
5095  }
5096 
5097  if (FatIndexBitSize == 32) {
5098 
5099 #ifndef __REACTOS__
5100 #ifdef _MSC_VER
5101 #pragma warning( suppress: 4213 )
5102 #endif
5103  FatEntry = *((PULONG)FatBuffer)++;
5105 #else
5106  FatEntry = *FatBuffer;
5107  FatBuffer += 1;
5109 #endif
5110 
5111  } else {
5112 
5113  FatEntry = *FatBuffer;
5114  FatBuffer += 1;
5115  }
5116  }
5117 
5118  if (CurrentRun == UnknownClusters) {
5119 
5120  CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
5121  FreeClusters : AllocatedClusters;
5122  }
5123 
5124  //
5125  // Are we switching from a free run to an allocated run?
5126  //
5127 
5128  if (CurrentRun == FreeClusters &&
5130 
5131  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5132 
5133  if (FreeClusterCount) {
5134 
5135  *FreeClusterCount += ClustersThisRun;
5136  CurrentWindow->ClustersFree += ClustersThisRun;
5137  }
5138 
5139  if (BitMap) {
5140 
5141  RtlClearBits( BitMap,
5142  StartIndexOfThisRun - StartIndex,
5143  ClustersThisRun );
5144  }
5145 
5146  CurrentRun = AllocatedClusters;
5147  StartIndexOfThisRun = FatIndex;
5148  }
5149 
5150  //
5151  // Are we switching from an allocated run to a free run?
5152  //
5153 
5154  if (CurrentRun == AllocatedClusters &&
5156 
5157  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5158 
5159  if (BitMap) {
5160 
5161  RtlSetBits( BitMap,
5162  StartIndexOfThisRun - StartIndex,
5163  ClustersThisRun );
5164  }
5165 
5166  CurrentRun = FreeClusters;
5167  StartIndexOfThisRun = FatIndex;
5168  }
5169 
5170  //
5171  // If the entry is marked bad, add it to the bad block MCB
5172  //
5173 
5174  if ((SetupWindows || (Vcb->NumberOfWindows == 1)) &&
5176 
5177  //
5178  // This cluster is marked bad.
5179  // Add it to the BadBlockMcb.
5180  //
5181 
5183  FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize );
5184  BadClusterVbo += ClusterSize;
5185  }
5186  }
5187 
5188  //
5189  // If we finished the scan, then we know about all the possible bad clusters.
5190  //
5191 
5193 
5194  //
5195  // Now we have to record the final run we encountered
5196  //
5197 
5198  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5199 
5200  if (CurrentRun == FreeClusters) {
5201 
5202  if (FreeClusterCount) {
5203 
5204  *FreeClusterCount += ClustersThisRun;
5205  CurrentWindow->ClustersFree += ClustersThisRun;
5206  }
5207 
5208  if (BitMap) {
5209 
5210  RtlClearBits( BitMap,
5211  StartIndexOfThisRun - StartIndex,
5212  ClustersThisRun );
5213  }
5214 
5215  } else {
5216 
5217  if (BitMap) {
5218 
5219  RtlSetBits( BitMap,
5220  StartIndexOfThisRun - StartIndex,
5221  ClustersThisRun );
5222  }
5223  }
5224 
5225  //
5226  // And finish the last window if we are in setup.
5227  //
5228 
5229  if (SetupWindows) {
5230 
5231  CurrentWindow->LastCluster = FatIndex - 1;
5232  }
5233 
5234  //
5235  // Now switch the active window if required. We've successfully gotten everything
5236  // nailed down.
5237  //
5238  // If we were tracking the free cluster count, this means we should update the
5239  // window. This is the case of FAT12/16 initialization.
5240  //
5241 
5242  if (SwitchToWindow) {
5243 
5244  if (Vcb->FreeClusterBitMap.Buffer) {
5245 
5246  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
5247  }
5248 
5249  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
5250  NewBitMapBuffer,
5251  EndIndex - StartIndex + 1 );
5252 
5253  NewBitMapBuffer = NULL;
5254 
5255  Vcb->CurrentWindow = SwitchToWindow;
5256  Vcb->ClusterHint = (ULONG)-1;
5257 
5258  if (FreeClusterCount) {
5259 
5260  NT_ASSERT( !SetupWindows );
5261 
5262  Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
5263  }
5264  }
5265 
5266  //
5269  // Make sure plausible things occurred ...
5268  //
5269 
5270  if (!SetupWindows && BitMapBuffer == NULL) {
5271 
5273  }
5274 
5275  NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
5276 
5277  } _SEH2_FINALLY {
5278 
5279  //
5280  // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5281  //
5282 
5283  FatUnpinBcb( IrpContext, Bcb);
5284 
5285  if (NewBitMapBuffer) {
5286 
5287  ExFreePool( NewBitMapBuffer );
5288  }
5289  } _SEH2_END;
5290 }
CLUSTER_TYPE FatInterpretClusterType(IN PVCB Vcb, IN FAT_ENTRY Entry)
Definition: allocsup.c:3471
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
#define FatGetLboFromIndex(VCB, FAT_INDEX)
Definition: fat.h:559
ULONG ClustersFree
Definition: fatstruc.h:174
IN PVCB IN VBO IN ULONG OUT PBCB * Bcb
Definition: fatprocs.h:411
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
Definition: allocsup.c:83
PVOID NTAPI FsRtlAllocatePoolWithTag(IN POOL_TYPE PoolType, IN ULONG NumberOfBytes, IN ULONG Tag)
Definition: filter.c:229
#define FatIndexBitSize(B)
Definition: fat.h:515
NTSTATUS FatPrefetchPages(IN PIRP_CONTEXT IrpContext, IN PFILE_OBJECT FileObject, IN ULONG StartingPage, IN ULONG PageCount)
Definition: cachesup.c:1929
NTSTATUS FreeClusters(PNTFS_VCB Vcb, PNTFS_ATTR_CONTEXT AttrContext, ULONG AttrOffset, PFILE_RECORD_HEADER FileRecord, ULONG ClustersToFree)
Definition: attrib.c:1057
ULONG32 VBO
Definition: fat.h:38
unsigned char * PUCHAR
Definition: retypes.h:3
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:306
VOID FatReadVolumeFile(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN VBO StartingVbo, IN ULONG ByteCount, OUT PBCB *Bcb, OUT PVOID *Buffer)
Definition: cachesup.c:102
ULONG FirstCluster
Definition: fatstruc.h:172
_SEH2_TRY
Definition: create.c:4226
uint32_t ULONG_PTR
Definition: typedefs.h:65
#define FatReservedBytes(B)
Definition: fat.h:414
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
#define VCB_STATE_FLAG_BAD_BLOCKS_POPULATED
Definition: fatstruc.h:575
LONGLONG LBO
Definition: fat.h:34
#define FatBytesPerFat(B)
Definition: fat.h:410
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
smooth NULL
Definition: ftsmooth.c:416
#define FatLookup12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:584
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255
#define Vcb
Definition: cdprocs.h:1415
DWORD ClusterSize
Definition: format.c:67
ULONG LastCluster
Definition: fatstruc.h:173
Definition: fsck.fat.h:192
IN PVCB IN ULONG FatIndex
Definition: fatprocs.h:382
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define TAG_FAT_BITMAP
Definition: nodetype.h:163
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
NTSYSAPI void WINAPI RtlClearBits(PRTL_BITMAP, ULONG, ULONG)
#define SetFlag(_F, _SF)
Definition: ext2fs.h:187
_SEH2_END
Definition: create.c:4400
NTSYSAPI void WINAPI RtlSetBits(PRTL_BITMAP, ULONG, ULONG)
_SEH2_FINALLY
Definition: create.c:4371
unsigned int * PULONG
Definition: retypes.h:1
unsigned int ULONG
Definition: retypes.h:1
#define ALIGN_UP_BY(size, align)
#define FAT32_ENTRY_MASK
Definition: fat.h:227
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:382
unsigned short * PUSHORT
Definition: retypes.h:2
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define FAT_PREFETCH_PAGE_COUNT
Definition: allocsup.c:36
#define PAGED_CODE()
#define NT_ASSERT
Definition: rtlfuncs.h:3312

Referenced by FatSetupAllocationSupport().

◆ FatInterpretClusterType()

CLUSTER_TYPE FatInterpretClusterType ( IN PVCB  Vcb,
IN FAT_ENTRY  Entry 
)

Definition at line 3471 of file allocsup.c.

3497 {
3498  DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
3499  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3500  DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
3501 
3502  PAGED_CODE();
3503 
3504  switch(Vcb->AllocationSupport.FatIndexBitSize ) {
3505  case 32:
3507  break;
3508 
3509  case 12:
3510  NT_ASSERT( Entry <= 0xfff );
3511  if (Entry >= 0x0ff0) {
3512  Entry |= 0x0FFFF000;
3513  }
3514  break;
3515 
3516  default:
3517  case 16:
3518  NT_ASSERT( Entry <= 0xffff );
3519  if (Entry >= 0x0fff0) {
3520  Entry |= 0x0FFF0000;
3521  }
3522  break;
3523  }
3524 
3525  if (Entry == FAT_CLUSTER_AVAILABLE) {
3526 
3527  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3528 
3529  return FatClusterAvailable;
3530 
3531  } else if (Entry < FAT_CLUSTER_RESERVED) {
3532 
3533  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
3534 
3535  return FatClusterNext;
3536 
3537  } else if (Entry < FAT_CLUSTER_BAD) {
3538 
3539  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3540 
3541  return FatClusterReserved;
3542 
3543  } else if (Entry == FAT_CLUSTER_BAD) {
3544 
3545  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
3546 
3547  return FatClusterBad;
3548 
3549  } else {
3550 
3551  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
3552 
3553  return FatClusterLast;
3554  }
3555 }
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
#define FAT_CLUSTER_BAD
Definition: fat.h:257
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1415
#define FAT_CLUSTER_RESERVED
Definition: fat.h:256
#define FAT32_ENTRY_MASK
Definition: fat.h:227
base of all file and directory entries
Definition: entries.h:82
#define PAGED_CODE()
#define NT_ASSERT
Definition: rtlfuncs.h:3312

Referenced by _Requires_lock_held_(), and FatExamineFatEntries().

◆ FatLogOf()

UCHAR FatLogOf ( IN ULONG  Value)

Definition at line 4653 of file allocsup.c.

4674 {
4675  UCHAR Log = 0;
4676 
4677 #if FASTFATDBG
4678  ULONG OrigValue = Value;
4679 #endif
4680 
4681  PAGED_CODE();
4682 
4683  //
4684  // Knock bits off until we we get a one at position 0
4685  //
4686 
4687  while ( (Value & 0xfffffffe) != 0 ) {
4688 
4689  Log++;
4690  Value >>= 1;
4691  }
4692 
4693  //
4694  // If there was more than one bit set, the file system messed up,
4695  // Bug Check.
4696  //
4697 
4698  if (Value != 0x1) {
4699 
4700  DebugTrace(+1, Dbg, "LogOf\n", 0);
4701  DebugTrace( 0, Dbg, " Value = %8lx\n", OrigValue);
4702 
4703  DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
4704 
4705  DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
4706 
4707 #ifdef _MSC_VER
4708 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
4709 #endif
4710  FatBugCheck( Value, Log, 0 );
4711  }
4712 
4713  return Log;
4714 }
_In_ CLIPOBJ _In_ BRUSHOBJ _In_ LONG x1
Definition: winddi.h:3706
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
IN UCHAR Value
Definition: halp.h:394
#define FatBugCheck(A, B, C)
Definition: nodetype.h:104
#define Dbg
Definition: allocsup.c:28
unsigned char UCHAR
Definition: xmlstorage.h:181
unsigned int ULONG
Definition: retypes.h:1
#define PAGED_CODE()

Referenced by FatSetupAllocationSupport().

◆ FatLookupFatEntry()

VOID FatLookupFatEntry ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG  FatIndex,
IN OUT PULONG  FatEntry,
IN OUT PFAT_ENUMERATION_CONTEXT  Context 
)

Definition at line 3563 of file allocsup.c.

3596 {
3597  PAGED_CODE();
3598 
3599  DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
3600  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3601  DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3602  DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);
3603 
3604  //
3605  // Make sure they gave us a valid fat index.
3606  //
3607 
3608  FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3609 
3610  //
3611  // Case on 12 or 16 bit fats.
3612  //
3613  // In the 12 bit case (mostly floppies) we always have the whole fat
3614  // (max 6k bytes) pinned during allocation operations. This is possibly
3615  // a wee bit slower, but saves headaches over fat entries with 8 bits
3616  // on one page, and 4 bits on the next.
3617  //
3618  // The 16 bit case always keeps the last used page pinned until all
3619  // operations are done and it is unpinned.
3620  //
3621 
3622  //
3623  // DEAL WITH 12 BIT CASE
3624  //
3625 
3626  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3627 
3628  //
3629  // Check to see if the fat is already pinned, otherwise pin it.
3630  //
3631 
3632  if (Context->Bcb == NULL) {
3633 
3634  FatReadVolumeFile( IrpContext,
3635  Vcb,
3636  FatReservedBytes( &Vcb->Bpb ),
3637  FatBytesPerFat( &Vcb->Bpb ),
3638  &Context->Bcb,
3639  &Context->PinnedPage );
3640  }
3641 
3642  //
3643  // Load the return value.
3644  //
3645 
3646 
3647  FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );
3648 
3649  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3650 
3651  //
3652  // DEAL WITH 32 BIT CASE
3653  //
3654 
3655  ULONG PageEntryOffset;
3656  ULONG OffsetIntoVolumeFile;
3657 
3658  //
3659  // Initialize two local variables that help us.
3660  //
3661  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
3662  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);
3663 
3664  //
3665  // Check to see if we need to read in a new page of fat
3666  //
3667 
3668  if ((Context->Bcb == NULL) ||
3669  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3670 
3671  //
3672  // The entry wasn't in the pinned page, so must we unpin the current
3673  // page (if any) and read in a new page.
3674  //
3675 
3676  FatUnpinBcb( IrpContext, Context->Bcb );
3677 
3678  FatReadVolumeFile( IrpContext,
3679  Vcb,
3680  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3681  PAGE_SIZE,
3682  &Context->Bcb,
3683  &Context->PinnedPage );
3684 
3685  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3686  }
3687 
3688  //
3689  // Grab the fat entry from the pinned page, and return
3690  //
3691 
3692  *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;
3693 
3694  } else {
3695 
3696  //
3697  // DEAL WITH 16 BIT CASE
3698  //
3699 
3700  ULONG PageEntryOffset;
3701  ULONG OffsetIntoVolumeFile;
3702 
3703  //
3704  // Initialize two local variables that help us.
3705  //
3706 
3707  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
3708  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);
3709 
3710  //
3711  // Check to see if we need to read in a new page of fat
3712  //
3713 
3714  if ((Context->Bcb == NULL) ||
3715  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3716 
3717  //
3718  // The entry wasn't in the pinned page, so must we unpin the current
3719  // page (if any) and read in a new page.
3720  //
3721 
3722  FatUnpinBcb( IrpContext, Context->Bcb );
3723 
3724  FatReadVolumeFile( IrpContext,
3725  Vcb,
3726  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3727  PAGE_SIZE,
3728  &Context->Bcb,
3729  &Context->PinnedPage );
3730 
3731  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3732  }
3733 
3734  //
3735  // Grab the fat entry from the pinned page, and return
3736  //
3737 
3738  *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
3739  }
3740 
3741  DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
3742  return;
3743 }
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
VOID FatReadVolumeFile(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN VBO StartingVbo, IN ULONG ByteCount, OUT PBCB *Bcb, OUT PVOID *Buffer)
Definition: cachesup.c:102
#define FatVerifyIndexIsValid(IC, V, I)
Definition: fat.h:532
#define FatReservedBytes(B)
Definition: fat.h:414
#define FatBytesPerFat(B)
Definition: fat.h:410
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
smooth NULL
Definition: ftsmooth.c:416
#define FatLookup12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:584
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1415
Definition: fsck.fat.h:192
IN PVCB IN ULONG FatIndex
Definition: fatprocs.h:382
#define PAGE_SIZE
Definition: env_spec_w32.h:49
unsigned short USHORT
Definition: pedump.c:61
unsigned int * PULONG
Definition: retypes.h:1
ULONG32 FAT_ENTRY
Definition: fat.h:225
unsigned int ULONG
Definition: retypes.h:1
#define FAT32_ENTRY_MASK
Definition: fat.h:227
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:382
unsigned short * PUSHORT
Definition: retypes.h:2
#define PAGED_CODE()

Referenced by _Requires_lock_held_().

◆ FatSelectBestWindow()

INLINE ULONG FatSelectBestWindow ( IN PVCB  Vcb)

Definition at line 277 of file allocsup.c.

299 {
300  ULONG i, Fave = 0;
301  ULONG MaxFree = 0;
302  ULONG FirstEmpty = (ULONG)-1;
303  ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
304 
305  NT_ASSERT( 1 != Vcb->NumberOfWindows);
306 
307  for (i = 0; i < Vcb->NumberOfWindows; i++) {
308 
309  if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
310 
311  if (-1 == FirstEmpty) {
312 
313  //
314  // Keep note of the first empty window on the disc
315  //
316 
317  FirstEmpty = i;
318  }
319  }
320  else if (Vcb->Windows[i].ClustersFree > MaxFree) {
321 
322  //
323  // This window has the most free clusters, so far
324  //
325 
326  MaxFree = Vcb->Windows[i].ClustersFree;
327  Fave = i;
328 
329  //
330  // If this window has >50% free clusters, then we will take it,
331  // so don't bother considering more windows.
332  //
333 
334  if (MaxFree >= (ClustersPerWindow >> 1)) {
335 
336  break;
337  }
338  }
339  }
340 
341  //
342  // If there were no windows with 50% or more freespace, then select the
343  // first empty window on the disc, if any - otherwise we'll just go with
344  // the one with the most free clusters.
345  //
346 
347  if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
348 
349  Fave = FirstEmpty;
350  }
351 
352  return Fave;
353 }
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
#define Vcb
Definition: cdprocs.h:1415
unsigned int ULONG
Definition: retypes.h:1
#define NT_ASSERT
Definition: rtlfuncs.h:3312

Referenced by FatSetupAllocationSupport().

◆ FatSetFatRun()

VOID FatSetFatRun ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG  StartingFatIndex,
IN ULONG  ClusterCount,
IN BOOLEAN  ChainTogether 
)

Definition at line 4137 of file allocsup.c.

4173 {
4174 #define MAXCOUNTCLUS 0x10000
4175 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
4176  PBCB SavedBcbs[COUNTSAVEDBCBS][2];
4177 
4178  ULONG SectorSize;
4179  ULONG Cluster;
4180 
4181  LBO StartSectorLbo;
4182  LBO FinalSectorLbo;
4183  LBO Lbo;
4184 
4185  PVOID PinnedFat;
4186 
4188 
4189  ULONG SavedStartingFatIndex = StartingFatIndex;
4190 
4191  PAGED_CODE();
4192 
4193  DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
4194  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
4195  DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
4196  DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
4197  DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");
4198 
4199  //
4200  // Make sure they gave us a valid fat run.
4201  //
4202 
4203  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
4204  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);
4205 
4206  //
4207  // Check special case
4208  //
4209 
4210  if (ClusterCount == 0) {
4211 
4212  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4213  return;
4214  }
4215 
4216  //
4217  // Set Sector Size
4218  //
4219 
4220  SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
4221 
4222  //
4223  // Case on 12 or 16 bit fats.
4224  //
4225  // In the 12 bit case (mostly floppies) we always have the whole fat
4226  // (max 6k bytes) pinned during allocation operations. This is possibly
4227  // a wee bit slower, but saves headaches over fat entries with 8 bits
4228  // on one page, and 4 bits on the next.
4229  //
4230  // In the 16 bit case we only read one page at a time, as needed.
4231  //
4232 
4233  //
4234  // DEAL WITH 12 BIT CASE
4235  //
4236 
4237  _SEH2_TRY {
4238 
4239  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
4240 
4241  //
4242  // We read in the entire fat. Note that using prepare write marks
4243  // the bcb pre-dirty, so we don't have to do it explicitly.
4244  //
4245 
4246  RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);
4247 
4248  FatPrepareWriteVolumeFile( IrpContext,
4249  Vcb,
4250  FatReservedBytes( &Vcb->Bpb ),
4251  FatBytesPerFat( &Vcb->Bpb ),
4252  &SavedBcbs[0][0],
4253  &PinnedFat,
4254  TRUE,
4255  FALSE );
4256 
4257  //
4258  // Mark the affected sectors dirty. Note that FinalSectorLbo is
4259  // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4260  // we catch the case of a dirty fat entry straddling a sector boundry.
4261  //
4262  // Note that if the first AddMcbEntry succeeds, all following ones
4263  // will simply coalese, and thus also succeed.
4264  //
4265 
4266  StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
4267  & ~(SectorSize - 1);
4268 
4269  FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
4270  ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);
4271 
4272  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4273 
4274  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4275  }
4276 
4277  //
4278  // Store the entries into the fat; we need a little
4279  // synchonization here and can't use a spinlock since the bytes
4280  // might not be resident.
4281  //
4282 
4284  ReleaseMutex = TRUE;
4285 
4286  for (Cluster = StartingFatIndex;
4287  Cluster < StartingFatIndex + ClusterCount - 1;
4288  Cluster++) {
4289 
4290  FatSet12BitEntry( PinnedFat,
4291  Cluster,
4292  ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
4293  }
4294 
4295  //
4296  // Save the last entry
4297  //
4298 
4299  FatSet12BitEntry( PinnedFat,
4300  Cluster,
4301  ChainTogether ?
4303 
4305  ReleaseMutex = FALSE;
4306 
4307  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
4308 
4309  //
4310  // DEAL WITH 32 BIT CASE
4311  //
4312 
4313  for (;;) {
4314 
4315  VBO StartOffsetInVolume;
4316  VBO FinalOffsetInVolume;
4317 
4318  ULONG Page;
4319  ULONG FinalCluster;
4320  PULONG FatEntry = NULL;
4321  ULONG ClusterCountThisRun;
4322 
4323  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4324  StartingFatIndex * sizeof(FAT_ENTRY);
4325 
4326  if (ClusterCount > MAXCOUNTCLUS) {
4327  ClusterCountThisRun = MAXCOUNTCLUS;
4328  } else {
4329  ClusterCountThisRun = ClusterCount;
4330  }
4331 
4332  FinalOffsetInVolume = StartOffsetInVolume +
4333  (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);
4334 
4335  {
4336  ULONG NumberOfPages;
4337  ULONG Offset;
4338 
4339  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4340  (StartOffsetInVolume / PAGE_SIZE) + 1;
4341 
4342  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4343 
4344  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4345  Page < NumberOfPages;
4346  Page++, Offset += PAGE_SIZE ) {
4347 
4348  FatPrepareWriteVolumeFile( IrpContext,
4349  Vcb,
4350  Offset,
4351  PAGE_SIZE,
4352  &SavedBcbs[Page][0],
4353  (PVOID *)&SavedBcbs[Page][1],
4354  TRUE,
4355  FALSE );
4356 
4357  if (Page == 0) {
4358 
4359  FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
4360  (StartOffsetInVolume % PAGE_SIZE));
4361  }
4362  }
4363  }
4364 
4365  //
4366  // Mark the run dirty
4367  //
4368 
4369  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4370  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4371 
4372  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4373 
4374  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
4375  }
4376 
4377  //
4378  // Store the entries
4379  //
4380  // We need extra synchronization here for broken architectures
4381  // like the ALPHA that don't support atomic 16 bit writes.
4382  //
4383 
4384 #ifdef ALPHA
4386  ReleaseMutex = TRUE;
4387 #endif // ALPHA
4388 
4389  FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
4390  Page = 0;
4391 
4392  for (Cluster = StartingFatIndex;
4393  Cluster <= FinalCluster;
4394  Cluster++, FatEntry++) {
4395 
4396  //
4397  // If we just crossed a page boundry (as opposed to starting
4398  // on one), update our idea of FatEntry.
4399 
4400  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4401  (Cluster != StartingFatIndex) ) {
4402 
4403  Page += 1;
4404  FatEntry = (PULONG)SavedBcbs[Page][1];
4405  }
4406 
4407  *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4409  }
4410 
4411  //
4412  // Fix up the last entry if we were chaining together
4413  //
4414 
4415  if ((ClusterCount <= MAXCOUNTCLUS) &&
4416  ChainTogether ) {
4417 
4418  *(FatEntry-1) = FAT_CLUSTER_LAST;
4419  }
4420 
4421 #ifdef ALPHA
4423  ReleaseMutex = FALSE;
4424 #endif // ALPHA
4425 
4426  {
4427  ULONG i;
4428 
4429  //
4430  // Unpin the Bcbs
4431  //
4432 
4433  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4434 
4435  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4436  SavedBcbs[i][0] = NULL;
4437  }
4438  }
4439 
4440  if (ClusterCount <= MAXCOUNTCLUS) {
4441 
4442  break;
4443 
4444  } else {
4445 
4446  StartingFatIndex += MAXCOUNTCLUS;
4447  ClusterCount -= MAXCOUNTCLUS;
4448  }
4449  }
4450 
4451  } else {
4452 
4453  //
4454  // DEAL WITH 16 BIT CASE
4455  //
4456 
4457  VBO StartOffsetInVolume;
4458  VBO FinalOffsetInVolume;
4459 
4460  ULONG Page;
4461  ULONG FinalCluster;
4462  PUSHORT FatEntry = NULL;
4463 
4464  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4465  StartingFatIndex * sizeof(USHORT);
4466 
4467  FinalOffsetInVolume = StartOffsetInVolume +
4468  (ClusterCount - 1) * sizeof(USHORT);
4469 
4470  //
4471  // Read in one page of fat at a time. We cannot read in the
4472  // all of the fat we need because of cache manager limitations.
4473  //
4474  // SavedBcb was initialized to be able to hold the largest
4475  // possible number of pages in a fat plus and extra one to
4476  // accomadate the boot sector, plus one more to make sure there
4477  // is enough room for the RtlZeroMemory below that needs the mark
4478  // the first Bcb after all the ones we will use as an end marker.
4479  //
4480 
4481  {
4482  ULONG NumberOfPages;
4483  ULONG Offset;
4484 
4485  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4486  (StartOffsetInVolume / PAGE_SIZE) + 1;
4487 
4488  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4489 
4490  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4491  Page < NumberOfPages;
4492  Page++, Offset += PAGE_SIZE ) {
4493 
4494  FatPrepareWriteVolumeFile( IrpContext,
4495  Vcb,
4496  Offset,
4497  PAGE_SIZE,
4498  &SavedBcbs[Page][0],
4499  (PVOID *)&SavedBcbs[Page][1],
4500  TRUE,
4501  FALSE );
4502 
4503  if (Page == 0) {
4504 
4505  FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
4506  (StartOffsetInVolume % PAGE_SIZE));
4507  }
4508  }
4509  }
4510 
4511  //
4512  // Mark the run dirty
4513  //
4514 
4515  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4516  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4517 
4518  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4519 
4520  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4521  }
4522 
4523  //
4524  // Store the entries
4525  //
4526  // We need extra synchronization here for broken architectures
4527  // like the ALPHA that don't support atomic 16 bit writes.
4528  //
4529 
4530 #ifdef ALPHA
4532  ReleaseMutex = TRUE;
4533 #endif // ALPHA
4534 
4535  FinalCluster = StartingFatIndex + ClusterCount - 1;
4536  Page = 0;
4537 
4538  for (Cluster = StartingFatIndex;
4539  Cluster <= FinalCluster;
4540  Cluster++, FatEntry++) {
4541 
4542  //
4543  // If we just crossed a page boundry (as opposed to starting
4544  // on one), update our idea of FatEntry.
4545 
4546  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4547  (Cluster != StartingFatIndex) ) {
4548 
4549  Page += 1;
4550  FatEntry = (PUSHORT)SavedBcbs[Page][1];
4551  }
4552 
4553  *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4555  }
4556 
4557  //
4558  // Fix up the last entry if we were chaining together
4559  //
4560 
4561  if ( ChainTogether ) {
4562 
4563 #ifdef _MSC_VER
4564 #pragma warning( suppress: 4310 )
4565 #endif
4567 
4568  }
4569 #ifdef ALPHA
4571  ReleaseMutex = FALSE;
4572 #endif // ALPHA
4573  }
4574 
4575  } _SEH2_FINALLY {
4576 
4577  ULONG i;
4578 
4580 
4581  //
4582  // If we still somehow have the Mutex, release it.
4583  //
4584 
4585  if (ReleaseMutex) {
4586 
4588 
4590  }
4591 
4592  //
4593  // Unpin the Bcbs
4594  //
4595 
4596  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4597 
4598  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4599  }
4600 
4601  //
4602  // At this point nothing in this finally clause should have raised.
4603  // So, now comes the unsafe (sigh) stuff.
4604  //
4605 
4606  if ( _SEH2_AbnormalTermination() &&
4607  (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {
4608 
4609  //
4610  // Fat32 unwind
4611  //
4612  // This case is more complex because the FAT12 and FAT16 cases
4613  // pin all the needed FAT pages (128K max), after which it
4614  // can't fail, before changing any FAT entries. In the Fat32
4615  // case, it may not be practical to pin all the needed FAT
4616  // pages, because that could span many megabytes. So Fat32
4617  // attacks in chunks, and if a failure occurs once the first
4618  // chunk has been updated, we have to back out the updates.
4619  //
4620  // The unwind consists of walking back over each FAT entry we
4621  // have changed, setting it back to the previous value. Note
4622  // that the previous value with either be FAT_CLUSTER_AVAILABLE
4623  // (if ChainTogether==TRUE) or a simple link to the successor
4624  // (if ChainTogether==FALSE).
4625  //
4626  // We concede that any one of these calls could fail too; our
4627  // objective is to make this case no more likely than the case
4628  // for a file consisting of multiple disjoint runs.
4629  //
4630 
4631  while ( StartingFatIndex > SavedStartingFatIndex ) {
4632 
4633  StartingFatIndex--;
4634 
4635  FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
4636  ChainTogether ?
4637  StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
4638  }
4639  }
4640 
4641  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4642  } _SEH2_END;
4643 
4644  return;
4645 }
#define FAT_CLUSTER_LAST
Definition: fat.h:258
#define FatLockFreeClusterBitMap(VCB)
Definition: allocsup.c:99
#define TRUE
Definition: types.h:120
ULONG32 VBO
Definition: fat.h:38
unsigned char * PUCHAR
Definition: retypes.h:3
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:306
#define MAXCOUNTCLUS
#define FatVerifyIndexIsValid(IC, V, I)
Definition: fat.h:532
_SEH2_TRY
Definition: create.c:4226
uint32_t ULONG_PTR
Definition: typedefs.h:65
#define FatReservedBytes(B)
Definition: fat.h:414
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
#define FALSE
Definition: types.h:117
LONGLONG LBO
Definition: fat.h:34
#define FatBytesPerFat(B)
Definition: fat.h:410
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
unsigned char BOOLEAN
smooth NULL
Definition: ftsmooth.c:416
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:13
VOID FatSetFatRun(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
Definition: allocsup.c:4137
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255
#define DebugUnwind(X)
Definition: fatdata.h:315
#define Dbg
Definition: allocsup.c:28
#define COUNTSAVEDBCBS
#define Vcb
Definition: cdprocs.h:1415
BOOL WINAPI DECLSPEC_HOTPATCH ReleaseMutex(IN HANDLE hMutex)
Definition: synch.c:618
Definition: fsck.fat.h:192
#define FatUnlockFreeClusterBitMap(VCB)
Definition: allocsup.c:112
#define PAGE_SIZE
Definition: env_spec_w32.h:49
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
_SEH2_END
Definition: create.c:4400
unsigned short USHORT
Definition: pedump.c:61
_SEH2_FINALLY
Definition: create.c:4371
unsigned int * PULONG
Definition: retypes.h:1
#define FatSet12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:603
ULONG32 FAT_ENTRY
Definition: fat.h:225
unsigned int ULONG
Definition: retypes.h:1
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
_In_ ULONG SectorSize
Definition: halfuncs.h:291
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:382
unsigned short * PUSHORT
Definition: retypes.h:2
#define PAGED_CODE()
#define NT_ASSERT
Definition: rtlfuncs.h:3312

◆ FatSetupAllocationSupport()

VOID FatSetupAllocationSupport ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb 
)

Definition at line 357 of file allocsup.c.

378 {
379  ULONG BitIndex;
380  ULONG ClustersDescribableByFat;
381 
382  PAGED_CODE();
383 
384  DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
385  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
386 
387  //
388  // Compute a number of fields for Vcb.AllocationSupport
389  //
390 
391  Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
392  Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
393 
394  Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
395 
396  Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
397 
398  Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
399 
400  Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
401  Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
402  Vcb->AllocationSupport.NumberOfFreeClusters = 0;
403 
404 
405  //
406  // Deal with a bug in DOS 5 format, if the Fat is not big enough to
407  // describe all the clusters on the disk, reduce this number. We expect
408  // that fat32 volumes will not have this problem.
409  //
410  // Turns out this was not a good assumption. We have to do this always now.
411  //
412 
413  ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
414  Vcb->Bpb.SectorsPerFat) *
415  Vcb->Bpb.BytesPerSector * 8)
416  / FatIndexBitSize(&Vcb->Bpb) ) - 2;
417 
418  if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
419 
420  Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
421  }
422 
423  //
424  // Extend the virtual volume file to include the Fat
425  //
426 
427  {
429 
432  FatBytesPerFat( &Vcb->Bpb ));
434 
435  if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
436 
437  FatInitializeCacheMap( Vcb->VirtualVolumeFile,
438  &FileSizes,
439  TRUE,
441  Vcb );
442 
443  } else {
444 
445  CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
446  }
447  }
448 
449  _SEH2_TRY {
450 
451  if (FatIsFat32(Vcb) &&
452  Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
453 
454  Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
457 
458  } else {
459 
460  Vcb->NumberOfWindows = 1;
461  }
462 
464  Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
465  TAG_FAT_WINDOW );
466 
467  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
468  NULL,
469  0 );
470 
471  //
472  // Chose a FAT window to begin operation in.
473  //
474 
475  if (Vcb->NumberOfWindows > 1) {
476 
477  //
478  // Read the fat and count up free clusters. We bias by the two reserved
479  // entries in the FAT.
480  //
481 
482  FatExamineFatEntries( IrpContext, Vcb,
483  2,
484  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
485  TRUE,
486  NULL,
487  NULL);
488 
489 
490  //
491  // Pick a window to begin allocating from
492  //
493 
494  Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
495 
496  } else {
497 
498  Vcb->CurrentWindow = &Vcb->Windows[0];
499 
500  //
501  // Carefully bias ourselves by the two reserved entries in the FAT.
502  //
503 
504  Vcb->CurrentWindow->FirstCluster = 2;
505  Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
506  }
507 
508  //
509  // Now transition to the FAT window we have chosen.
510  //
511 
512  FatExamineFatEntries( IrpContext, Vcb,
513  0,
514  0,
515  FALSE,
516  Vcb->CurrentWindow,
517  NULL);
518 
519  //
520  // Now set the ClusterHint to the first free bit in our favorite
521  // window (except the ClusterHint is off by two).
522  //
523 
524  Vcb->ClusterHint =
525  (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
526  BitIndex + 2 : 2;
527 
528  } _SEH2_FINALLY {
529 
531 
532  //
533  // If we hit an exception, back out.
534  //
535 
537 
538  FatTearDownAllocationSupport( IrpContext, Vcb );
539  }
540  } _SEH2_END;
541 
542  return;
543 }
#define FatFileAreaLbo(B)
Definition: fat.h:458
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
PVOID NTAPI FsRtlAllocatePoolWithTag(IN POOL_TYPE PoolType, IN ULONG NumberOfBytes, IN ULONG Tag)
Definition: filter.c:229
#define FatIndexBitSize(B)
Definition: fat.h:515
#define TRUE
Definition: types.h:120
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
#define TAG_FAT_WINDOW
Definition: nodetype.h:166
VOID FatExamineFatEntries(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
Definition: allocsup.c:4718
static CC_FILE_SIZES FileSizes
VOID FatSetupAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:357
LARGE_INTEGER FileSize
Definition: cctypes.h:16
_SEH2_TRY
Definition: create.c:4226
#define FatReservedBytes(B)
Definition: fat.h:414
INLINE ULONG FatSelectBestWindow(IN PVCB Vcb)
Definition: allocsup.c:277
#define FALSE
Definition: types.h:117
#define FatBytesPerFat(B)
Definition: fat.h:410
UCHAR FatLogOf(IN ULONG Value)
Definition: allocsup.c:4653
smooth NULL
Definition: ftsmooth.c:416
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:13
#define FatRootDirectorySize(B)
Definition: fat.h:427
NTSYSAPI ULONG WINAPI RtlFindClearBits(PCRTL_BITMAP, ULONG, ULONG)
VOID FatTearDownAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:547
#define DebugUnwind(X)
Definition: fatdata.h:315
FAT_DATA FatData
Definition: fatdata.c:56
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1415
LARGE_INTEGER ValidDataLength
Definition: cctypes.h:17
#define FatIsFat32(VCB)
Definition: fatprocs.h:1446
#define FatNumberOfClusters(B)
Definition: fat.h:482
VOID FatInitializeCacheMap(_In_ PFILE_OBJECT FileObject, _In_ PCC_FILE_SIZES FileSizes, _In_ BOOLEAN PinAccess, _In_ PCACHE_MANAGER_CALLBACKS Callbacks, _In_ PVOID LazyWriteContext)
Definition: cachesup.c:62
_SEH2_END
Definition: create.c:4400
LARGE_INTEGER AllocationSize
Definition: cctypes.h:15
LARGE_INTEGER FatMaxLarge
Definition: fatdata.c:63
VOID NTAPI CcSetFileSizes(IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
Definition: fssup.c:356
_SEH2_FINALLY
Definition: create.c:4371
unsigned int ULONG
Definition: retypes.h:1
#define FatRootDirectoryLbo(B)
Definition: fat.h:445
LONGLONG QuadPart
Definition: typedefs.h:114
#define FatBytesPerCluster(B)
Definition: fat.h:408
#define PAGED_CODE()
CACHE_MANAGER_CALLBACKS CacheManagerNoOpCallbacks
Definition: fatstruc.h:159

Referenced by FatSetupAllocationSupport().

◆ FatTearDownAllocationSupport()

VOID FatTearDownAllocationSupport ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb 
)

Definition at line 547 of file allocsup.c.

570 {
571  DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
572  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
573 
574  PAGED_CODE();
575 
576  //
577  // If there are FAT buckets, free them.
578  //
579 
580  if ( Vcb->Windows != NULL ) {
581 
582  ExFreePool( Vcb->Windows );
583  Vcb->Windows = NULL;
584  }
585 
586  //
587  // Free the memory associated with the free cluster bitmap.
588  //
589 
590  if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
591 
592  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
593 
594  //
595  // NULL this field as an flag.
596  //
597 
598  Vcb->FreeClusterBitMap.Buffer = NULL;
599  }
600 
601  //
602  // And remove all the runs in the dirty fat Mcb
603  //
604 
605  FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
606 
607  DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
608 
609  UNREFERENCED_PARAMETER( IrpContext );
610 
611  return;
612 }
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:317
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
VOID FatRemoveMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN ULONG SectorCount)
Definition: fsctrl.c:599
smooth NULL
Definition: ftsmooth.c:416
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1415
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define PAGED_CODE()

Referenced by FatDeleteVcb(), and FatSetupAllocationSupport().