ReactOS  0.4.14-dev-1276-g8aa58c1
allocsup.c File Reference
#include "fatprocs.h"
Include dependency graph for allocsup.c:

Go to the source code of this file.

Macros

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)
 
#define Dbg   (DEBUG_TRACE_ALLOCSUP)
 
#define FatMin(a, b)   ((a) < (b) ? (a) : (b))
 
#define FAT_PREFETCH_PAGE_COUNT   0x100
 
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
 
#define FatLockFreeClusterBitMap(VCB)
 
#define FatUnlockFreeClusterBitMap(VCB)
 
#define FatIsClusterFree(IRPCONTEXT, VCB, FAT_INDEX)   (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
 
#define FatFreeClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatAllocateClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatUnreserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatReserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatFindFreeClusterRun(IRPCONTEXT, VCB, CLUSTER_COUNT, CLUSTER_HINT)
 
#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)
 
#define FatWindowOfCluster(C)   (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
 
#define MAX_ZERO_MDL_SIZE   (1*1024*1024)
 
#define MAXCOUNTCLUS   0x10000
 
#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
 

Functions

VOID FatLookupFatEntry (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
 
VOID FatSetFatRun (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
 
UCHAR FatLogOf (IN ULONG Value)
 
INLINE ULONG FatSelectBestWindow (IN PVCB Vcb)
 
VOID FatSetupAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
VOID FatTearDownAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
 _Requires_lock_held_ (_Global_critical_region_)
 
CLUSTER_TYPE FatInterpretClusterType (IN PVCB Vcb, IN FAT_ENTRY Entry)
 
VOID FatExamineFatEntries (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
 

Macro Definition Documentation

◆ ASSERT_CURRENT_WINDOW_GOOD

#define ASSERT_CURRENT_WINDOW_GOOD (   VCB)

Definition at line 83 of file allocsup.c.

◆ BugCheckFileId

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)

Definition at line 22 of file allocsup.c.

◆ COUNTSAVEDBCBS

#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
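COUNTSAVEDBCBS and MAXCOUNTCLUS are defined inside FatSetFatRun (see the listing below) and bound the array of pinned Bcb/buffer pairs the routine keeps while rewriting a run of FAT entries. As a worked example, assuming the common PAGE_SIZE of 4096 and the 4-byte FAT_ENTRY used for FAT32, COUNTSAVEDBCBS = (0x10000 * 4 / 4096) + 2 = 64 + 2 = 66 saved Bcb/buffer pairs.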

◆ Dbg

#define Dbg   (DEBUG_TRACE_ALLOCSUP)

Definition at line 28 of file allocsup.c.

◆ FAT_PREFETCH_PAGE_COUNT

#define FAT_PREFETCH_PAGE_COUNT   0x100

Definition at line 36 of file allocsup.c.

◆ FatAllocateClusters

#define FatAllocateClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
} \
}

Definition at line 158 of file allocsup.c.

◆ FatFindFreeClusterRun

#define FatFindFreeClusterRun (   IRPCONTEXT,
  VCB,
  CLUSTER_COUNT,
  CLUSTER_HINT 
)
Value:
( \
(CLUSTER_COUNT == 1) && \
FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
(CLUSTER_HINT) : \
RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
(CLUSTER_COUNT), \
(CLUSTER_HINT) - 2) + 2 \
)

Definition at line 229 of file allocsup.c.
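The bias by 2 exists because FAT entries 0 and 1 are reserved, so bit 0 of FreeClusterBitMap tracks cluster 2. A minimal user-mode sketch of the same bookkeeping (not the driver code; RtlFindClearBits has richer hint and wrap-around behavior than the linear scan shown here, and all names are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define BITMAP_BITS 64                     /* clusters 2 .. 65 in this toy volume */

    static int TestBit(const uint8_t *map, uint32_t bit)
    {
        return (map[bit / 8] >> (bit % 8)) & 1;
    }

    /* Roughly what FatFindFreeClusterRun computes: the FAT index of the first
     * run of runLength free clusters at or after the hint. */
    static uint32_t FindFreeClusterRun(const uint8_t *map, uint32_t runLength, uint32_t hintIndex)
    {
        uint32_t start;

        for (start = hintIndex - 2; start + runLength <= BITMAP_BITS; start++) {
            uint32_t i;
            for (i = 0; i < runLength && !TestBit(map, start + i); i++) { }
            if (i == runLength) {
                return start + 2;              /* translate bitmap bit back to FAT index */
            }
        }
        return 0xFFFFFFFF;                     /* no run found */
    }

    int main(void)
    {
        uint8_t map[BITMAP_BITS / 8];

        memset(map, 0, sizeof(map));
        map[0] = 0x0F;                         /* clusters 2..5 (bits 0..3) already in use */

        printf("first free run of 4 starts at FAT index %u\n",
               FindFreeClusterRun(map, 4, 2)); /* prints 6 */
        return 0;
    }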

◆ FatFreeClusters

#define FatFreeClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
} \
}

Definition at line 140 of file allocsup.c.

◆ FatIsClusterFree

#define FatIsClusterFree (   IRPCONTEXT,
  VCB,
  FAT_INDEX 
)    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)

Definition at line 127 of file allocsup.c.

◆ FatLockFreeClusterBitMap

#define FatLockFreeClusterBitMap (   VCB)
Value:
{ \
NT_ASSERT(KeAreApcsDisabled()); \
ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
}

Definition at line 99 of file allocsup.c.

◆ FatMin

#define FatMin (   a,
  b 
)    ((a) < (b) ? (a) : (b))

Definition at line 30 of file allocsup.c.

◆ FatReserveClusters

#define FatReserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
_AfterRun = 2; \
} \
if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
(VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
if (1 == (VCB)->ClusterHint) { \
(VCB)->ClusterHint = 2; \
} \
} \
else { \
(VCB)->ClusterHint = _AfterRun; \
} \
}

Definition at line 197 of file allocsup.c.
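After setting the bits for the run, the macro advances ClusterHint to the first FAT index past the run, wrapping back to cluster 2 at the end of the bitmap and searching forward if that index is already in use. A simplified user-mode sketch of that hint update (not the driver macro; the real code uses RtlSetBits/RtlFindClearBits and has an extra guard for a failed search):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define TOTAL_CLUSTERS 16                  /* FAT indices 2 .. 17 in this toy volume */

    static uint8_t Used[TOTAL_CLUSTERS];       /* one byte per cluster for clarity */
    static uint32_t ClusterHint = 2;

    static void ReserveClusters(uint32_t fatIndex, uint32_t count)
    {
        uint32_t afterRun = fatIndex + count;
        uint32_t i;

        for (i = 0; i < count; i++) {
            Used[fatIndex - 2 + i] = 1;        /* slot (index - 2) marks the cluster used */
        }

        if (afterRun - 2 >= TOTAL_CLUSTERS) {
            afterRun = 2;                      /* wrap past the end of the bitmap */
        }

        while (afterRun - 2 < TOTAL_CLUSTERS && Used[afterRun - 2]) {
            afterRun++;                        /* skip forward over allocated clusters */
        }

        ClusterHint = (afterRun - 2 < TOTAL_CLUSTERS) ? afterRun : 2;
    }

    int main(void)
    {
        memset(Used, 0, sizeof(Used));
        ReserveClusters(2, 5);                 /* take clusters 2..6 */
        printf("ClusterHint = %u\n", ClusterHint);   /* prints 7 */
        return 0;
    }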

◆ FatUnlockFreeClusterBitMap

#define FatUnlockFreeClusterBitMap (   VCB)
Value:
{ \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
NT_ASSERT(KeAreApcsDisabled()); \
ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
}

Definition at line 112 of file allocsup.c.

◆ FatUnreserveClusters

#define FatUnreserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
if ((FAT_INDEX) < (VCB)->ClusterHint) { \
(VCB)->ClusterHint = (FAT_INDEX); \
} \
}

Definition at line 176 of file allocsup.c.

◆ FatWindowOfCluster

#define FatWindowOfCluster (   C)    (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)

Definition at line 253 of file allocsup.c.
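A worked example with the MAX_CLUSTER_BITMAP_SIZE of 2^16 defined above: each window covers 65536 clusters starting at cluster 2, so

    FatWindowOfCluster(2)       == 0    /* first cluster of window 0        */
    FatWindowOfCluster(0x10001) == 0    /* last cluster covered by window 0 */
    FatWindowOfCluster(0x10002) == 1    /* first cluster of window 1        */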

◆ MAX_CLUSTER_BITMAP_SIZE

#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)

Definition at line 247 of file allocsup.c.

◆ MAX_ZERO_MDL_SIZE

#define MAX_ZERO_MDL_SIZE   (1*1024*1024)

Definition at line 2600 of file allocsup.c.

◆ MAXCOUNTCLUS

#define MAXCOUNTCLUS   0x10000

Function Documentation

◆ _Requires_lock_held_()

_Requires_lock_held_ ( _Global_critical_region_  )

Definition at line 615 of file allocsup.c.

658 {
659  VBO CurrentVbo;
660  LBO CurrentLbo;
661  LBO PriorLbo;
662 
663  VBO FirstVboOfCurrentRun = 0;
664  LBO FirstLboOfCurrentRun;
665 
666  BOOLEAN LastCluster;
667  ULONG Runs;
668 
669  PVCB Vcb;
670  FAT_ENTRY FatEntry = 0;
671  ULONG BytesPerCluster;
672  ULARGE_INTEGER BytesOnVolume;
673 
674  FAT_ENUMERATION_CONTEXT Context;
675 
676  PAGED_CODE();
677 
678  Vcb = FcbOrDcb->Vcb;
679 
680 
681  DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
682  DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
683  DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
684  DebugTrace( 0, Dbg, " pLbo = %8lx\n", Lbo);
685  DebugTrace( 0, Dbg, " pByteCount = %8lx\n", ByteCount);
686  DebugTrace( 0, Dbg, " pAllocated = %8lx\n", Allocated);
687 
688  Context.Bcb = NULL;
689 
690  *EndOnMax = FALSE;
691 
692  //
693  // Check the trivial case that the mapping is already in our
694  // Mcb.
695  //
696 
697  if ( FatLookupMcbEntry( Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index )) {
698 
699  *Allocated = TRUE;
700 
701  NT_ASSERT( *ByteCount != 0 );
702 
703  //
704  // Detect the overflow case, trim and claim the condition.
705  //
706 
707  if (Vbo + *ByteCount == 0) {
708 
709  *EndOnMax = TRUE;
710  }
711 
712  DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
713  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
714  return;
715  }
716 
717  //
718  // Initialize the Vcb, the cluster size, LastCluster, and
719  // FirstLboOfCurrentRun (to be used as an indication of the first
720  // iteration through the following while loop).
721  //
722 
723  BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
724 
725  BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
726 
727  LastCluster = FALSE;
728  FirstLboOfCurrentRun = 0;
729 
730  //
731  // Discard the case that the request extends beyond the end of
732  // allocation. Note that if the allocation size is not known
733  // AllocationSize is set to 0xffffffff.
734  //
735 
736  if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
737 
738  *Allocated = FALSE;
739 
740  DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
741  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
742  return;
743  }
744 
745  //
746  // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
747  // and FatEntry to describe the beginning of the last entry in the Mcb.
748  // This is used as initialization for the following loop.
749  //
750  // If the Mcb was empty, we start at the beginning of the file with
751  // CurrentVbo set to 0 to indicate a new run.
752  //
753 
754  if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
755 
756  DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
757 
758  CurrentVbo -= (BytesPerCluster - 1);
759  CurrentLbo -= (BytesPerCluster - 1);
760 
761  //
762  // Convert an index to a count.
763  //
764 
765  Runs += 1;
766 
767  } else {
768 
769  DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
770 
771  //
772  // Check for an FcbOrDcb that has no allocation
773  //
774 
775  if (FcbOrDcb->FirstClusterOfFile == 0) {
776 
777  *Allocated = FALSE;
778 
779  DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
780  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
781  return;
782 
783  } else {
784 
785  CurrentVbo = 0;
786  CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
787  FirstVboOfCurrentRun = CurrentVbo;
788  FirstLboOfCurrentRun = CurrentLbo;
789 
790  Runs = 0;
791 
792  DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
793  }
794  }
795 
796  //
797  // Now we know that we are looking up a valid Vbo, but it is
798  // not in the Mcb, which is a monotonically increasing list of
799  // Vbo's. Thus we have to go to the Fat, and update
800  // the Mcb as we go. We use a try-finally to unpin the page
801  // of fat hanging around. Also we mark *Allocated = FALSE, so that
802  // the caller won't try to use the data if we hit an exception.
803  //
804 
805  *Allocated = FALSE;
806 
807  _SEH2_TRY {
808 
809  FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
810 
811  //
812  // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
813  // The assumption here is that only whole clusters of Vbos and Lbos
814  // are mapped in the Mcb.
815  //
816 
817  NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
818  % BytesPerCluster == 0) &&
819  (CurrentVbo % BytesPerCluster == 0) );
820 
821  //
822  // Starting from the first Vbo after the last Mcb entry, scan through
823  // the Fat looking for our Vbo. We continue through the Fat until we
824  // hit a noncontiguity beyond the desired Vbo, or the last cluster.
825  //
826 
827  while ( !LastCluster ) {
828 
829  //
830  // Get the next fat entry, and update our Current variables.
831  //
832 
833  FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );
834 
835  PriorLbo = CurrentLbo;
836  CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
837  CurrentVbo += BytesPerCluster;
838 
839  switch ( FatInterpretClusterType( Vcb, FatEntry )) {
840 
841  //
842  // Check for a break in the Fat allocation chain.
843  //
844 
845  case FatClusterAvailable:
846  case FatClusterReserved:
847  case FatClusterBad:
848 
849  DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
850  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
851 
852  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
853  FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
854  break;
855 
856  //
857  // If this is the last cluster, we must update the Mcb and
858  // exit the loop.
859  //
860 
861  case FatClusterLast:
862 
863  //
864  // Assert we know where the current run started. If the
865  // Mcb was empty when we were called, then FirstLboOfCurrentRun
866  // was set to the start of the file. If the Mcb contained an
867  // entry, then FirstLboOfCurrentRun was set on the first
868  // iteration through the loop. Thus if FirstLboOfCurrentRun
869  // is 0, then there was an Mcb entry and we are on our first
870  // iteration, meaning that the last cluster in the Mcb was
871  // really the last allocated cluster, but we checked Vbo
872  // against AllocationSize, and found it OK, thus AllocationSize
873  // must be too large.
874  //
875  // Note that, when we finally arrive here, CurrentVbo is actually
876  // the first Vbo beyond the file allocation and CurrentLbo is
877  // meaningless.
878  //
879 
880  DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
881 
882  //
883  // Detect the case of the maximal file. Note that this really isn't
884  // a proper Vbo - those are zero-based, and this is a one-based number.
885  // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
886  // 2^32 - 2.
887  //
888  // Just so we don't get confused here.
889  //
890 
891  if (CurrentVbo == 0) {
892 
893  *EndOnMax = TRUE;
894  CurrentVbo -= 1;
895  }
896 
897  LastCluster = TRUE;
898 
899  if (FirstLboOfCurrentRun != 0 ) {
900 
901  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
902  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
903  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
904  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
905 
906  FatAddMcbEntry( Vcb,
907  &FcbOrDcb->Mcb,
908  FirstVboOfCurrentRun,
909  FirstLboOfCurrentRun,
910  CurrentVbo - FirstVboOfCurrentRun );
911 
912  Runs += 1;
913  }
914 
915  //
916  // Being at the end of allocation, make sure we have found
917  // the Vbo. If we haven't, seeing as we checked VBO
918  // against AllocationSize, the real disk allocation is less
919  // than that of AllocationSize. This comes about when the
920  // real allocation is not yet known, and AllocationSize
921  // contains MAXULONG.
922  //
923  // KLUDGE! - If we were called by FatLookupFileAllocationSize
924  // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
925  // hint. Thus we merrily go along looking for a match that isn't
926  // there, but in the meantime building an Mcb. If this is
927  // the case, fill in AllocationSize and return.
928  //
929 
930  if ( Vbo == MAXULONG - 1 ) {
931 
932  *Allocated = FALSE;
933 
934  FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
935 
936  DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
937  try_return ( NOTHING );
938  }
939 
940  //
941  // We will lie ever so slightly if we really terminated on the
942  // maximal byte of a file. It is really allocated.
943  //
944 
945  if (Vbo >= CurrentVbo && !*EndOnMax) {
946 
947  *Allocated = FALSE;
948  try_return ( NOTHING );
949  }
950 
951  break;
952 
953  //
954  // This is a continuation in the chain. If the run has a
955  // discontiguity at this point, update the Mcb, and if we are beyond
956  // the desired Vbo, this is the end of the run, so set LastCluster
957  // and exit the loop.
958  //
959 
960  case FatClusterNext:
961 
962  //
963  // This is the loop check. The Vbo must not be bigger than the size of
964  // the volume, and the Vbo must not have a) wrapped and b) not been at the
965  // very last cluster in the chain, for the case of the maximal file.
966  //
967 
968  if ( CurrentVbo == 0 ||
969  (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
970 
971  FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
972  FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
973  }
974 
975  if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
976 
977  //
978  // Note that on the first time through the loop
979  // (FirstLboOfCurrentRun == 0), we don't add the
980  // run to the Mcb since it corresponds to the last
981  // run already stored in the Mcb.
982  //
983 
984  if ( FirstLboOfCurrentRun != 0 ) {
985 
986  DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
987  DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
988  DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
989  DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
990 
991  FatAddMcbEntry( Vcb,
992  &FcbOrDcb->Mcb,
993  FirstVboOfCurrentRun,
994  FirstLboOfCurrentRun,
995  CurrentVbo - FirstVboOfCurrentRun );
996 
997  Runs += 1;
998  }
999 
1000  //
1001  // Since we are at a run boundary, with CurrentLbo and
1002  // CurrentVbo being the first cluster of the next run,
1003  // we see if the run we just added encompasses the desired
1004  // Vbo, and if so exit. Otherwise we set up two new
1005  // First*boOfCurrentRun, and continue.
1006  //
1007 
1008  if (CurrentVbo > Vbo) {
1009 
1010  LastCluster = TRUE;
1011 
1012  } else {
1013 
1014  FirstVboOfCurrentRun = CurrentVbo;
1015  FirstLboOfCurrentRun = CurrentLbo;
1016  }
1017  }
1018  break;
1019 
1020  default:
1021 
1022  DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1023 
1024 #ifdef _MSC_VER
1025 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1026 #endif
1027  FatBugCheck( 0, 0, 0 );
1028 
1029  break;
1030 
1031  } // switch()
1032  } // while()
1033 
1034  //
1035  // Load up the return parameters.
1036  //
1037  // On exit from the loop, Vbo still contains the desired Vbo, and
1038  // CurrentVbo is the first byte after the run that contained the
1039  // desired Vbo.
1040  //
1041 
1042  *Allocated = TRUE;
1043 
1044  *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1045 
1046  *ByteCount = CurrentVbo - Vbo;
1047 
1048  if (ARGUMENT_PRESENT(Index)) {
1049 
1050  //
1051  // Note that Runs only needs to be accurate with respect to where we
1052  // ended. Since partial-lookup cases will occur without exclusive
1053  // synchronization, the Mcb itself may be much bigger by now.
1054  //
1055 
1056  *Index = Runs - 1;
1057  }
1058 
1059  try_exit: NOTHING;
1060 
1061  } _SEH2_FINALLY {
1062 
1063  DebugUnwind( FatLookupFileAllocation );
1064 
1065  //
1066  // We are done reading the Fat, so unpin the last page of fat
1067  // that is hanging around
1068  //
1069 
1070  FatUnpinBcb( IrpContext, Context.Bcb );
1071 
1072  DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1073  } _SEH2_END;
1074 
1075  return;
1076 }
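Doxygen files this listing under the SAL annotation _Requires_lock_held_(_Global_critical_region_), but the DebugTrace strings show it is FatLookupFileAllocation: it resolves a file byte offset (Vbo) to a volume byte offset (Lbo) by extending the file's Mcb from the FAT chain. A minimal user-mode sketch of the underlying chain walk (not the driver routine, which also coalesces runs into the Mcb and handles the maximal-file and corruption cases; all names and layout values below are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define FAT32_ENTRY_MASK  0x0FFFFFFF
    #define FAT_CHAIN_END     0x0FFFFFF8   /* masked values >= this end the chain */

    static int LookupFileAllocation(const uint32_t *fat, uint32_t firstCluster,
                                    uint32_t bytesPerCluster, uint64_t fileAreaLbo,
                                    uint64_t vbo, uint64_t *lbo)
    {
        uint32_t cluster = firstCluster;
        uint64_t clusterStartVbo = 0;

        /* Advance cluster by cluster until the cluster covering vbo is reached. */
        while (clusterStartVbo + bytesPerCluster <= vbo) {
            uint32_t next = fat[cluster] & FAT32_ENTRY_MASK;
            if (next >= FAT_CHAIN_END) {
                return 0;                          /* vbo lies beyond the allocation */
            }
            cluster = next;
            clusterStartVbo += bytesPerCluster;
        }

        /* Cluster 2 is the first cluster of the file area on a FAT volume. */
        *lbo = fileAreaLbo + (uint64_t)(cluster - 2) * bytesPerCluster
                           + (vbo - clusterStartVbo);
        return 1;
    }

    int main(void)
    {
        uint32_t fat[16] = { 0 };
        uint64_t lbo;

        /* File occupies clusters 5 -> 6 -> 9; 4 KiB clusters; file area at 1 MiB. */
        fat[5] = 6; fat[6] = 9; fat[9] = 0x0FFFFFFF;

        if (LookupFileAllocation(fat, 5, 4096, 1 << 20, 9000, &lbo)) {
            printf("Vbo 9000 maps to Lbo 0x%llx\n", (unsigned long long)lbo);
        }
        return 0;
    }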

◆ FatExamineFatEntries()

VOID FatExamineFatEntries ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG StartIndex  OPTIONAL,
IN ULONG EndIndex  OPTIONAL,
IN BOOLEAN  SetupWindows,
IN PFAT_WINDOW SwitchToWindow  OPTIONAL,
IN PULONG BitMapBuffer  OPTIONAL 
)

Definition at line 4722 of file allocsup.c.

4772 {
4773  ULONG FatIndexBitSize;
4774  ULONG Page = 0;
4775  ULONG Offset = 0;
4776  ULONG FatIndex;
4777  FAT_ENTRY FatEntry = FAT_CLUSTER_AVAILABLE;
4778  FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4779  PUSHORT FatBuffer;
4780  PVOID pv;
4781  PBCB Bcb = NULL;
4782  ULONG EntriesPerWindow;
4783 
4784  ULONG ClustersThisRun;
4785  ULONG StartIndexOfThisRun;
4786 
4787  PULONG FreeClusterCount = NULL;
4788 
4789  PFAT_WINDOW CurrentWindow = NULL;
4790 
4791  PVOID NewBitMapBuffer = NULL;
4792  PRTL_BITMAP BitMap = NULL;
4793  RTL_BITMAP PrivateBitMap;
4794 
4795  ULONG ClusterSize = 0;
4796  ULONG PrefetchPages = 0;
4797  ULONG FatPages = 0;
4798 
4799  VBO BadClusterVbo = 0;
4800  LBO Lbo = 0;
4801 
4802  enum RunType {
4803  FreeClusters,
4804  AllocatedClusters,
4805  UnknownClusters
4806  } CurrentRun;
4807 
4808  PAGED_CODE();
4809 
4810  //
4811  // Now assert correct usage.
4812  //
4813 
4814  FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4815 
4816  NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4817  NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4818 
4819  if (Vcb->NumberOfWindows > 1) {
4820 
4821  //
4822  // FAT32: Calculate the number of FAT entries covered by a window. This is
4823  // equal to the number of bits in the freespace bitmap, the size of which
4824  // is hardcoded.
4825  //
4826 
4827  EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4828 
4829  } else {
4830 
4831  EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4832  }
4833 
4834  //
4835  // We will also fill in the cumulative count of free clusters for
4836  // the entire volume. If this is not appropriate, NULL it out
4837  // shortly.
4838  //
4839 
4840  FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4841 
4842  if (SetupWindows) {
4843 
4844  NT_ASSERT(BitMapBuffer == NULL);
4845 
4846  //
4847  // In this case we're just supposed to scan the fat and set up
4848  // the information regarding where the buckets fall and how many
4849  // free clusters are in each.
4850  //
4851  // It is fine to monkey with the real windows, we must be able
4852  // to do this to activate the volume.
4853  //
4854 
4855  BitMap = NULL;
4856 
4857  CurrentWindow = &Vcb->Windows[0];
4858  CurrentWindow->FirstCluster = StartIndex;
4859  CurrentWindow->ClustersFree = 0;
4860 
4861  //
4862  // We always wish to calculate total free clusters when
4863  // setting up the FAT windows.
4864  //
4865 
4866  } else if (BitMapBuffer == NULL) {
4867 
4868  //
4869  // We will be filling in the free cluster bitmap for the volume.
4870  // Careful, we can raise out of here and be hopelessly hosed if
4871  // we built this up in the main bitmap/window itself.
4872  //
4873  // For simplicity's sake, we'll do the swap for everyone. FAT32
4874  // provokes the need since we can't tolerate partial results
4875  // when switching windows.
4876  //
4877 
4878  NT_ASSERT( SwitchToWindow );
4879 
4880  CurrentWindow = SwitchToWindow;
4881  StartIndex = CurrentWindow->FirstCluster;
4882  EndIndex = CurrentWindow->LastCluster;
4883 
4884  BitMap = &PrivateBitMap;
4885  NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4886  (EntriesPerWindow + 7) / 8,
4887  TAG_FAT_BITMAP );
4888 
4889  RtlInitializeBitMap( &PrivateBitMap,
4890  NewBitMapBuffer,
4891  EndIndex - StartIndex + 1);
4892 
4893  if ((FatIndexBitSize == 32) &&
4894  (Vcb->NumberOfWindows > 1)) {
4895 
4896  //
4897  // We do not wish to count total clusters here.
4898  //
4899 
4900  FreeClusterCount = NULL;
4901 
4902  }
4903 
4904  } else {
4905 
4906  BitMap = &PrivateBitMap;
4907  RtlInitializeBitMap(&PrivateBitMap,
4908  BitMapBuffer,
4909  EndIndex - StartIndex + 1);
4910 
4911  //
4912  // We do not count total clusters here.
4913  //
4914 
4915  FreeClusterCount = NULL;
4916  }
4917 
4918  //
4919  // Now, our start index better be in the file heap.
4920  //
4921 
4922  NT_ASSERT( StartIndex >= 2 );
4923 
4924  _SEH2_TRY {
4925 
4926  //
4927  // Pick up the initial chunk of the FAT and first entry.
4928  //
4929 
4930  if (FatIndexBitSize == 12) {
4931 
4932  //
4933  // We read in the entire fat in the 12 bit case.
4934  //
4935 
4936  FatReadVolumeFile( IrpContext,
4937  Vcb,
4938  FatReservedBytes( &Vcb->Bpb ),
4939  FatBytesPerFat( &Vcb->Bpb ),
4940  &Bcb,
4941  (PVOID *)&FatBuffer );
4942 
4943  FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4944 
4945  } else {
4946 
4947  //
4948  // Read in one page of fat at a time. We cannot read in
4949  // all of the fat we need because of cache manager limitations.
4950  //
4951 
4952  ULONG BytesPerEntry = FatIndexBitSize >> 3;
4953 
4954  FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE;
4955  Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4956 
4957  Offset = Page * PAGE_SIZE;
4958 
4959  //
4960  // Prefetch the FAT entries in memory for optimal performance.
4961  //
4962 
4963  PrefetchPages = FatPages - Page;
4964 
4965  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
4966 
4967  PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page;
4968  }
4969 
4970 #if (NTDDI_VERSION >= NTDDI_WIN8)
4971  FatPrefetchPages( IrpContext,
4972  Vcb->VirtualVolumeFile,
4973  Page,
4974  PrefetchPages );
4975 #endif
4976 
4977  FatReadVolumeFile( IrpContext,
4978  Vcb,
4979  Offset,
4980  PAGE_SIZE,
4981  &Bcb,
4982  &pv);
4983 
4984  if (FatIndexBitSize == 32) {
4985 
4986  FatBuffer = (PUSHORT)((PUCHAR)pv +
4987  (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4988  PAGE_SIZE);
4989 
4990  FirstFatEntry = *((PULONG)FatBuffer);
4991  FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4992 
4993  } else {
4994 
4995  FatBuffer = (PUSHORT)((PUCHAR)pv +
4996  FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4997 
4998  FirstFatEntry = *FatBuffer;
4999  }
5000 
5001  }
5002 
5003  ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster);
5004 
5005  CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
5006  FreeClusters : AllocatedClusters;
5007 
5008  StartIndexOfThisRun = StartIndex;
5009 
5010  for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
5011 
5012  if (FatIndexBitSize == 12) {
5013 
5014  FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
5015 
5016  } else {
5017 
5018  //
5019  // If we are setting up the FAT32 windows and have stepped into a new
5020  // bucket, finalize this one and move forward.
5021  //
5022 
5023  if (SetupWindows &&
5024  FatIndex > StartIndex &&
5025  (FatIndex - 2) % EntriesPerWindow == 0) {
5026 
5027  CurrentWindow->LastCluster = FatIndex - 1;
5028 
5029  if (CurrentRun == FreeClusters) {
5030 
5031  //
5032  // We must be counting clusters in order to modify the
5033  // contents of the window.
5034  //
5035 
5036  NT_ASSERT( FreeClusterCount );
5037 
5038  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5039  CurrentWindow->ClustersFree += ClustersThisRun;
5040 
5041  if (FreeClusterCount) {
5042  *FreeClusterCount += ClustersThisRun;
5043  }
5044 
5045  } else {
5046 
5047  NT_ASSERT(CurrentRun == AllocatedClusters);
5048 
5049  }
5050 
5051  StartIndexOfThisRun = FatIndex;
5052  CurrentRun = UnknownClusters;
5053 
5054  CurrentWindow++;
5055  CurrentWindow->ClustersFree = 0;
5056  CurrentWindow->FirstCluster = FatIndex;
5057  }
5058 
5059  //
5060  // If we just stepped onto a new page, grab a new pointer.
5061  //
5062 
5063  if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
5064 
5065  FatUnpinBcb( IrpContext, Bcb );
5066 
5067  Page++;
5068  Offset += PAGE_SIZE;
5069 
5070 #if (NTDDI_VERSION >= NTDDI_WIN8)
5071  //
5072  // If we have exhausted all the prefetch pages, prefetch the next chunk.
5073  //
5074 
5075  if (--PrefetchPages == 0) {
5076 
5077  PrefetchPages = FatPages - Page;
5078 
5079  if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
5080 
5081  PrefetchPages = FAT_PREFETCH_PAGE_COUNT;
5082  }
5083 
5084  FatPrefetchPages( IrpContext,
5085  Vcb->VirtualVolumeFile,
5086  Page,
5087  PrefetchPages );
5088  }
5089 #endif
5090 
5091  FatReadVolumeFile( IrpContext,
5092  Vcb,
5093  Offset,
5094  PAGE_SIZE,
5095  &Bcb,
5096  &pv );
5097 
5098  FatBuffer = (PUSHORT)pv;
5099  }
5100 
5101  if (FatIndexBitSize == 32) {
5102 
5103 #ifndef __REACTOS__
5104 #ifdef _MSC_VER
5105 #pragma warning( suppress: 4213 )
5106 #endif
5107  FatEntry = *((PULONG)FatBuffer)++;
5109 #else
5110  FatEntry = *FatBuffer;
5111  FatBuffer += 1;
5113 #endif
5114 
5115  } else {
5116 
5117  FatEntry = *FatBuffer;
5118  FatBuffer += 1;
5119  }
5120  }
5121 
5122  if (CurrentRun == UnknownClusters) {
5123 
5124  CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
5125  FreeClusters : AllocatedClusters;
5126  }
5127 
5128  //
5129  // Are we switching from a free run to an allocated run?
5130  //
5131 
5132  if (CurrentRun == FreeClusters &&
5133  FatInterpretClusterType( Vcb, FatEntry ) != FatClusterAvailable) {
5134 
5135  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5136 
5137  if (FreeClusterCount) {
5138 
5139  *FreeClusterCount += ClustersThisRun;
5140  CurrentWindow->ClustersFree += ClustersThisRun;
5141  }
5142 
5143  if (BitMap) {
5144 
5145  RtlClearBits( BitMap,
5146  StartIndexOfThisRun - StartIndex,
5147  ClustersThisRun );
5148  }
5149 
5150  CurrentRun = AllocatedClusters;
5151  StartIndexOfThisRun = FatIndex;
5152  }
5153 
5154  //
5155  // Are we switching from an allocated run to a free run?
5156  //
5157 
5158  if (CurrentRun == AllocatedClusters &&
5159  FatInterpretClusterType( Vcb, FatEntry ) == FatClusterAvailable) {
5160 
5161  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5162 
5163  if (BitMap) {
5164 
5165  RtlSetBits( BitMap,
5166  StartIndexOfThisRun - StartIndex,
5167  ClustersThisRun );
5168  }
5169 
5170  CurrentRun = FreeClusters;
5171  StartIndexOfThisRun = FatIndex;
5172  }
5173 
5174  //
5175  // If the entry is marked bad, add it to the bad block MCB
5176  //
5177 
5178  if ((SetupWindows || (Vcb->NumberOfWindows == 1)) &&
5179  (FatInterpretClusterType( Vcb, FatEntry ) == FatClusterBad)) {
5180 
5181  //
5182  // This cluster is marked bad.
5183  // Add it to the BadBlockMcb.
5184  //
5185 
5186  Lbo = FatGetLboFromIndex( Vcb, FatIndex );
5187  FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize );
5188  BadClusterVbo += ClusterSize;
5189  }
5190  }
5191 
5192  //
5193  // If we finished the scan, then we know about all the possible bad clusters.
5194  //
5195 
5196  SetFlag( Vcb->VcbState, VCB_STATE_FLAG_BAD_BLOCKS_POPULATED);
5197 
5198  //
5199  // Now we have to record the final run we encountered
5200  //
5201 
5202  ClustersThisRun = FatIndex - StartIndexOfThisRun;
5203 
5204  if (CurrentRun == FreeClusters) {
5205 
5206  if (FreeClusterCount) {
5207 
5208  *FreeClusterCount += ClustersThisRun;
5209  CurrentWindow->ClustersFree += ClustersThisRun;
5210  }
5211 
5212  if (BitMap) {
5213 
5214  RtlClearBits( BitMap,
5215  StartIndexOfThisRun - StartIndex,
5216  ClustersThisRun );
5217  }
5218 
5219  } else {
5220 
5221  if (BitMap) {
5222 
5223  RtlSetBits( BitMap,
5224  StartIndexOfThisRun - StartIndex,
5225  ClustersThisRun );
5226  }
5227  }
5228 
5229  //
5230  // And finish the last window if we are in setup.
5231  //
5232 
5233  if (SetupWindows) {
5234 
5235  CurrentWindow->LastCluster = FatIndex - 1;
5236  }
5237 
5238  //
5239  // Now switch the active window if required. We've successfully gotten everything
5240  // nailed down.
5241  //
5242  // If we were tracking the free cluster count, this means we should update the
5243  // window. This is the case of FAT12/16 initialization.
5244  //
5245 
5246  if (SwitchToWindow) {
5247 
5248  if (Vcb->FreeClusterBitMap.Buffer) {
5249 
5250  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
5251  }
5252 
5253  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
5254  NewBitMapBuffer,
5255  EndIndex - StartIndex + 1 );
5256 
5257  NewBitMapBuffer = NULL;
5258 
5259  Vcb->CurrentWindow = SwitchToWindow;
5260  Vcb->ClusterHint = (ULONG)-1;
5261 
5262  if (FreeClusterCount) {
5263 
5264  NT_ASSERT( !SetupWindows );
5265 
5266  Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
5267  }
5268  }
5269 
5270  //
5271  // Make sure plausible things occurred ...
5272  //
5273 
5274  if (!SetupWindows && BitMapBuffer == NULL) {
5275 
5276  ASSERT_CURRENT_WINDOW_GOOD( Vcb );
5277  }
5278 
5279  NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
5280 
5281  } _SEH2_FINALLY {
5282 
5283  //
5284  // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5285  //
5286 
5287  FatUnpinBcb( IrpContext, Bcb);
5288 
5289  if (NewBitMapBuffer) {
5290 
5291  ExFreePool( NewBitMapBuffer );
5292  }
5293  } _SEH2_END;
5294 }

Referenced by FatSetupAllocationSupport().
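A minimal user-mode sketch of the scan itself (not the driver routine, which reads the FAT page by page, prefetches, and tracks FAT32 windows and bad clusters; this only shows the run-length accounting that feeds the free-cluster count and the bitmap):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define FAT32_ENTRY_MASK       0x0FFFFFFF
    #define FAT_CLUSTER_AVAILABLE  0x00000000

    /* Count free clusters in fat[startIndex..endIndex] and mark used ones in a
     * byte-per-cluster "bitmap", processing the table in runs of identical state
     * so a real implementation could call RtlSetBits/RtlClearBits once per run. */
    static uint32_t ExamineFatEntries(const uint32_t *fat, uint32_t startIndex,
                                      uint32_t endIndex, uint8_t *bitmap)
    {
        uint32_t freeCount = 0;
        uint32_t runStart = startIndex;
        int runIsFree = ((fat[startIndex] & FAT32_ENTRY_MASK) == FAT_CLUSTER_AVAILABLE);
        uint32_t i;

        for (i = startIndex; i <= endIndex + 1; i++) {
            int isFree = (i <= endIndex) &&
                         ((fat[i] & FAT32_ENTRY_MASK) == FAT_CLUSTER_AVAILABLE);

            if (i > endIndex || isFree != runIsFree) {
                uint32_t runLength = i - runStart;
                if (runIsFree) {
                    freeCount += runLength;                    /* run of free clusters */
                } else {
                    memset(&bitmap[runStart - startIndex], 1,  /* run of used clusters */
                           runLength);
                }
                runStart = i;
                runIsFree = isFree;
            }
        }
        return freeCount;
    }

    int main(void)
    {
        uint32_t fat[12] = { 0 };
        uint8_t bitmap[10] = { 0 };

        fat[3] = 4; fat[4] = 0x0FFFFFFF;    /* clusters 3 and 4 are allocated */
        fat[8] = 0x0FFFFFFF;                /* cluster 8 is allocated         */

        printf("free clusters in 2..11: %u\n",
               ExamineFatEntries(fat, 2, 11, bitmap));   /* prints 7 */
        return 0;
    }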

◆ FatInterpretClusterType()

CLUSTER_TYPE FatInterpretClusterType ( IN PVCB  Vcb,
IN FAT_ENTRY  Entry 
)

Definition at line 3475 of file allocsup.c.

3501 {
3502  DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
3503  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3504  DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
3505 
3506  PAGED_CODE();
3507 
3508  switch(Vcb->AllocationSupport.FatIndexBitSize ) {
3509  case 32:
3510  Entry &= FAT32_ENTRY_MASK;
3511  break;
3512 
3513  case 12:
3514  NT_ASSERT( Entry <= 0xfff );
3515  if (Entry >= 0x0ff0) {
3516  Entry |= 0x0FFFF000;
3517  }
3518  break;
3519 
3520  default:
3521  case 16:
3522  NT_ASSERT( Entry <= 0xffff );
3523  if (Entry >= 0x0fff0) {
3524  Entry |= 0x0FFF0000;
3525  }
3526  break;
3527  }
3528 
3529  if (Entry == FAT_CLUSTER_AVAILABLE) {
3530 
3531  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3532 
3533  return FatClusterAvailable;
3534 
3535  } else if (Entry < FAT_CLUSTER_RESERVED) {
3536 
3537  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
3538 
3539  return FatClusterNext;
3540 
3541  } else if (Entry < FAT_CLUSTER_BAD) {
3542 
3543  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3544 
3545  return FatClusterReserved;
3546 
3547  } else if (Entry == FAT_CLUSTER_BAD) {
3548 
3549  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
3550 
3551  return FatClusterBad;
3552 
3553  } else {
3554 
3555  DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
3556 
3557  return FatClusterLast;
3558  }
3559 }

Referenced by _Requires_lock_held_(), and FatExamineFatEntries().
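A user-mode sketch of the same classification: widen 12- and 16-bit sentinel values to their 32-bit forms, then compare against the FAT32 ranges. The constants below are written out locally to mirror the fat.h values the routine uses; treat the exact numbers as illustrative.

    #include <stdio.h>
    #include <stdint.h>

    #define FAT32_ENTRY_MASK       0x0FFFFFFF
    #define FAT_CLUSTER_AVAILABLE  0x00000000
    #define FAT_CLUSTER_RESERVED   0x0FFFFFF0
    #define FAT_CLUSTER_BAD        0x0FFFFFF7
    #define FAT_CLUSTER_LAST       0x0FFFFFFF

    typedef enum { FatClusterAvailable, FatClusterNext, FatClusterReserved,
                   FatClusterBad, FatClusterLast } CLUSTER_TYPE;

    static CLUSTER_TYPE InterpretClusterType(uint32_t entry, int fatIndexBitSize)
    {
        switch (fatIndexBitSize) {
        case 32:
            entry &= FAT32_ENTRY_MASK;
            break;
        case 12:
            if (entry >= 0x0FF0)  entry |= 0x0FFFF000;  /* promote 12-bit sentinels */
            break;
        default: /* 16 */
            if (entry >= 0xFFF0)  entry |= 0x0FFF0000;  /* promote 16-bit sentinels */
            break;
        }

        if (entry == FAT_CLUSTER_AVAILABLE)  return FatClusterAvailable;
        if (entry <  FAT_CLUSTER_RESERVED)   return FatClusterNext;
        if (entry <  FAT_CLUSTER_BAD)        return FatClusterReserved;
        if (entry == FAT_CLUSTER_BAD)        return FatClusterBad;
        return FatClusterLast;
    }

    int main(void)
    {
        printf("%d\n", InterpretClusterType(0x0000, 16));  /* 0: available          */
        printf("%d\n", InterpretClusterType(0x0003, 16));  /* 1: next link in chain */
        printf("%d\n", InterpretClusterType(0xFFF7, 16));  /* 3: bad cluster        */
        printf("%d\n", InterpretClusterType(0xFFFF, 16));  /* 4: last cluster       */
        return 0;
    }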

◆ FatLogOf()

UCHAR FatLogOf ( IN ULONG  Value)

Definition at line 4657 of file allocsup.c.

4678 {
4679  UCHAR Log = 0;
4680 
4681 #if FASTFATDBG
4682  ULONG OrigValue = Value;
4683 #endif
4684 
4685  PAGED_CODE();
4686 
4687  //
4688  // Knock bits off until we get a one at position 0
4689  //
4690 
4691  while ( (Value & 0xfffffffe) != 0 ) {
4692 
4693  Log++;
4694  Value >>= 1;
4695  }
4696 
4697  //
4698  // If there was more than one bit set, the file system messed up,
4699  // Bug Check.
4700  //
4701 
4702  if (Value != 0x1) {
4703 
4704  DebugTrace(+1, Dbg, "LogOf\n", 0);
4705  DebugTrace( 0, Dbg, " Value = %8lx\n", OrigValue);
4706 
4707  DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
4708 
4709  DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
4710 
4711 #ifdef _MSC_VER
4712 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
4713 #endif
4714  FatBugCheck( Value, Log, 0 );
4715  }
4716 
4717  return Log;
4718 }

Referenced by FatSetupAllocationSupport().
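A user-mode sketch of the same computation, which returns the base-2 logarithm of a power-of-two value such as the bytes-per-cluster (the driver bugchecks on bad input rather than returning an error value):

    #include <stdio.h>

    static int LogOf(unsigned int value)
    {
        int log = 0;

        while ((value & 0xFFFFFFFEu) != 0) {   /* shift until only bit 0 can remain */
            log++;
            value >>= 1;
        }
        return (value == 1) ? log : -1;        /* -1 only for an input of 0 */
    }

    int main(void)
    {
        printf("LogOf(512)  = %d\n", LogOf(512));   /* 9  */
        printf("LogOf(4096) = %d\n", LogOf(4096));  /* 12 */
        return 0;
    }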

◆ FatLookupFatEntry()

VOID FatLookupFatEntry ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG  FatIndex,
IN OUT PULONG  FatEntry,
IN OUT PFAT_ENUMERATION_CONTEXT  Context 
)

Definition at line 3567 of file allocsup.c.

3600 {
3601  PAGED_CODE();
3602 
3603  DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
3604  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3605  DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3606  DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);
3607 
3608  //
3609  // Make sure they gave us a valid fat index.
3610  //
3611 
3612  FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3613 
3614  //
3615  // Case on 12 or 16 bit fats.
3616  //
3617  // In the 12 bit case (mostly floppies) we always have the whole fat
3618  // (max 6k bytes) pinned during allocation operations. This is possibly
3619  // a wee bit slower, but saves headaches over fat entries with 8 bits
3620  // on one page, and 4 bits on the next.
3621  //
3622  // The 16 bit case always keeps the last used page pinned until all
3623  // operations are done and it is unpinned.
3624  //
3625 
3626  //
3627  // DEAL WITH 12 BIT CASE
3628  //
3629 
3630  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3631 
3632  //
3633  // Check to see if the fat is already pinned, otherwise pin it.
3634  //
3635 
3636  if (Context->Bcb == NULL) {
3637 
3638  FatReadVolumeFile( IrpContext,
3639  Vcb,
3640  FatReservedBytes( &Vcb->Bpb ),
3641  FatBytesPerFat( &Vcb->Bpb ),
3642  &Context->Bcb,
3643  &Context->PinnedPage );
3644  }
3645 
3646  //
3647  // Load the return value.
3648  //
3649 
3650 
3651  FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );
3652 
3653  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3654 
3655  //
3656  // DEAL WITH 32 BIT CASE
3657  //
3658 
3659  ULONG PageEntryOffset;
3660  ULONG OffsetIntoVolumeFile;
3661 
3662  //
3663  // Initialize two local variables that help us.
3664  //
3665  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
3666  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);
3667 
3668  //
3669  // Check to see if we need to read in a new page of fat
3670  //
3671 
3672  if ((Context->Bcb == NULL) ||
3673  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3674 
3675  //
3676  // The entry wasn't in the pinned page, so we must unpin the current
3677  // page (if any) and read in a new page.
3678  //
3679 
3680  FatUnpinBcb( IrpContext, Context->Bcb );
3681 
3682  FatReadVolumeFile( IrpContext,
3683  Vcb,
3684  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3685  PAGE_SIZE,
3686  &Context->Bcb,
3687  &Context->PinnedPage );
3688 
3689  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3690  }
3691 
3692  //
3693  // Grab the fat entry from the pinned page, and return
3694  //
3695 
3696  *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;
3697 
3698  } else {
3699 
3700  //
3701  // DEAL WITH 16 BIT CASE
3702  //
3703 
3704  ULONG PageEntryOffset;
3705  ULONG OffsetIntoVolumeFile;
3706 
3707  //
3708  // Initialize two local variables that help us.
3709  //
3710 
3711  OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
3712  PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);
3713 
3714  //
3715  // Check to see if we need to read in a new page of fat
3716  //
3717 
3718  if ((Context->Bcb == NULL) ||
3719  (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3720 
3721  //
3722  // The entry wasn't in the pinned page, so we must unpin the current
3723  // page (if any) and read in a new page.
3724  //
3725 
3726  FatUnpinBcb( IrpContext, Context->Bcb );
3727 
3728  FatReadVolumeFile( IrpContext,
3729  Vcb,
3730  OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3731  PAGE_SIZE,
3732  &Context->Bcb,
3733  &Context->PinnedPage );
3734 
3735  Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3736  }
3737 
3738  //
3739  // Grab the fat entry from the pinned page, and return
3740  //
3741 
3742  *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
3743  }
3744 
3745  DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
3746  return;
3747 }

Referenced by _Requires_lock_held_().
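For the 16- and 32-bit cases the routine keeps one page of the FAT pinned and only repins when the requested entry falls on a different page. A user-mode sketch of that page arithmetic for FAT32 (the BPB numbers are illustrative, not taken from a real volume):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint32_t reservedBytes = 32 * 512;          /* e.g. 32 reserved sectors      */
        uint32_t fatIndex      = 70000;             /* the cluster we want to follow */

        /* Same arithmetic as the 32-bit branch of FatLookupFatEntry. */
        uint32_t offsetIntoVolumeFile = reservedBytes + fatIndex * sizeof(uint32_t);
        uint32_t pageOfFat            = offsetIntoVolumeFile / PAGE_SIZE;
        uint32_t pageEntryOffset      = (offsetIntoVolumeFile % PAGE_SIZE) / sizeof(uint32_t);

        printf("entry %u: volume offset 0x%x, page %u, slot %u within that page\n",
               fatIndex, offsetIntoVolumeFile, pageOfFat, pageEntryOffset);
        return 0;
    }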

◆ FatSelectBestWindow()

INLINE ULONG FatSelectBestWindow ( IN PVCB  Vcb)

Definition at line 277 of file allocsup.c.

299 {
300  ULONG i, Fave = 0;
301  ULONG MaxFree = 0;
302  ULONG FirstEmpty = (ULONG)-1;
303  ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
304 
305  NT_ASSERT( 1 != Vcb->NumberOfWindows);
306 
307  for (i = 0; i < Vcb->NumberOfWindows; i++) {
308 
309  if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
310 
311  if (-1 == FirstEmpty) {
312 
313  //
314  // Keep note of the first empty window on the disc
315  //
316 
317  FirstEmpty = i;
318  }
319  }
320  else if (Vcb->Windows[i].ClustersFree > MaxFree) {
321 
322  //
323  // This window has the most free clusters, so far
324  //
325 
326  MaxFree = Vcb->Windows[i].ClustersFree;
327  Fave = i;
328 
329  //
330  // If this window has >50% free clusters, then we will take it,
331  // so don't bother considering more windows.
332  //
333 
334  if (MaxFree >= (ClustersPerWindow >> 1)) {
335 
336  break;
337  }
338  }
339  }
340 
341  //
342  // If there were no windows with 50% or more freespace, then select the
343  // first empty window on the disc, if any - otherwise we'll just go with
344  // the one with the most free clusters.
345  //
346 
347  if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
348 
349  Fave = FirstEmpty;
350  }
351 
352  return Fave;
353 }

Referenced by FatSetupAllocationSupport().
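A user-mode sketch of the selection policy (not the driver code; the window bookkeeping is simplified and the counts are illustrative): prefer the first window that is more than half free, otherwise fall back to a completely empty window if one exists, otherwise take the window with the most free clusters.

    #include <stdio.h>
    #include <stdint.h>

    #define CLUSTERS_PER_WINDOW  (1u << 16)

    static unsigned SelectBestWindow(const uint32_t *clustersFree, unsigned windowCount)
    {
        unsigned i, fave = 0;
        uint32_t maxFree = 0;
        unsigned firstEmpty = (unsigned)-1;

        for (i = 0; i < windowCount; i++) {
            if (clustersFree[i] == CLUSTERS_PER_WINDOW) {
                if (firstEmpty == (unsigned)-1) {
                    firstEmpty = i;               /* remember first untouched window */
                }
            } else if (clustersFree[i] > maxFree) {
                maxFree = clustersFree[i];
                fave = i;
                if (maxFree >= (CLUSTERS_PER_WINDOW >> 1)) {
                    break;                        /* >50% free is good enough */
                }
            }
        }

        if (maxFree < (CLUSTERS_PER_WINDOW >> 1) && firstEmpty != (unsigned)-1) {
            fave = firstEmpty;                    /* nothing half-free: take an empty one */
        }
        return fave;
    }

    int main(void)
    {
        /* window 0 nearly full, window 1 completely empty, window 2 about 25% free */
        uint32_t clustersFree[3] = { 100, CLUSTERS_PER_WINDOW, CLUSTERS_PER_WINDOW / 4 };

        printf("best window = %u\n", SelectBestWindow(clustersFree, 3));  /* prints 1 */
        return 0;
    }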

◆ FatSetFatRun()

VOID FatSetFatRun ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG  StartingFatIndex,
IN ULONG  ClusterCount,
IN BOOLEAN  ChainTogether 
)

Definition at line 4141 of file allocsup.c.

4177 {
4178 #define MAXCOUNTCLUS 0x10000
4179 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
4180  PBCB SavedBcbs[COUNTSAVEDBCBS][2];
4181 
4182  ULONG SectorSize;
4183  ULONG Cluster;
4184 
4185  LBO StartSectorLbo;
4186  LBO FinalSectorLbo;
4187  LBO Lbo;
4188 
4189  PVOID PinnedFat;
4190 
4191  BOOLEAN ReleaseMutex = FALSE;
4192 
4193  ULONG SavedStartingFatIndex = StartingFatIndex;
4194 
4195  PAGED_CODE();
4196 
4197  DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
4198  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
4199  DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
4200  DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
4201  DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");
4202 
4203  //
4204  // Make sure they gave us a valid fat run.
4205  //
4206 
4207  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
4208  FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);
4209 
4210  //
4211  // Check special case
4212  //
4213 
4214  if (ClusterCount == 0) {
4215 
4216  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4217  return;
4218  }
4219 
4220  //
4221  // Set Sector Size
4222  //
4223 
4224  SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
4225 
4226  //
4227  // Case on 12 or 16 bit fats.
4228  //
4229  // In the 12 bit case (mostly floppies) we always have the whole fat
4230  // (max 6k bytes) pinned during allocation operations. This is possibly
4231  // a wee bit slower, but saves headaches over fat entries with 8 bits
4232  // on one page, and 4 bits on the next.
4233  //
4234  // In the 16 bit case we only read one page at a time, as needed.
4235  //
4236 
4237  //
4238  // DEAL WITH 12 BIT CASE
4239  //
4240 
4241  _SEH2_TRY {
4242 
4243  if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
4244 
4245  //
4246  // We read in the entire fat. Note that using prepare write marks
4247  // the bcb pre-dirty, so we don't have to do it explicitly.
4248  //
4249 
4250  RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);
4251 
4252  FatPrepareWriteVolumeFile( IrpContext,
4253  Vcb,
4254  FatReservedBytes( &Vcb->Bpb ),
4255  FatBytesPerFat( &Vcb->Bpb ),
4256  &SavedBcbs[0][0],
4257  &PinnedFat,
4258  TRUE,
4259  FALSE );
4260 
4261  //
4262  // Mark the affected sectors dirty. Note that FinalSectorLbo is
4263  // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4264  // we catch the case of a dirty fat entry straddling a sector boundary.
4265  //
4266  // Note that if the first AddMcbEntry succeeds, all following ones
4267  // will simply coalesce, and thus also succeed.
4268  //
4269 
4270  StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
4271  & ~(SectorSize - 1);
4272 
4273  FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
4274  ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);
4275 
4276  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4277 
4278  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4279  }
4280 
4281  //
4282  // Store the entries into the fat; we need a little
4283  // synchronization here and can't use a spinlock since the bytes
4284  // might not be resident.
4285  //
4286 
4287  FatLockFreeClusterBitMap( Vcb );
4288  ReleaseMutex = TRUE;
4289 
4290  for (Cluster = StartingFatIndex;
4291  Cluster < StartingFatIndex + ClusterCount - 1;
4292  Cluster++) {
4293 
4294  FatSet12BitEntry( PinnedFat,
4295  Cluster,
4296  ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
4297  }
4298 
4299  //
4300  // Save the last entry
4301  //
4302 
4303  FatSet12BitEntry( PinnedFat,
4304  Cluster,
4305  ChainTogether ?
4306  FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE );
4307 
4308  FatUnlockFreeClusterBitMap( Vcb );
4309  ReleaseMutex = FALSE;
4310 
4311  } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
4312 
4313  //
4314  // DEAL WITH 32 BIT CASE
4315  //
4316 
4317  for (;;) {
4318 
4319  VBO StartOffsetInVolume;
4320  VBO FinalOffsetInVolume;
4321 
4322  ULONG Page;
4323  ULONG FinalCluster;
4324  PULONG FatEntry = NULL;
4325  ULONG ClusterCountThisRun;
4326 
4327  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4328  StartingFatIndex * sizeof(FAT_ENTRY);
4329 
4330  if (ClusterCount > MAXCOUNTCLUS) {
4331  ClusterCountThisRun = MAXCOUNTCLUS;
4332  } else {
4333  ClusterCountThisRun = ClusterCount;
4334  }
4335 
4336  FinalOffsetInVolume = StartOffsetInVolume +
4337  (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);
4338 
4339  {
4340  ULONG NumberOfPages;
4341  ULONG Offset;
4342 
4343  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4344  (StartOffsetInVolume / PAGE_SIZE) + 1;
4345 
4346  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4347 
4348  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4349  Page < NumberOfPages;
4350  Page++, Offset += PAGE_SIZE ) {
4351 
4352  FatPrepareWriteVolumeFile( IrpContext,
4353  Vcb,
4354  Offset,
4355  PAGE_SIZE,
4356  &SavedBcbs[Page][0],
4357  (PVOID *)&SavedBcbs[Page][1],
4358  TRUE,
4359  FALSE );
4360 
4361  if (Page == 0) {
4362 
4363  FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
4364  (StartOffsetInVolume % PAGE_SIZE));
4365  }
4366  }
4367  }
4368 
4369  //
4370  // Mark the run dirty
4371  //
4372 
4373  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4374  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4375 
4376  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4377 
4378  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
4379  }
4380 
4381  //
4382  // Store the entries
4383  //
4384  // We need extra synchronization here for broken architectures
4385  // like the ALPHA that don't support atomic 16 bit writes.
4386  //
4387 
4388 #ifdef ALPHA
4389  FatLockFreeClusterBitMap( Vcb );
4390  ReleaseMutex = TRUE;
4391 #endif // ALPHA
4392 
4393  FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
4394  Page = 0;
4395 
4396  for (Cluster = StartingFatIndex;
4397  Cluster <= FinalCluster;
4398  Cluster++, FatEntry++) {
4399 
4400  //
4401  // If we just crossed a page boundary (as opposed to starting
4402  // on one), update our idea of FatEntry.
4403 
4404  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4405  (Cluster != StartingFatIndex) ) {
4406 
4407  Page += 1;
4408  FatEntry = (PULONG)SavedBcbs[Page][1];
4409  }
4410 
4411  *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4412  FAT_CLUSTER_AVAILABLE;
4413  }
4414 
4415  //
4416  // Fix up the last entry if we were chaining together
4417  //
4418 
4419  if ((ClusterCount <= MAXCOUNTCLUS) &&
4420  ChainTogether ) {
4421 
4422  *(FatEntry-1) = FAT_CLUSTER_LAST;
4423  }
4424 
4425 #ifdef ALPHA
4426  FatUnlockFreeClusterBitMap( Vcb );
4427  ReleaseMutex = FALSE;
4428 #endif // ALPHA
4429 
4430  {
4431  ULONG i;
4432 
4433  //
4434  // Unpin the Bcbs
4435  //
4436 
4437  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4438 
4439  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4440  SavedBcbs[i][0] = NULL;
4441  }
4442  }
4443 
4444  if (ClusterCount <= MAXCOUNTCLUS) {
4445 
4446  break;
4447 
4448  } else {
4449 
4450  StartingFatIndex += MAXCOUNTCLUS;
4451  ClusterCount -= MAXCOUNTCLUS;
4452  }
4453  }
4454 
4455  } else {
4456 
4457  //
4458  // DEAL WITH 16 BIT CASE
4459  //
4460 
4461  VBO StartOffsetInVolume;
4462  VBO FinalOffsetInVolume;
4463 
4464  ULONG Page;
4465  ULONG FinalCluster;
4466  PUSHORT FatEntry = NULL;
4467 
4468  StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4469  StartingFatIndex * sizeof(USHORT);
4470 
4471  FinalOffsetInVolume = StartOffsetInVolume +
4472  (ClusterCount - 1) * sizeof(USHORT);
4473 
4474  //
4475  // Read in one page of fat at a time. We cannot read in all of
4476  // the fat we need because of cache manager limitations.
4477  //
4478  // SavedBcb was initialized to be able to hold the largest
4479  // possible number of pages in a fat plus an extra one to
4480  // accommodate the boot sector, plus one more to make sure there
4481  // is enough room for the RtlZeroMemory below that needs to mark
4482  // the first Bcb after all the ones we will use as an end marker.
4483  //
4484 
4485  {
4486  ULONG NumberOfPages;
4487  ULONG Offset;
4488 
4489  NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4490  (StartOffsetInVolume / PAGE_SIZE) + 1;
4491 
4492  RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4493 
4494  for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4495  Page < NumberOfPages;
4496  Page++, Offset += PAGE_SIZE ) {
4497 
4498  FatPrepareWriteVolumeFile( IrpContext,
4499  Vcb,
4500  Offset,
4501  PAGE_SIZE,
4502  &SavedBcbs[Page][0],
4503  (PVOID *)&SavedBcbs[Page][1],
4504  TRUE,
4505  FALSE );
4506 
4507  if (Page == 0) {
4508 
4509  FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
4510  (StartOffsetInVolume % PAGE_SIZE));
4511  }
4512  }
4513  }
4514 
4515  //
4516  // Mark the run dirty
4517  //
4518 
4519  StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4520  FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4521 
4522  for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4523 
4524  FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4525  }
4526 
4527  //
4528  // Store the entries
4529  //
4530  // We need extra synchronization here for broken architectures
4531  // like the ALPHA that don't support atomic 16 bit writes.
4532  //
4533 
4534 #ifdef ALPHA
4535  FatLockFreeClusterBitMap( Vcb );
4536  ReleaseMutex = TRUE;
4537 #endif // ALPHA
4538 
4539  FinalCluster = StartingFatIndex + ClusterCount - 1;
4540  Page = 0;
4541 
4542  for (Cluster = StartingFatIndex;
4543  Cluster <= FinalCluster;
4544  Cluster++, FatEntry++) {
4545 
4546  //
4547  // If we just crossed a page boundary (as opposed to starting
4548  // on one), update our idea of FatEntry.
4549 
4550  if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4551  (Cluster != StartingFatIndex) ) {
4552 
4553  Page += 1;
4554  FatEntry = (PUSHORT)SavedBcbs[Page][1];
4555  }
4556 
4557  *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4558  FAT_CLUSTER_AVAILABLE);
4559  }
4560 
4561  //
4562  // Fix up the last entry if we were chaining together
4563  //
4564 
4565  if ( ChainTogether ) {
4566 
4567 #ifdef _MSC_VER
4568 #pragma warning( suppress: 4310 )
4569 #endif
4570  *(FatEntry-1) = (USHORT)FAT_CLUSTER_LAST;
4571 
4572  }
4573 #ifdef ALPHA
4574  FatUnlockFreeClusterBitMap( Vcb );
4575  ReleaseMutex = FALSE;
4576 #endif // ALPHA
4577  }
4578 
4579  } _SEH2_FINALLY {
4580 
4581  ULONG i;
4582 
4583  DebugUnwind( FatSetFatRun );
4584 
4585  //
4586  // If we still somehow have the Mutex, release it.
4587  //
4588 
4589  if (ReleaseMutex) {
4590 
4591  FatUnlockFreeClusterBitMap( Vcb );
4592 
4593  ReleaseMutex = FALSE;
4594  }
4595 
4596  //
4597  // Unpin the Bcbs
4598  //
4599 
4600  for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4601 
4602  FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4603  }
4604 
4605  //
4606  // At this point nothing in this finally clause should have raised.
4607  // So, now comes the unsafe (sigh) stuff.
4608  //
4609 
4610  if ( _SEH2_AbnormalTermination() &&
4611  (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {
4612 
4613  //
4614  // Fat32 unwind
4615  //
4616  // This case is more complex because the FAT12 and FAT16 cases
4617  // pin all the needed FAT pages (128K max) before changing any
4618  // FAT entries, after which they cannot fail. In the Fat32
4619  // case, it may not be practical to pin all the needed FAT
4620  // pages, because that could span many megabytes. So Fat32
4621  // attacks in chunks, and if a failure occurs once the first
4622  // chunk has been updated, we have to back out the updates.
4623  //
4624  // The unwind consists of walking back over each FAT entry we
4625  // have changed, setting it back to the previous value. Note
4626  // that the previous value will either be FAT_CLUSTER_AVAILABLE
4627  // (if ChainTogether==TRUE) or a simple link to the successor
4628  // (if ChainTogether==FALSE).
4629  //
4630  // We concede that any one of these calls could fail too; our
4631  // objective is to make this case no more likely than the case
4632  // for a file consisting of multiple disjoint runs.
4633  //
4634 
4635  while ( StartingFatIndex > SavedStartingFatIndex ) {
4636 
4637  StartingFatIndex--;
4638 
4639  FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
4640  ChainTogether ?
4641  StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
4642  }
4643  }
4644 
4645  DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4646  } _SEH2_END;
4647 
4648  return;
4649 }
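The FAT12 branch above packs each entry into a byte and a half, so entry N starts at byte N * 3 / 2 from the beginning of the FAT, and the touched byte range is rounded out to whole sectors before being added to the DirtyFatMcb. The short standalone sketch below reruns just that arithmetic; the reserved-byte count, sector size, and run values are invented examples, not taken from any real BPB.

#include <stdio.h>

#define EXAMPLE_RESERVED_BYTES 512u   /* assumed: one reserved (boot) sector */
#define EXAMPLE_SECTOR_SIZE    512u   /* assumed: 512-byte sectors           */

int main(void)
{
    unsigned StartingFatIndex = 5;    /* hypothetical first cluster of the run */
    unsigned ClusterCount     = 3;    /* hypothetical run length               */

    /* Byte offset of the first entry, and of the last byte the run can
     * touch (the "+ 2" rounds the trailing half-byte up, as above). */
    unsigned long StartByte = EXAMPLE_RESERVED_BYTES + StartingFatIndex * 3 / 2;
    unsigned long FinalByte = EXAMPLE_RESERVED_BYTES +
                              ((StartingFatIndex + ClusterCount) * 3 + 2) / 2;

    /* Round both down to a sector boundary, as "& ~(SectorSize - 1)" does
     * in the listing. */
    unsigned long StartSectorLbo = StartByte & ~(unsigned long)(EXAMPLE_SECTOR_SIZE - 1);
    unsigned long FinalSectorLbo = FinalByte & ~(unsigned long)(EXAMPLE_SECTOR_SIZE - 1);

    printf("entries %u..%u: bytes %lu..%lu, dirty sectors %lu..%lu\n",
           StartingFatIndex, StartingFatIndex + ClusterCount - 1,
           StartByte, FinalByte, StartSectorLbo, FinalSectorLbo);
    return 0;
}

For a three-cluster run starting at index 5 with this geometry, both byte offsets fall inside the same 512-byte sector, so the dirty-marking loop above would add only a single sector to the Mcb.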
#define FAT_CLUSTER_LAST
Definition: fat.h:258
#define FatLockFreeClusterBitMap(VCB)
Definition: allocsup.c:99
#define TRUE
Definition: types.h:120
ULONG32 VBO
Definition: fat.h:38
unsigned char * PUCHAR
Definition: retypes.h:3
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:297
#define MAXCOUNTCLUS
#define PAGED_CODE()
Definition: video.h:57
#define FatVerifyIndexIsValid(IC, V, I)
Definition: fat.h:532
_SEH2_TRY
Definition: create.c:4250
uint32_t ULONG_PTR
Definition: typedefs.h:64
#define FatReservedBytes(B)
Definition: fat.h:414
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
LONGLONG LBO
Definition: fat.h:34
#define FatBytesPerFat(B)
Definition: fat.h:410
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:537
unsigned char BOOLEAN
smooth NULL
Definition: ftsmooth.c:416
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:13
VOID FatSetFatRun(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
Definition: allocsup.c:4141
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255
#define DebugUnwind(X)
Definition: fatdata.h:315
#define Dbg
Definition: allocsup.c:28
#define COUNTSAVEDBCBS
#define Vcb
Definition: cdprocs.h:1425
BOOL WINAPI DECLSPEC_HOTPATCH ReleaseMutex(IN HANDLE hMutex)
Definition: synch.c:618
#define FatUnlockFreeClusterBitMap(VCB)
Definition: allocsup.c:112
#define PAGE_SIZE
Definition: env_spec_w32.h:49
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
_SEH2_END
Definition: create.c:4424
unsigned short USHORT
Definition: pedump.c:61
_SEH2_FINALLY
Definition: create.c:4395
unsigned int * PULONG
Definition: retypes.h:1
#define FatSet12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:603
ULONG32 FAT_ENTRY
Definition: fat.h:225
unsigned int ULONG
Definition: retypes.h:1
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:261
_In_ ULONG SectorSize
Definition: halfuncs.h:291
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:373
unsigned short * PUSHORT
Definition: retypes.h:2
#define NT_ASSERT
Definition: rtlfuncs.h:3312
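The FAT32 branch above cannot pin the whole run up front, so it pins one page at a time per chunk of at most MAXCOUNTCLUS entries and, in the _SEH2_FINALLY clause, walks already-written entries backwards if a later chunk fails. The sketch below models only that control flow against a plain in-memory array; the array, the small CHUNK constant, and the simulated-failure parameter are illustrative assumptions, whereas the real driver pins cache pages with FatPrepareWriteVolumeFile and unwinds with FatSetFatEntry.

#include <stdint.h>
#include <stdio.h>

#define FAT_AVAILABLE 0x00000000u      /* stands in for FAT_CLUSTER_AVAILABLE  */
#define FAT_LAST      0x0FFFFFFFu      /* stands in for FAT_CLUSTER_LAST       */
#define CHUNK         4u               /* stands in for MAXCOUNTCLUS (0x10000) */

/* Chain clusters Start..Start+Count-1 together, one chunk at a time.  If a
 * simulated failure hits before chunk FailAtChunk is written, every entry
 * changed by the chunks that already completed is restored, mirroring the
 * unwind loop in the finally clause above. */
static int SetRunChained(uint32_t *Fat, uint32_t Start, uint32_t Count,
                         uint32_t FailAtChunk)
{
    uint32_t Saved = Start;            /* like SavedStartingFatIndex           */
    uint32_t Chunk = 0;

    while (Count != 0) {

        uint32_t ThisRun = (Count > CHUNK) ? CHUNK : Count;

        if (Chunk == FailAtChunk) {

            /* Back out the completed chunks: walk the index down to where
             * we started and mark each entry available again. */
            while (Start > Saved) {
                Start--;
                Fat[Start] = FAT_AVAILABLE;
            }
            return -1;
        }

        for (uint32_t i = 0; i < ThisRun; i++) {

            /* The last entry of the final chunk terminates the chain. */
            Fat[Start + i] = (Count <= CHUNK && i == ThisRun - 1) ? FAT_LAST
                                                                  : Start + i + 1;
        }

        Start += ThisRun;
        Count -= ThisRun;
        Chunk += 1;
    }
    return 0;
}

int main(void)
{
    uint32_t Fat[32] = { 0 };

    /* Ten clusters starting at 8, failing before the second chunk: the four
     * entries written by the first chunk are rolled back to available. */
    int Status = SetRunChained(Fat, 8, 10, 1);
    printf("unwound: status %d, Fat[8] = %u\n", Status, (unsigned)Fat[8]);

    /* The same run with no simulated failure: 8 -> 9 -> ... -> 17 -> LAST. */
    SetRunChained(Fat, 8, 10, 0xFFFFFFFFu);
    printf("chained: Fat[8] = %u, Fat[17] = 0x%08X\n",
           (unsigned)Fat[8], (unsigned)Fat[17]);
    return 0;
}

As in the listing, only whole chunks are backed out: within a chunk all pages are pinned before any entry is modified, so a failure should not strike mid-chunk.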

◆ FatSetupAllocationSupport()

VOID FatSetupAllocationSupport ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb 
)

Definition at line 357 of file allocsup.c.

378 {
379  ULONG BitIndex;
380  ULONG ClustersDescribableByFat;
381 
382  PAGED_CODE();
383 
384  DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
385  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
386 
387  //
388  // Compute a number of fields for Vcb.AllocationSupport
389  //
390 
391  Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
392  Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
393 
394  Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
395 
396  Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
397 
398  Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
399 
400  Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
401  Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
402  Vcb->AllocationSupport.NumberOfFreeClusters = 0;
403 
404 
405  //
406  // Deal with a bug in DOS 5 format: if the Fat is not big enough to
407  // describe all the clusters on the disk, reduce this number. We expect
408  // that fat32 volumes will not have this problem.
409  //
410  // Turns out this was not a good assumption. We have to do this always now.
411  //
412 
413  ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
414  Vcb->Bpb.SectorsPerFat) *
415  Vcb->Bpb.BytesPerSector * 8)
416  / FatIndexBitSize(&Vcb->Bpb) ) - 2;
417 
418  if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
419 
420  Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
421  }
422 
423  //
424  // Extend the virtual volume file to include the Fat
425  //
426 
427  {
428  CC_FILE_SIZES FileSizes;
429 
430  FileSizes.AllocationSize.QuadPart =
431  FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
432  FatBytesPerFat( &Vcb->Bpb ));
433  FileSizes.ValidDataLength = FatMaxLarge;
434 
435  if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
436 
437  FatInitializeCacheMap( Vcb->VirtualVolumeFile,
438  &FileSizes,
439  TRUE,
440  &FatData.CacheManagerNoOpCallbacks,
441  Vcb );
442 
443  } else {
444 
445  CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
446  }
447  }
448 
449  _SEH2_TRY {
450 
451  if (FatIsFat32(Vcb) &&
452  Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
453 
454  Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
455  MAX_CLUSTER_BITMAP_SIZE - 1) /
456  MAX_CLUSTER_BITMAP_SIZE;
457 
458  } else {
459 
460  Vcb->NumberOfWindows = 1;
461  }
462 
463  Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
464  Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
465  TAG_FAT_WINDOW );
466 
467  RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
468  NULL,
469  0 );
470 
471  //
472  // Choose a FAT window to begin operation in.
473  //
474 
475  if (Vcb->NumberOfWindows > 1) {
476 
477  //
478  // Read the fat and count up free clusters. We bias by the two reserved
479  // entries in the FAT.
480  //
481 
482  FatExamineFatEntries( IrpContext, Vcb,
483  2,
484  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
485  TRUE,
486  NULL,
487  NULL);
488 
489 
490  //
491  // Pick a window to begin allocating from
492  //
493 
494  Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
495 
496  } else {
497 
498  Vcb->CurrentWindow = &Vcb->Windows[0];
499 
500  //
501  // Carefully bias ourselves by the two reserved entries in the FAT.
502  //
503 
504  Vcb->CurrentWindow->FirstCluster = 2;
505  Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
506  }
507 
508  //
509  // Now transition to the FAT window we have chosen.
510  //
511 
512  FatExamineFatEntries( IrpContext, Vcb,
513  0,
514  0,
515  FALSE,
516  Vcb->CurrentWindow,
517  NULL);
518 
519  //
520  // Now set the ClusterHint to the first free bit in our favorite
521  // window (except the ClusterHint is off by two).
522  //
523 
524  Vcb->ClusterHint =
525  (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
526  BitIndex + 2 : 2;
527 
528  } _SEH2_FINALLY {
529 
530  DebugUnwind( FatSetupAllocationSupport );
531 
532  //
533  // If we hit an exception, back out.
534  //
535 
536  if (_SEH2_AbnormalTermination()) {
537 
538  FatTearDownAllocationSupport( IrpContext, Vcb );
539  }
540  } _SEH2_END;
541 
542  return;
543 }
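Two computations in the listing above are easy to get wrong by a factor of two: how many clusters the FAT itself can describe (bits in the FAT divided by bits per entry, minus the two reserved entries) and how many bitmap windows a large FAT32 volume is split into. A quick standalone check with invented geometry follows; the sector and bit-size values are assumptions, not a real BPB.

#include <stdio.h>

int main(void)
{
    /* Invented FAT32 geometry, chosen only for the sake of the arithmetic. */
    unsigned SectorsPerFat   = 8192;
    unsigned BytesPerSector  = 512;
    unsigned FatIndexBitSize = 32;        /* 12, 16 or 32                      */
    unsigned MaxBitmapSize   = 1u << 16;  /* value of MAX_CLUSTER_BITMAP_SIZE  */

    /* Bits in the FAT divided by bits per entry, minus the two reserved
     * entries, as ClustersDescribableByFat is computed in the listing. */
    unsigned Describable = (SectorsPerFat * BytesPerSector * 8u)
                               / FatIndexBitSize - 2u;

    /* Round up to whole windows, as NumberOfWindows is computed above. */
    unsigned Windows = (Describable + MaxBitmapSize - 1u) / MaxBitmapSize;

    printf("the FAT can describe %u clusters -> %u window(s)\n",
           Describable, Windows);
    return 0;
}

The described cluster count is then used to cap Vcb->AllocationSupport.NumberOfClusters, which is what guards against the undersized-FAT format bug mentioned in the listing.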
#define FatFileAreaLbo(B)
Definition: fat.h:458
#define TRUE
Definition: types.h:120
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
PVOID NTAPI FsRtlAllocatePoolWithTag(IN POOL_TYPE PoolType, IN ULONG NumberOfBytes, IN ULONG Tag)
Definition: filter.c:229
#define FatIndexBitSize(B)
Definition: fat.h:515
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
#define TAG_FAT_WINDOW
Definition: nodetype.h:166
VOID FatExamineFatEntries(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
Definition: allocsup.c:4722
static CC_FILE_SIZES FileSizes
#define PAGED_CODE()
Definition: video.h:57
VOID FatSetupAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:357
LARGE_INTEGER FileSize
Definition: cctypes.h:16
_SEH2_TRY
Definition: create.c:4250
#define FatReservedBytes(B)
Definition: fat.h:414
INLINE ULONG FatSelectBestWindow(IN PVCB Vcb)
Definition: allocsup.c:277
#define FatBytesPerFat(B)
Definition: fat.h:410
UCHAR FatLogOf(IN ULONG Value)
Definition: allocsup.c:4657
smooth NULL
Definition: ftsmooth.c:416
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:13
#define FatRootDirectorySize(B)
Definition: fat.h:427
NTSYSAPI ULONG WINAPI RtlFindClearBits(PCRTL_BITMAP, ULONG, ULONG)
VOID FatTearDownAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:547
#define DebugUnwind(X)
Definition: fatdata.h:315
FAT_DATA FatData
Definition: fatdata.c:56
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1425
LARGE_INTEGER ValidDataLength
Definition: cctypes.h:17
#define FatIsFat32(VCB)
Definition: fatprocs.h:1437
#define FatNumberOfClusters(B)
Definition: fat.h:482
VOID FatInitializeCacheMap(_In_ PFILE_OBJECT FileObject, _In_ PCC_FILE_SIZES FileSizes, _In_ BOOLEAN PinAccess, _In_ PCACHE_MANAGER_CALLBACKS Callbacks, _In_ PVOID LazyWriteContext)
Definition: cachesup.c:62
_SEH2_END
Definition: create.c:4424
LARGE_INTEGER AllocationSize
Definition: cctypes.h:15
LARGE_INTEGER FatMaxLarge
Definition: fatdata.c:63
VOID NTAPI CcSetFileSizes(IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
Definition: fssup.c:354
_SEH2_FINALLY
Definition: create.c:4395
unsigned int ULONG
Definition: retypes.h:1
#define FatRootDirectoryLbo(B)
Definition: fat.h:445
LONGLONG QuadPart
Definition: typedefs.h:113
#define FatBytesPerCluster(B)
Definition: fat.h:408
CACHE_MANAGER_CALLBACKS CacheManagerNoOpCallbacks
Definition: fatstruc.h:159

Referenced by FatSetupAllocationSupport().
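FatSelectBestWindow is only called above; its actual policy is defined earlier in allocsup.c and is not reproduced here. As a purely hypothetical stand-in, a selector could simply prefer the window with the most free clusters, as in the sketch below (EXAMPLE_WINDOW is a reduced illustrative record, not the driver's FAT_WINDOW).

#include <stdio.h>
#include <stddef.h>

typedef struct _EXAMPLE_WINDOW {
    unsigned FirstCluster;
    unsigned LastCluster;
    unsigned ClustersFree;
} EXAMPLE_WINDOW;

/* Hypothetical stand-in for FatSelectBestWindow: prefer the window with the
 * most free clusters.  The real routine's heuristics may well differ. */
static size_t PickWindowWithMostFree(const EXAMPLE_WINDOW *Windows, size_t Count)
{
    size_t Best = 0;

    for (size_t i = 1; i < Count; i++) {
        if (Windows[i].ClustersFree > Windows[Best].ClustersFree) {
            Best = i;
        }
    }
    return Best;
}

int main(void)
{
    /* Three made-up windows of 2^16 clusters each, starting at cluster 2. */
    EXAMPLE_WINDOW Windows[3] = {
        { 2,      65537,  100  },
        { 65538,  131073, 9000 },
        { 131074, 196609, 420  },
    };

    size_t Best = PickWindowWithMostFree(Windows, 3);
    printf("best window: %zu (FirstCluster %u)\n", Best, Windows[Best].FirstCluster);
    return 0;
}

Whichever window is chosen, the listing then sets the ClusterHint from the first clear bitmap bit plus two, since bitmap bit 0 corresponds to cluster 2 and the first two FAT entries are reserved.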

◆ FatTearDownAllocationSupport()

VOID FatTearDownAllocationSupport ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb 
)

Definition at line 547 of file allocsup.c.

570 {
571  DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
572  DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
573 
574  PAGED_CODE();
575 
576  //
577  // If there are FAT buckets, free them.
578  //
579 
580  if ( Vcb->Windows != NULL ) {
581 
582  ExFreePool( Vcb->Windows );
583  Vcb->Windows = NULL;
584  }
585 
586  //
587  // Free the memory associated with the free cluster bitmap.
588  //
589 
590  if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
591 
592  ExFreePool( Vcb->FreeClusterBitMap.Buffer );
593 
594  //
595  // NULL this field as a flag.
596  //
597 
598  Vcb->FreeClusterBitMap.Buffer = NULL;
599  }
600 
601  //
602  // And remove all the runs in the dirty fat Mcb
603  //
604 
605  FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
606 
607  DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
608 
609  UNREFERENCED_PARAMETER( IrpContext );
610 
611  return;
612 }
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:323
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
#define PAGED_CODE()
Definition: video.h:57
VOID FatRemoveMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN ULONG SectorCount)
Definition: fsctrl.c:599
smooth NULL
Definition: ftsmooth.c:416
#define Dbg
Definition: allocsup.c:28
#define Vcb
Definition: cdprocs.h:1425
#define ExFreePool(addr)
Definition: env_spec_w32.h:352

Referenced by FatDeleteVcb(), and FatSetupAllocationSupport().
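The "NULL this field as a flag" comment above is the key detail: FatTearDownAllocationSupport resets each pointer it frees, so reaching the routine again (for example via the unwind path in FatSetupAllocationSupport shown earlier) is harmless. A generic user-mode sketch of the same pattern, with hypothetical names and malloc/free standing in for pool allocation:

#include <stdlib.h>

/* Hypothetical reduced state, not the driver's VCB. */
typedef struct _EXAMPLE_STATE {
    void *Windows;        /* like Vcb->Windows                  */
    void *BitmapBuffer;   /* like Vcb->FreeClusterBitMap.Buffer */
} EXAMPLE_STATE;

static void TearDownExample(EXAMPLE_STATE *State)
{
    if (State->Windows != NULL) {
        free(State->Windows);
        State->Windows = NULL;        /* safe if torn down a second time */
    }

    if (State->BitmapBuffer != NULL) {
        free(State->BitmapBuffer);
        State->BitmapBuffer = NULL;   /* doubles as a "not set up" flag  */
    }
}

int main(void)
{
    EXAMPLE_STATE State = { malloc(16), malloc(16) };

    TearDownExample(&State);   /* frees and NULLs both fields      */
    TearDownExample(&State);   /* second call is a harmless no-op  */
    return 0;
}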