ReactOS 0.4.15-dev-7842-g558ab78
allocsup.c File Reference
#include "fatprocs.h"
Include dependency graph for allocsup.c:

Go to the source code of this file.

Macros

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)
 
#define Dbg   (DEBUG_TRACE_ALLOCSUP)
 
#define FatMin(a, b)   ((a) < (b) ? (a) : (b))
 
#define FAT_PREFETCH_PAGE_COUNT   0x100
 
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
 
#define FatLockFreeClusterBitMap(VCB)
 
#define FatUnlockFreeClusterBitMap(VCB)
 
#define FatIsClusterFree(IRPCONTEXT, VCB, FAT_INDEX)    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
 
#define FatFreeClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatAllocateClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatUnreserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatReserveClusters(IRPCONTEXT, VCB, FAT_INDEX, CLUSTER_COUNT)
 
#define FatFindFreeClusterRun(IRPCONTEXT, VCB, CLUSTER_COUNT, CLUSTER_HINT)
 
#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)
 
#define FatWindowOfCluster(C)   (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
 
#define MAX_ZERO_MDL_SIZE   (1*1024*1024)
 
#define MAXCOUNTCLUS   0x10000
 
#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
 

Functions

VOID FatLookupFatEntry (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
 
VOID FatSetFatRun (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
 
UCHAR FatLogOf (IN ULONG Value)
 
INLINE ULONG FatSelectBestWindow (IN PVCB Vcb)
 
VOID FatSetupAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
VOID FatTearDownAllocationSupport (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
 
 _Requires_lock_held_ (_Global_critical_region_)
 
CLUSTER_TYPE FatInterpretClusterType (IN PVCB Vcb, IN FAT_ENTRY Entry)
 
VOID FatExamineFatEntries (IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
 

Macro Definition Documentation

◆ ASSERT_CURRENT_WINDOW_GOOD

#define ASSERT_CURRENT_WINDOW_GOOD (   VCB)

Definition at line 83 of file allocsup.c.

◆ BugCheckFileId

#define BugCheckFileId   (FAT_BUG_CHECK_ALLOCSUP)

Definition at line 22 of file allocsup.c.

◆ COUNTSAVEDBCBS

#define COUNTSAVEDBCBS   ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)

◆ Dbg

#define Dbg   (DEBUG_TRACE_ALLOCSUP)

Definition at line 28 of file allocsup.c.

◆ FAT_PREFETCH_PAGE_COUNT

#define FAT_PREFETCH_PAGE_COUNT   0x100

Definition at line 36 of file allocsup.c.

◆ FatAllocateClusters

#define FatAllocateClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
} \
}
#define TRUE
Definition: types.h:120
#define FAT_CLUSTER_LAST
Definition: fat.h:258
Definition: cdstruc.h:498

Definition at line 158 of file allocsup.c.

◆ FatFindFreeClusterRun

#define FatFindFreeClusterRun (   IRPCONTEXT,
  VCB,
  CLUSTER_COUNT,
  CLUSTER_HINT 
)
Value:
( \
(CLUSTER_COUNT == 1) && \
FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
(CLUSTER_HINT) : \
RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
(CLUSTER_COUNT), \
(CLUSTER_HINT) - 2) + 2 \
)
struct _VCB VCB
#define FatIsClusterFree(IRPCONTEXT, VCB, FAT_INDEX)
Definition: allocsup.c:127

Definition at line 229 of file allocsup.c.

◆ FatFreeClusters

#define FatFreeClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
if ((CLUSTER_COUNT) == 1) { \
FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
} else { \
FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
} \
}
#define FALSE
Definition: types.h:117
#define FAT_CLUSTER_AVAILABLE
Definition: fat.h:255

Definition at line 140 of file allocsup.c.

◆ FatIsClusterFree

#define FatIsClusterFree (   IRPCONTEXT,
  VCB,
  FAT_INDEX 
)     (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)

Definition at line 127 of file allocsup.c.

◆ FatLockFreeClusterBitMap

#define FatLockFreeClusterBitMap (   VCB)
Value:
{ \
NT_ASSERT(KeAreApcsDisabled()); \
ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
}
BOOLEAN NTAPI KeAreApcsDisabled(VOID)
Definition: apc.c:958

Definition at line 99 of file allocsup.c.

◆ FatMin

#define FatMin (   a,
  b 
)    ((a) < (b) ? (a) : (b))

Definition at line 30 of file allocsup.c.

◆ FatReserveClusters

#define FatReserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
\
if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
_AfterRun = 2; \
} \
if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
(VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
if (1 == (VCB)->ClusterHint) { \
(VCB)->ClusterHint = 2; \
} \
} \
else { \
(VCB)->ClusterHint = _AfterRun; \
} \
}
NTSYSAPI ULONG WINAPI RtlFindClearBits(PCRTL_BITMAP, ULONG, ULONG)
uint32_t ULONG
Definition: typedefs.h:59
#define RtlCheckBit(BMH, BP)
Definition: rtlfuncs.h:3152

Definition at line 197 of file allocsup.c.

◆ FatUnlockFreeClusterBitMap

#define FatUnlockFreeClusterBitMap (   VCB)
Value:
{ \
ASSERT_CURRENT_WINDOW_GOOD(VCB) \
NT_ASSERT(KeAreApcsDisabled()); \
ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
}

Definition at line 112 of file allocsup.c.

◆ FatUnreserveClusters

#define FatUnreserveClusters (   IRPCONTEXT,
  VCB,
  FAT_INDEX,
  CLUSTER_COUNT 
)
Value:
{ \
NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
NT_ASSERT( (FAT_INDEX) >= 2); \
RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
if ((FAT_INDEX) < (VCB)->ClusterHint) { \
(VCB)->ClusterHint = (FAT_INDEX); \
} \
}

Definition at line 176 of file allocsup.c.

◆ FatWindowOfCluster

#define FatWindowOfCluster (   C)    (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)

Definition at line 253 of file allocsup.c.

◆ MAX_CLUSTER_BITMAP_SIZE

#define MAX_CLUSTER_BITMAP_SIZE   (1 << 16)

Definition at line 247 of file allocsup.c.

◆ MAX_ZERO_MDL_SIZE

#define MAX_ZERO_MDL_SIZE   (1*1024*1024)

Definition at line 2602 of file allocsup.c.

◆ MAXCOUNTCLUS

#define MAXCOUNTCLUS   0x10000

Function Documentation

◆ _Requires_lock_held_()

_Requires_lock_held_ ( _Global_critical_region_  )

Definition at line 617 of file allocsup.c.

660{
661 VBO CurrentVbo;
662 LBO CurrentLbo;
663 LBO PriorLbo;
664
665 VBO FirstVboOfCurrentRun = 0;
666 LBO FirstLboOfCurrentRun;
667
668 BOOLEAN LastCluster;
669 ULONG Runs;
670
671 PVCB Vcb;
673 ULONG BytesPerCluster;
674 ULARGE_INTEGER BytesOnVolume;
675
677
678 PAGED_CODE();
679
680 Vcb = FcbOrDcb->Vcb;
681
682
683 DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
684 DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
685 DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
686 DebugTrace( 0, Dbg, " pLbo = %8lx\n", Lbo);
687 DebugTrace( 0, Dbg, " pByteCount = %8lx\n", ByteCount);
688 DebugTrace( 0, Dbg, " pAllocated = %8lx\n", Allocated);
689
690 Context.Bcb = NULL;
691
692 *EndOnMax = FALSE;
693
694 //
695 // Check the trivial case that the mapping is already in our
696 // Mcb.
697 //
698
700
701 *Allocated = TRUE;
702
703 NT_ASSERT( *ByteCount != 0 );
704
705 //
706 // Detect the overflow case, trim and claim the condition.
707 //
708
709 if (Vbo + *ByteCount == 0) {
710
711 *EndOnMax = TRUE;
712 }
713
714 DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
715 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
716 return;
717 }
718
719 //
720 // Initialize the Vcb, the cluster size, LastCluster, and
721 // FirstLboOfCurrentRun (to be used as an indication of the first
722 // iteration through the following while loop).
723 //
724
725 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
726
727 BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
728
729 LastCluster = FALSE;
730 FirstLboOfCurrentRun = 0;
731
732 //
733 // Discard the case that the request extends beyond the end of
 734 // allocation. Note that if the allocation size is not known
735 // AllocationSize is set to 0xffffffff.
736 //
737
738 if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
739
740 *Allocated = FALSE;
741
742 DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
743 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
744 return;
745 }
746
747 //
748 // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
749 // and FatEntry to describe the beginning of the last entry in the Mcb.
750 // This is used as initialization for the following loop.
751 //
752 // If the Mcb was empty, we start at the beginning of the file with
753 // CurrentVbo set to 0 to indicate a new run.
754 //
755
756 if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
757
758 DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
759
760 CurrentVbo -= (BytesPerCluster - 1);
761 CurrentLbo -= (BytesPerCluster - 1);
762
763 //
764 // Convert an index to a count.
765 //
766
767 Runs += 1;
768
769 } else {
770
771 DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
772
773 //
774 // Check for an FcbOrDcb that has no allocation
775 //
776
777 if (FcbOrDcb->FirstClusterOfFile == 0) {
778
779 *Allocated = FALSE;
780
781 DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
782 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
783 return;
784
785 } else {
786
787 CurrentVbo = 0;
789 FirstVboOfCurrentRun = CurrentVbo;
790 FirstLboOfCurrentRun = CurrentLbo;
791
792 Runs = 0;
793
794 DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
795 }
796 }
797
798 //
799 // Now we know that we are looking up a valid Vbo, but it is
800 // not in the Mcb, which is a monotonically increasing list of
801 // Vbo's. Thus we have to go to the Fat, and update
802 // the Mcb as we go. We use a try-finally to unpin the page
803 // of fat hanging around. Also we mark *Allocated = FALSE, so that
 804 // the caller won't try to use the data if we hit an exception.
805 //
806
807 *Allocated = FALSE;
808
809 _SEH2_TRY {
810
811 FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
812
813 //
 814 // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
815 // The assumption here, is that only whole clusters of Vbos and Lbos
816 // are mapped in the Mcb.
817 //
818
819 NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
820 % BytesPerCluster == 0) &&
821 (CurrentVbo % BytesPerCluster == 0) );
822
823 //
824 // Starting from the first Vbo after the last Mcb entry, scan through
825 // the Fat looking for our Vbo. We continue through the Fat until we
826 // hit a noncontiguity beyond the desired Vbo, or the last cluster.
827 //
828
829 while ( !LastCluster ) {
830
831 //
832 // Get the next fat entry, and update our Current variables.
833 //
834
836
837 PriorLbo = CurrentLbo;
838 CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
839 CurrentVbo += BytesPerCluster;
840
841 switch ( FatInterpretClusterType( Vcb, FatEntry )) {
842
843 //
844 // Check for a break in the Fat allocation chain.
845 //
846
849 case FatClusterBad:
850
851 DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
852 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
853
854 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
856 break;
857
858 //
859 // If this is the last cluster, we must update the Mcb and
860 // exit the loop.
861 //
862
863 case FatClusterLast:
864
865 //
866 // Assert we know where the current run started. If the
 867 // Mcb was empty when we were called, then FirstLboOfCurrentRun
868 // was set to the start of the file. If the Mcb contained an
869 // entry, then FirstLboOfCurrentRun was set on the first
870 // iteration through the loop. Thus if FirstLboOfCurrentRun
871 // is 0, then there was an Mcb entry and we are on our first
 872 // iteration, meaning that the last cluster in the Mcb was
873 // really the last allocated cluster, but we checked Vbo
874 // against AllocationSize, and found it OK, thus AllocationSize
875 // must be too large.
876 //
877 // Note that, when we finally arrive here, CurrentVbo is actually
878 // the first Vbo beyond the file allocation and CurrentLbo is
879 // meaningless.
880 //
881
882 DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
883
884 //
885 // Detect the case of the maximal file. Note that this really isn't
886 // a proper Vbo - those are zero-based, and this is a one-based number.
887 // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
888 // 2^32 - 2.
889 //
890 // Just so we don't get confused here.
891 //
892
893 if (CurrentVbo == 0) {
894
895 *EndOnMax = TRUE;
896 CurrentVbo -= 1;
897 }
898
899 LastCluster = TRUE;
900
901 if (FirstLboOfCurrentRun != 0 ) {
902
903 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
904 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
905 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
906 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
907
909 &FcbOrDcb->Mcb,
910 FirstVboOfCurrentRun,
911 FirstLboOfCurrentRun,
912 CurrentVbo - FirstVboOfCurrentRun );
913
914 Runs += 1;
915 }
916
917 //
918 // Being at the end of allocation, make sure we have found
919 // the Vbo. If we haven't, seeing as we checked VBO
920 // against AllocationSize, the real disk allocation is less
921 // than that of AllocationSize. This comes about when the
 922 // real allocation is not yet known, and AllocationSize
923 // contains MAXULONG.
924 //
925 // KLUDGE! - If we were called by FatLookupFileAllocationSize
926 // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
927 // hint. Thus we merrily go along looking for a match that isn't
928 // there, but in the meantime building an Mcb. If this is
929 // the case, fill in AllocationSize and return.
930 //
931
932 if ( Vbo == MAXULONG - 1 ) {
933
934 *Allocated = FALSE;
935
936 FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
937
938 DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
940 }
941
942 //
943 // We will lie ever so slightly if we really terminated on the
944 // maximal byte of a file. It is really allocated.
945 //
946
947 if (Vbo >= CurrentVbo && !*EndOnMax) {
948
949 *Allocated = FALSE;
951 }
952
953 break;
954
955 //
956 // This is a continuation in the chain. If the run has a
957 // discontiguity at this point, update the Mcb, and if we are beyond
958 // the desired Vbo, this is the end of the run, so set LastCluster
959 // and exit the loop.
960 //
961
962 case FatClusterNext:
963
964 //
965 // This is the loop check. The Vbo must not be bigger than the size of
966 // the volume, and the Vbo must not have a) wrapped and b) not been at the
967 // very last cluster in the chain, for the case of the maximal file.
968 //
969
970 if ( CurrentVbo == 0 ||
971 (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
972
973 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
975 }
976
977 if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
978
979 //
980 // Note that on the first time through the loop
981 // (FirstLboOfCurrentRun == 0), we don't add the
 982 // run to the Mcb since it corresponds to the last
983 // run already stored in the Mcb.
984 //
985
986 if ( FirstLboOfCurrentRun != 0 ) {
987
988 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
989 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
990 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
991 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
992
994 &FcbOrDcb->Mcb,
995 FirstVboOfCurrentRun,
996 FirstLboOfCurrentRun,
997 CurrentVbo - FirstVboOfCurrentRun );
998
999 Runs += 1;
1000 }
1001
1002 //
 1003 // Since we are at a run boundary, with CurrentLbo and
1004 // CurrentVbo being the first cluster of the next run,
 1005 // we see if the run we just added encompasses the desired
1006 // Vbo, and if so exit. Otherwise we set up two new
1007 // First*boOfCurrentRun, and continue.
1008 //
1009
1010 if (CurrentVbo > Vbo) {
1011
1012 LastCluster = TRUE;
1013
1014 } else {
1015
1016 FirstVboOfCurrentRun = CurrentVbo;
1017 FirstLboOfCurrentRun = CurrentLbo;
1018 }
1019 }
1020 break;
1021
1022 default:
1023
1024 DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1025
1026#ifdef _MSC_VER
1027#pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1028#endif
1029 FatBugCheck( 0, 0, 0 );
1030
1031 break;
1032
1033 } // switch()
1034 } // while()
1035
1036 //
1037 // Load up the return parameters.
1038 //
1039 // On exit from the loop, Vbo still contains the desired Vbo, and
1040 // CurrentVbo is the first byte after the run that contained the
1041 // desired Vbo.
1042 //
1043
1044 *Allocated = TRUE;
1045
1046 *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1047
1048 *ByteCount = CurrentVbo - Vbo;
1049
1050 if (ARGUMENT_PRESENT(Index)) {
1051
1052 //
1053 // Note that Runs only needs to be accurate with respect to where we
1054 // ended. Since partial-lookup cases will occur without exclusive
1055 // synchronization, the Mcb itself may be much bigger by now.
1056 //
1057
1058 *Index = Runs - 1;
1059 }
1060
1061 try_exit: NOTHING;
1062
1063 } _SEH2_FINALLY {
1064
1065 DebugUnwind( FatLookupFileAllocation );
1066
1067 //
1068 // We are done reading the Fat, so unpin the last page of fat
1069 // that is hanging around
1070 //
1071
1072 FatUnpinBcb( IrpContext, Context.Bcb );
1073
1074 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1075 } _SEH2_END;
1076
1077 return;
1078}
#define PAGED_CODE()
unsigned char BOOLEAN
#define VOID
Definition: acefi.h:82
#define try_return(S)
Definition: cdprocs.h:2179
#define NULL
Definition: types.h:112
LONGLONG LBO
Definition: fat.h:34
#define FatGetIndexFromLbo(VCB, LBO)
Definition: fat.h:566
ULONG32 VBO
Definition: fat.h:38
#define FatGetLboFromIndex(VCB, FAT_INDEX)
Definition: fat.h:559
#define FatBugCheck(A, B, C)
Definition: nodetype.h:104
CLUSTER_TYPE FatInterpretClusterType(IN PVCB Vcb, IN FAT_ENTRY Entry)
Definition: allocsup.c:3473
#define Dbg
Definition: allocsup.c:28
VOID FatLookupFatEntry(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG FatIndex, IN OUT PULONG FatEntry, IN OUT PFAT_ENUMERATION_CONTEXT Context)
Definition: allocsup.c:3565
#define DebugTrace(INDENT, LEVEL, X, Y)
Definition: fatdata.h:313
#define DebugUnwind(X)
Definition: fatdata.h:315
#define FatUnpinBcb(IRPCONTEXT, BCB)
Definition: fatprocs.h:546
BOOLEAN FatLookupMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, OUT PLBO Lbo, OUT PULONG ByteCount OPTIONAL, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:418
IN PFCB IN VBO OUT PLBO Lbo
Definition: fatprocs.h:308
IN PFCB FcbOrDcb
Definition: fatprocs.h:306
BOOLEAN FatAddMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN LBO Lbo, IN ULONG SectorCount)
Definition: fsctrl.c:364
IN PFCB IN VBO Vbo
Definition: fatprocs.h:307
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN OUT PBOOLEAN EndOnMax
Definition: fatprocs.h:311
#define FatRaiseStatus(IRPCONTEXT, STATUS)
Definition: fatprocs.h:2977
IN PFCB IN VBO OUT PLBO OUT PULONG OUT PBOOLEAN Allocated
Definition: fatprocs.h:310
IN PVCB IN ULONG IN FAT_ENTRY FatEntry
Definition: fatprocs.h:385
BOOLEAN FatLookupLastMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, OUT PVBO Vbo, OUT PLBO Lbo, OUT PULONG Index OPTIONAL)
Definition: fsctrl.c:494
@ FatClusterLast
Definition: fatstruc.h:1750
@ FatClusterBad
Definition: fatstruc.h:1749
@ FatClusterReserved
Definition: fatstruc.h:1748
@ FatClusterAvailable
Definition: fatstruc.h:1747
@ FatClusterNext
Definition: fatstruc.h:1751
#define _SEH2_FINALLY
Definition: filesup.c:21
#define _SEH2_END
Definition: filesup.c:22
#define _SEH2_TRY
Definition: filesup.c:19
#define NOTHING
Definition: input_list.c:10
#define UInt32x32To64(a, b)
Definition: intsafe.h:252
#define ARGUMENT_PRESENT(ArgumentPointer)
#define Vcb
Definition: cdprocs.h:1415
Definition: fsck.fat.h:192
PVCB Vcb
Definition: cdstruc.h:933
CD_MCB Mcb
Definition: cdstruc.h:1016
ULONG FirstClusterOfFile
Definition: fatstruc.h:818
FSRTL_ADVANCED_FCB_HEADER Header
Definition: cdstruc.h:925
$ULONG LowPart
Definition: ntbasedef.h:569
ULONGLONG QuadPart
Definition: ms-dtyp.idl:185
$ULONG HighPart
Definition: ntbasedef.h:570
ULONG Runs
Definition: symtest.c:7
#define MAXULONG
Definition: typedefs.h:251
uint32_t * PULONG
Definition: typedefs.h:59
#define STATUS_FILE_CORRUPT_ERROR
Definition: udferr_usr.h:168
_In_ WDFCOLLECTION _In_ ULONG Index
_Must_inspect_result_ typedef _In_ PHYSICAL_ADDRESS _In_ LARGE_INTEGER ByteCount
Definition: iotypes.h:1099
#define NT_ASSERT
Definition: rtlfuncs.h:3310

◆ FatExamineFatEntries()

VOID FatExamineFatEntries ( IN PIRP_CONTEXT  IrpContext,
IN PVCB  Vcb,
IN ULONG StartIndex  OPTIONAL,
IN ULONG EndIndex  OPTIONAL,
IN BOOLEAN  SetupWindows,
IN PFAT_WINDOW SwitchToWindow  OPTIONAL,
IN PULONG BitMapBuffer  OPTIONAL 
)

Definition at line 4720 of file allocsup.c.

4770{
4772 ULONG Page = 0;
4773 ULONG Offset = 0;
4776 FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4777 PUSHORT FatBuffer;
4778 PVOID pv;
4779 PBCB Bcb = NULL;
4780 ULONG EntriesPerWindow;
4781
4782 ULONG ClustersThisRun;
4783 ULONG StartIndexOfThisRun;
4784
4785 PULONG FreeClusterCount = NULL;
4786
4787 PFAT_WINDOW CurrentWindow = NULL;
4788
4789 PVOID NewBitMapBuffer = NULL;
4790 PRTL_BITMAP BitMap = NULL;
4791 RTL_BITMAP PrivateBitMap;
4792
4793 ULONG ClusterSize = 0;
4794 ULONG PrefetchPages = 0;
4795 ULONG FatPages = 0;
4796
4797 VBO BadClusterVbo = 0;
4798 LBO Lbo = 0;
4799
4800 enum RunType {
4802 AllocatedClusters,
4803 UnknownClusters
4804 } CurrentRun;
4805
4806 PAGED_CODE();
4807
4808 //
4809 // Now assert correct usage.
4810 //
4811
4812 FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4813
4814 NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4815 NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4816
4817 if (Vcb->NumberOfWindows > 1) {
4818
4819 //
4820 // FAT32: Calculate the number of FAT entries covered by a window. This is
4821 // equal to the number of bits in the freespace bitmap, the size of which
4822 // is hardcoded.
4823 //
4824
4825 EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4826
4827 } else {
4828
4829 EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4830 }
4831
4832 //
4833 // We will also fill in the cumulative count of free clusters for
4834 // the entire volume. If this is not appropriate, NULL it out
4835 // shortly.
4836 //
4837
4838 FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4839
4840 if (SetupWindows) {
4841
4842 NT_ASSERT(BitMapBuffer == NULL);
4843
4844 //
4845 // In this case we're just supposed to scan the fat and set up
4846 // the information regarding where the buckets fall and how many
4847 // free clusters are in each.
4848 //
4849 // It is fine to monkey with the real windows, we must be able
4850 // to do this to activate the volume.
4851 //
4852
4853 BitMap = NULL;
4854
4855 CurrentWindow = &Vcb->Windows[0];
4856 CurrentWindow->FirstCluster = StartIndex;
4857 CurrentWindow->ClustersFree = 0;
4858
4859 //
4860 // We always wish to calculate total free clusters when
4861 // setting up the FAT windows.
4862 //
4863
4864 } else if (BitMapBuffer == NULL) {
4865
4866 //
4867 // We will be filling in the free cluster bitmap for the volume.
4868 // Careful, we can raise out of here and be hopelessly hosed if
4869 // we built this up in the main bitmap/window itself.
4870 //
4871 // For simplicity's sake, we'll do the swap for everyone. FAT32
4872 // provokes the need since we can't tolerate partial results
4873 // when switching windows.
4874 //
4875
4876 NT_ASSERT( SwitchToWindow );
4877
4878 CurrentWindow = SwitchToWindow;
4879 StartIndex = CurrentWindow->FirstCluster;
4880 EndIndex = CurrentWindow->LastCluster;
4881
4882 BitMap = &PrivateBitMap;
4883 NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4884 (EntriesPerWindow + 7) / 8,
4886
4887 RtlInitializeBitMap( &PrivateBitMap,
4888 NewBitMapBuffer,
4889 EndIndex - StartIndex + 1);
4890
4891 if ((FatIndexBitSize == 32) &&
4892 (Vcb->NumberOfWindows > 1)) {
4893
4894 //
 4895 // We do not wish to count total clusters here.
4896 //
4897
4898 FreeClusterCount = NULL;
4899
4900 }
4901
4902 } else {
4903
4904 BitMap = &PrivateBitMap;
4905 RtlInitializeBitMap(&PrivateBitMap,
4906 BitMapBuffer,
4907 EndIndex - StartIndex + 1);
4908
4909 //
4910 // We do not count total clusters here.
4911 //
4912
4913 FreeClusterCount = NULL;
4914 }
4915
4916 //
4917 // Now, our start index better be in the file heap.
4918 //
4919
4920 NT_ASSERT( StartIndex >= 2 );
4921
4922 _SEH2_TRY {
4923
4924 //
4925 // Pick up the initial chunk of the FAT and first entry.
4926 //
4927
4928 if (FatIndexBitSize == 12) {
4929
4930 //
4931 // We read in the entire fat in the 12 bit case.
4932 //
4933
4934 FatReadVolumeFile( IrpContext,
4935 Vcb,
4936 FatReservedBytes( &Vcb->Bpb ),
4937 FatBytesPerFat( &Vcb->Bpb ),
4938 &Bcb,
4939 (PVOID *)&FatBuffer );
4940
4941 FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4942
4943 } else {
4944
4945 //
4946 // Read in one page of fat at a time. We cannot read in the
4947 // all of the fat we need because of cache manager limitations.
4948 //
4949
4950 ULONG BytesPerEntry = FatIndexBitSize >> 3;
4951
4952 FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE;
4953 Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4954
4955 Offset = Page * PAGE_SIZE;
4956
4957 //
4958 // Prefetch the FAT entries in memory for optimal performance.
4959 //
4960
4961 PrefetchPages = FatPages - Page;
4962
4963 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
4964
4965 PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page;
4966 }
4967
4968#if (NTDDI_VERSION >= NTDDI_WIN8)
4969 FatPrefetchPages( IrpContext,
4970 Vcb->VirtualVolumeFile,
4971 Page,
4972 PrefetchPages );
4973#endif
4974
4975 FatReadVolumeFile( IrpContext,
4976 Vcb,
4977 Offset,
4978 PAGE_SIZE,
4979 &Bcb,
4980 &pv);
4981
4982 if (FatIndexBitSize == 32) {
4983
4984 FatBuffer = (PUSHORT)((PUCHAR)pv +
4985 (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4986 PAGE_SIZE);
4987
4988 FirstFatEntry = *((PULONG)FatBuffer);
4989 FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4990
4991 } else {
4992
4993 FatBuffer = (PUSHORT)((PUCHAR)pv +
4994 FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4995
4996 FirstFatEntry = *FatBuffer;
4997 }
4998
4999 }
5000
5001 ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster);
5002
5003 CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
5004 FreeClusters : AllocatedClusters;
5005
5006 StartIndexOfThisRun = StartIndex;
5007
5008 for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
5009
5010 if (FatIndexBitSize == 12) {
5011
5013
5014 } else {
5015
5016 //
5017 // If we are setting up the FAT32 windows and have stepped into a new
5018 // bucket, finalize this one and move forward.
5019 //
5020
5021 if (SetupWindows &&
5022 FatIndex > StartIndex &&
5023 (FatIndex - 2) % EntriesPerWindow == 0) {
5024
5025 CurrentWindow->LastCluster = FatIndex - 1;
5026
5027 if (CurrentRun == FreeClusters) {
5028
5029 //
5030 // We must be counting clusters in order to modify the
5031 // contents of the window.
5032 //
5033
5034 NT_ASSERT( FreeClusterCount );
5035
5036 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5037 CurrentWindow->ClustersFree += ClustersThisRun;
5038
5039 if (FreeClusterCount) {
5040 *FreeClusterCount += ClustersThisRun;
5041 }
5042
5043 } else {
5044
5045 NT_ASSERT(CurrentRun == AllocatedClusters);
5046
5047 }
5048
5049 StartIndexOfThisRun = FatIndex;
5050 CurrentRun = UnknownClusters;
5051
5052 CurrentWindow++;
5053 CurrentWindow->ClustersFree = 0;
5054 CurrentWindow->FirstCluster = FatIndex;
5055 }
5056
5057 //
5058 // If we just stepped onto a new page, grab a new pointer.
5059 //
5060
5061 if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
5062
5063 FatUnpinBcb( IrpContext, Bcb );
5064
5065 Page++;
5066 Offset += PAGE_SIZE;
5067
5068#if (NTDDI_VERSION >= NTDDI_WIN8)
5069 //
5070 // If we have exhausted all the prefetch pages, prefetch the next chunk.
5071 //
5072
5073 if (--PrefetchPages == 0) {
5074
5075 PrefetchPages = FatPages - Page;
5076
5077 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
5078
5079 PrefetchPages = FAT_PREFETCH_PAGE_COUNT;
5080 }
5081
5082 FatPrefetchPages( IrpContext,
5083 Vcb->VirtualVolumeFile,
5084 Page,
5085 PrefetchPages );
5086 }
5087#endif
5088
5089 FatReadVolumeFile( IrpContext,
5090 Vcb,
5091 Offset,
5092 PAGE_SIZE,
5093 &Bcb,
5094 &pv );
5095
5096 FatBuffer = (PUSHORT)pv;
5097 }
5098
5099 if (FatIndexBitSize == 32) {
5100
5101#ifndef __REACTOS__
5102#ifdef _MSC_VER
5103#pragma warning( suppress: 4213 )
5104#endif
5105 FatEntry = *((PULONG)FatBuffer)++;
5107#else
5108 FatEntry = *((PULONG)FatBuffer);
5109 FatBuffer += 2; /* PUSHORT FatBuffer */
5111#endif
5112
5113 } else {
5114
5115 FatEntry = *FatBuffer;
5116 FatBuffer += 1;
5117 }
5118 }
5119
5120 if (CurrentRun == UnknownClusters) {
5121
5122 CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
5123 FreeClusters : AllocatedClusters;
5124 }
5125
5126 //
5127 // Are we switching from a free run to an allocated run?
5128 //
5129
5130 if (CurrentRun == FreeClusters &&
5132
5133 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5134
5135 if (FreeClusterCount) {
5136
5137 *FreeClusterCount += ClustersThisRun;
5138 CurrentWindow->ClustersFree += ClustersThisRun;
5139 }
5140
5141 if (BitMap) {
5142
5143 RtlClearBits( BitMap,
5144 StartIndexOfThisRun - StartIndex,
5145 ClustersThisRun );
5146 }
5147
5148 CurrentRun = AllocatedClusters;
5149 StartIndexOfThisRun = FatIndex;
5150 }
5151
5152 //
5153 // Are we switching from an allocated run to a free run?
5154 //
5155
5156 if (CurrentRun == AllocatedClusters &&
5158
5159 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5160
5161 if (BitMap) {
5162
5163 RtlSetBits( BitMap,
5164 StartIndexOfThisRun - StartIndex,
5165 ClustersThisRun );
5166 }
5167
5168 CurrentRun = FreeClusters;
5169 StartIndexOfThisRun = FatIndex;
5170 }
5171
5172 //
5173 // If the entry is marked bad, add it to the bad block MCB
5174 //
5175
5176 if ((SetupWindows || (Vcb->NumberOfWindows == 1)) &&
5178
5179 //
5180 // This cluster is marked bad.
5181 // Add it to the BadBlockMcb.
5182 //
5183
5185 FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize );
5186 BadClusterVbo += ClusterSize;
5187 }
5188 }
5189
5190 //
5191 // If we finished the scan, then we know about all the possible bad clusters.
5192 //
5193
5195
5196 //
5197 // Now we have to record the final run we encountered
5198 //
5199
5200 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5201
5202 if (CurrentRun == FreeClusters) {
5203
5204 if (FreeClusterCount) {
5205
5206 *FreeClusterCount += ClustersThisRun;
5207 CurrentWindow->ClustersFree += ClustersThisRun;
5208 }
5209
5210 if (BitMap) {
5211
5212 RtlClearBits( BitMap,
5213 StartIndexOfThisRun - StartIndex,
5214 ClustersThisRun );
5215 }
5216
5217 } else {
5218
5219 if (BitMap) {
5220
5221 RtlSetBits( BitMap,
5222 StartIndexOfThisRun - StartIndex,
5223 ClustersThisRun );
5224 }
5225 }
5226
5227 //
5228 // And finish the last window if we are in setup.
5229 //
5230
5231 if (SetupWindows) {
5232
5233 CurrentWindow->LastCluster = FatIndex - 1;
5234 }
5235
5236 //
 5237 // Now switch the active window if required. We've successfully gotten everything
5238 // nailed down.
5239 //
5240 // If we were tracking the free cluster count, this means we should update the
5241 // window. This is the case of FAT12/16 initialization.
5242 //
5243
5244 if (SwitchToWindow) {
5245
5246 if (Vcb->FreeClusterBitMap.Buffer) {
5247
5248 ExFreePool( Vcb->FreeClusterBitMap.Buffer );
5249 }
5250
5251 RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
5252 NewBitMapBuffer,
5253 EndIndex - StartIndex + 1 );
5254
5255 NewBitMapBuffer = NULL;
5256
5257 Vcb->CurrentWindow = SwitchToWindow;
5258 Vcb->ClusterHint = (ULONG)-1;
5259
5260 if (FreeClusterCount) {
5261
5262 NT_ASSERT( !SetupWindows );
5263
5264 Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
5265 }
5266 }
5267
5268 //
 5269 // Make sure plausible things occurred ...
5270 //
5271
5272 if (!SetupWindows && BitMapBuffer == NULL) {
5273
5275 }
5276
5277 NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
5278
5279 } _SEH2_FINALLY {
5280
5281 //
5282 // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5283 //
5284
5285 FatUnpinBcb( IrpContext, Bcb);
5286
5287 if (NewBitMapBuffer) {
5288
5289 ExFreePool( NewBitMapBuffer );
5290 }
5291 } _SEH2_END;
5292}
#define ALIGN_UP_BY(size, align)
DWORD ClusterSize
Definition: format.c:67
#define FAT32_ENTRY_MASK
Definition: fat.h:227
#define FatReservedBytes(B)
Definition: fat.h:414
#define FatLookup12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:584
#define FatIndexBitSize(B)
Definition: fat.h:515
#define FatBytesPerFat(B)
Definition: fat.h:410
#define TAG_FAT_BITMAP
Definition: nodetype.h:163
NTSTATUS FreeClusters(PNTFS_VCB Vcb, PNTFS_ATTR_CONTEXT AttrContext, ULONG AttrOffset, PFILE_RECORD_HEADER FileRecord, ULONG ClustersToFree)
Definition: attrib.c:1057
#define PAGE_SIZE
Definition: env_spec_w32.h:49
#define ExFreePool(addr)
Definition: env_spec_w32.h:352
#define PagedPool
Definition: env_spec_w32.h:308
#define SetFlag(_F, _SF)
Definition: ext2fs.h:187
#define MAX_CLUSTER_BITMAP_SIZE
Definition: allocsup.c:247
#define FAT_PREFETCH_PAGE_COUNT
Definition: allocsup.c:36
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
Definition: allocsup.c:83
VOID FatReadVolumeFile(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN VBO StartingVbo, IN ULONG ByteCount, OUT PBCB *Bcb, OUT PVOID *Buffer)
Definition: cachesup.c:102
NTSTATUS FatPrefetchPages(IN PIRP_CONTEXT IrpContext, IN PFILE_OBJECT FileObject, IN ULONG StartingPage, IN ULONG PageCount)
Definition: cachesup.c:1929
IN PVCB IN VBO IN ULONG OUT PBCB * Bcb
Definition: fatprocs.h:414
IN PVCB IN ULONG FatIndex
Definition: fatprocs.h:383
#define VCB_STATE_FLAG_BAD_BLOCKS_POPULATED
Definition: fatstruc.h:576
NTSYSAPI void WINAPI RtlInitializeBitMap(PRTL_BITMAP, PULONG, ULONG)
NTSYSAPI void WINAPI RtlClearBits(PRTL_BITMAP, ULONG, ULONG)
NTSYSAPI void WINAPI RtlSetBits(PRTL_BITMAP, ULONG, ULONG)
_In_ ULONG _In_ ULONG Offset
Definition: ntddpcm.h:101
PVOID NTAPI FsRtlAllocatePoolWithTag(IN POOL_TYPE PoolType, IN ULONG NumberOfBytes, IN ULONG Tag)
Definition: filter.c:229
_In_ PVOID _Out_opt_ BOOLEAN _Out_opt_ PPFN_NUMBER Page
Definition: mm.h:1306
ULONG ClustersFree
Definition: fatstruc.h:175
ULONG FirstCluster
Definition: fatstruc.h:173
ULONG LastCluster
Definition: fatstruc.h:174
uint16_t * PUSHORT
Definition: typedefs.h:56
uint32_t ULONG_PTR
Definition: typedefs.h:65
unsigned char * PUCHAR
Definition: typedefs.h:53

Referenced by FatSetupAllocationSupport().

◆ FatInterpretClusterType()

CLUSTER_TYPE
FatInterpretClusterType (
    IN PVCB Vcb,
    IN FAT_ENTRY Entry
    )

/*++

Routine Description:

    This procedure tells the caller how to interpret a fat table entry.
    It will indicate if the fat cluster is available, reserved, bad,
    the last one in a chain, or points to another cluster.  The 12- and
    16-bit entries in the reserved/bad/last range are first extended
    into the 32-bit value space so that a single set of FAT32 boundary
    constants can classify all three FAT formats.

Arguments:

    Vcb - Supplies the Vcb whose FatIndexBitSize (12/16/32) governs the
        interpretation of the raw entry.

    Entry - Supplies the fat entry to examine.

Return Value:

    CLUSTER_TYPE - Returns the type of the input fat entry.

--*/

{
    DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
    DebugTrace( 0, Dbg, "  Vcb   = %p\n", Vcb);
    DebugTrace( 0, Dbg, "  Entry = %8lx\n", Entry);

    PAGED_CODE();

    //
    //  Canonicalize the entry.  FAT32 entries carry 4 undefined high
    //  bits that must be masked away; 12- and 16-bit entries in the
    //  special range are extended so the FAT32 constants below apply.
    //

    switch(Vcb->AllocationSupport.FatIndexBitSize ) {
    case 32:
        Entry &= FAT32_ENTRY_MASK;
        break;

    case 12:
        NT_ASSERT( Entry <= 0xfff );
        if (Entry >= 0x0ff0) {
            Entry |= 0x0FFFF000;
        }
        break;

    default:
    case 16:
        NT_ASSERT( Entry <= 0xffff );
        if (Entry >= 0x0fff0) {
            Entry |= 0x0FFF0000;
        }
        break;
    }

    if (Entry == FAT_CLUSTER_AVAILABLE) {

        DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);

        return FatClusterAvailable;

    } else if (Entry < FAT_CLUSTER_RESERVED) {

        DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);

        return FatClusterNext;

    } else if (Entry < FAT_CLUSTER_BAD) {

        DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);

        return FatClusterReserved;

    } else if (Entry == FAT_CLUSTER_BAD) {

        DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);

        return FatClusterBad;

    } else {

        DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);

        return FatClusterLast;
    }
}
#define FAT_CLUSTER_BAD
Definition: fat.h:257
#define FAT_CLUSTER_RESERVED
Definition: fat.h:256
base of all file and directory entries
Definition: entries.h:83

Referenced by _Requires_lock_held_(), and FatExamineFatEntries().

◆ FatLogOf()

UCHAR
FatLogOf (
    IN ULONG Value
    )

/*++

Routine Description:

    Computes the base-2 logarithm of Value, which is required to be an
    exact power of two (these come from on-disk geometry fields such as
    bytes-per-sector).  If Value is not a power of two, the volume's
    internal data structures are seriously corrupted and we bug check.

Arguments:

    Value - Supplies the power of two whose logarithm is wanted.

Return Value:

    UCHAR - log2(Value).

--*/

{
    UCHAR Shift = 0;

#if FASTFATDBG
    ULONG OrigValue = Value;
#endif

    PAGED_CODE();

    //
    //  Shift right until only bit zero can remain set, counting the
    //  shifts as we go.
    //

    while ( (Value & 0xfffffffe) != 0 ) {

        Shift++;
        Value >>= 1;
    }

    //
    //  If bit zero did not end up as the lone set bit, the input was
    //  not a power of two: the file system messed up, so bug check.
    //

    if (Value != 0x1) {

        DebugTrace(+1, Dbg, "LogOf\n", 0);
        DebugTrace( 0, Dbg, " Value = %8lx\n", OrigValue);

        DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);

        DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Shift);

#ifdef _MSC_VER
#pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
#endif
        FatBugCheck( Value, Shift, 0 );
    }

    return Shift;
}
_Must_inspect_result_ _In_ WDFKEY _In_ PCUNICODE_STRING _Out_opt_ PUSHORT _Inout_opt_ PUNICODE_STRING Value
Definition: wdfregistry.h:413
unsigned char UCHAR
Definition: xmlstorage.h:181

Referenced by FatSetupAllocationSupport().

◆ FatLookupFatEntry()

VOID
FatLookupFatEntry (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG FatIndex,
    IN OUT PULONG FatEntry,
    IN OUT PFAT_ENUMERATION_CONTEXT Context
    )

/*++

Routine Description:

    This routine takes an index into the fat and gives back the value
    in the fat at that index.  The Context keeps a page of the fat
    pinned between calls (the whole fat for FAT12), so sequential
    lookups are fast.  The caller is responsible for eventually
    unpinning Context->Bcb.

Arguments:

    Vcb - Supplies the Vcb to examine; yields 12/16/32 bit info and the
        fat volume file caching.

    FatIndex - Supplies the fat index to look up.

    FatEntry - Receives the fat entry found at FatIndex.

    Context - Keeps track of a page of pinned fat between calls.

Return Value:

    VOID

--*/

{
    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
    DebugTrace( 0, Dbg, "  Vcb      = %p\n", Vcb);
    DebugTrace( 0, Dbg, "  FatIndex = %4x\n", FatIndex);
    DebugTrace( 0, Dbg, "  FatEntry = %8lx\n", FatEntry);

    //
    //  Make sure they gave us a valid fat index.
    //

    FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);

    //
    //  Case on 12 or 16 bit fats.
    //
    //  In the 12 bit case (mostly floppies) we always have the whole fat
    //  (max 6k bytes) pinned during allocation operations.  This is possibly
    //  a wee bit slower, but saves headaches over fat entries with 8 bits
    //  on one page, and 4 bits on the next.
    //
    //  The 16 bit case always keeps the last used page pinned until all
    //  operations are done and it is unpinned.
    //

    //
    //  DEAL WITH 12 BIT CASE
    //

    if (Vcb->AllocationSupport.FatIndexBitSize == 12) {

        //
        //  Check to see if the fat is already pinned, otherwise pin it.
        //

        if (Context->Bcb == NULL) {

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               FatReservedBytes( &Vcb->Bpb ),
                               FatBytesPerFat( &Vcb->Bpb ),
                               &Context->Bcb,
                               &Context->PinnedPage );
        }

        //
        //  Load the return value.  The 12-bit entry may straddle a byte
        //  boundary, which the macro handles.
        //

        FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );

    } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {

        //
        //  DEAL WITH 32 BIT CASE
        //

        ULONG PageEntryOffset;
        ULONG OffsetIntoVolumeFile;

        //
        //  Initialize two local variables that help us.
        //
        OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
        PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);

        //
        //  Check to see if we need to read in a new page of fat
        //

        if ((Context->Bcb == NULL) ||
            (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {

            //
            //  The entry wasn't in the pinned page, so we must unpin the current
            //  page (if any) and read in a new page.
            //

            FatUnpinBcb( IrpContext, Context->Bcb );

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
                               PAGE_SIZE,
                               &Context->Bcb,
                               &Context->PinnedPage );

            Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
        }

        //
        //  Grab the fat entry from the pinned page, and return.  The high
        //  4 undefined bits of a FAT32 entry are masked off.
        //

        *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;

    } else {

        //
        //  DEAL WITH 16 BIT CASE
        //

        ULONG PageEntryOffset;
        ULONG OffsetIntoVolumeFile;

        //
        //  Initialize two local variables that help us.
        //

        OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
        PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);

        //
        //  Check to see if we need to read in a new page of fat
        //

        if ((Context->Bcb == NULL) ||
            (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {

            //
            //  The entry wasn't in the pinned page, so we must unpin the current
            //  page (if any) and read in a new page.
            //

            FatUnpinBcb( IrpContext, Context->Bcb );

            FatReadVolumeFile( IrpContext,
                               Vcb,
                               OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
                               PAGE_SIZE,
                               &Context->Bcb,
                               &Context->PinnedPage );

            Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
        }

        //
        //  Grab the fat entry from the pinned page, and return
        //

        *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
    }

    DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
    return;
}
#define FatVerifyIndexIsValid(IC, V, I)
Definition: fat.h:532
unsigned short USHORT
Definition: pedump.c:61

Referenced by _Requires_lock_held_().

◆ FatSelectBestWindow()

INLINE
ULONG
FatSelectBestWindow(
    IN PVCB Vcb
    )

/*++

Routine Description:

    Picks the FAT window that allocation should operate in.  Preference
    order: the first window found with more than half of its clusters
    free; failing that, the first completely empty window on the disc;
    failing that, the window with the greatest number of free clusters.

Arguments:

    Vcb - Supplies the Vcb for the volume; must have more than one
        window (asserted).

Return Value:

    ULONG - index of the chosen window within Vcb->Windows.

--*/

{
    ULONG WindowIndex;
    ULONG Best = 0;
    ULONG BestFree = 0;
    ULONG FirstAllFree = (ULONG)-1;
    ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;

    NT_ASSERT( 1 != Vcb->NumberOfWindows);

    for (WindowIndex = 0; WindowIndex < Vcb->NumberOfWindows; WindowIndex++) {

        if (Vcb->Windows[WindowIndex].ClustersFree == ClustersPerWindow) {

            //
            //  Remember only the first completely empty window on the disc.
            //

            if ((ULONG)-1 == FirstAllFree) {

                FirstAllFree = WindowIndex;
            }

        } else if (Vcb->Windows[WindowIndex].ClustersFree > BestFree) {

            //
            //  Best (partially used) window seen so far.
            //

            BestFree = Vcb->Windows[WindowIndex].ClustersFree;
            Best = WindowIndex;

            //
            //  More than 50% free is good enough - stop scanning.
            //

            if (BestFree >= (ClustersPerWindow >> 1)) {

                break;
            }
        }
    }

    //
    //  No window cleared the 50% bar: prefer the first empty window if
    //  one exists, otherwise stick with the fullest-of-free candidate.
    //

    if ((BestFree < (ClustersPerWindow >> 1)) && ((ULONG)-1 != FirstAllFree)) {

        Best = FirstAllFree;
    }

    return Best;
}
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248

Referenced by FatSetupAllocationSupport().

◆ FatSetFatRun()

VOID
FatSetFatRun (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN ULONG StartingFatIndex,
    IN ULONG ClusterCount,
    IN BOOLEAN ChainTogether
    )

/*++

Routine Description:

    This routine sets a continuous run of clusters in the fat.  If
    ChainTogether is TRUE, then the clusters are linked together as in
    allocating a run, with the last cluster receiving FAT_CLUSTER_LAST.
    If ChainTogether is FALSE, all the entries are set to
    FAT_CLUSTER_AVAILABLE, as in de-allocating a run.

Arguments:

    Vcb - Supplies the Vcb to examine; yields 12/16/32 bit info, etc.

    StartingFatIndex - Supplies the destination fat index.

    ClusterCount - Supplies the number of contiguous clusters to work on.

    ChainTogether - Tells us whether to fill the entries with links, or
        FAT_CLUSTER_AVAILABLE.

Return Value:

    VOID

--*/

{
#define MAXCOUNTCLUS 0x10000
#define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
    PBCB SavedBcbs[COUNTSAVEDBCBS][2];

    ULONG SectorSize;
    ULONG Cluster;

    LBO StartSectorLbo;
    LBO FinalSectorLbo;
    LBO Lbo;

    PVOID PinnedFat;

    BOOLEAN ReleaseMutex = FALSE;

    ULONG SavedStartingFatIndex = StartingFatIndex;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
    DebugTrace( 0, Dbg, "  Vcb              = %p\n", Vcb);
    DebugTrace( 0, Dbg, "  StartingFatIndex = %8x\n", StartingFatIndex);
    DebugTrace( 0, Dbg, "  ClusterCount     = %8lx\n", ClusterCount);
    DebugTrace( 0, Dbg, "  ChainTogether    = %s\n", ChainTogether ? "TRUE":"FALSE");

    //
    //  Make sure they gave us a valid fat run.
    //

    FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
    FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);

    //
    //  Check special case
    //

    if (ClusterCount == 0) {

        DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
        return;
    }

    //
    //  Set Sector Size
    //

    SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;

    //
    //  Case on 12 or 16 bit fats.
    //
    //  In the 12 bit case (mostly floppies) we always have the whole fat
    //  (max 6k bytes) pinned during allocation operations.  This is possibly
    //  a wee bit slower, but saves headaches over fat entries with 8 bits
    //  on one page, and 4 bits on the next.
    //
    //  In the 16 bit case we only read one page at a time, as needed.
    //

    //
    //  DEAL WITH 12 BIT CASE
    //

    _SEH2_TRY {

        if (Vcb->AllocationSupport.FatIndexBitSize == 12) {

            //
            //  We read in the entire fat.  Note that using prepare write marks
            //  the bcb pre-dirty, so we don't have to do it explicitly.
            //

            RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);

            FatPrepareWriteVolumeFile( IrpContext,
                                       Vcb,
                                       FatReservedBytes( &Vcb->Bpb ),
                                       FatBytesPerFat( &Vcb->Bpb ),
                                       &SavedBcbs[0][0],
                                       &PinnedFat,
                                       TRUE,
                                       FALSE );

            //
            //  Mark the affected sectors dirty.  Note that FinalSectorLbo is
            //  the Lbo of the END of the entry (Thus * 3 + 2).  This makes sure
            //  we catch the case of a dirty fat entry straddling a sector boundary.
            //
            //  Note that if the first AddMcbEntry succeeds, all following ones
            //  will simply coalesce, and thus also succeed.
            //

            StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
                             & ~(SectorSize - 1);

            FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
                             ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            //  Store the entries into the fat; we need a little
            //  synchronization here and can't use a spinlock since the bytes
            //  might not be resident.
            //

            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;

            for (Cluster = StartingFatIndex;
                 Cluster < StartingFatIndex + ClusterCount - 1;
                 Cluster++) {

                FatSet12BitEntry( PinnedFat,
                                  Cluster,
                                  ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
            }

            //
            //  Save the last entry
            //

            FatSet12BitEntry( PinnedFat,
                              Cluster,
                              ChainTogether ?
                              FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE );

            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;

        } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {

            //
            //  DEAL WITH 32 BIT CASE
            //

            for (;;) {

                VBO StartOffsetInVolume;
                VBO FinalOffsetInVolume;

                ULONG Page;
                ULONG FinalCluster;
                PULONG FatEntry = NULL;
                ULONG ClusterCountThisRun;

                StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
                                      StartingFatIndex * sizeof(FAT_ENTRY);

                if (ClusterCount > MAXCOUNTCLUS) {
                    ClusterCountThisRun = MAXCOUNTCLUS;
                } else {
                    ClusterCountThisRun = ClusterCount;
                }

                FinalOffsetInVolume = StartOffsetInVolume +
                                      (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);

                {
                    ULONG NumberOfPages;
                    ULONG Offset;

                    NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                    (StartOffsetInVolume / PAGE_SIZE) + 1;

                    RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                    for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                          Page < NumberOfPages;
                          Page++, Offset += PAGE_SIZE ) {

                        FatPrepareWriteVolumeFile( IrpContext,
                                                   Vcb,
                                                   Offset,
                                                   PAGE_SIZE,
                                                   &SavedBcbs[Page][0],
                                                   (PVOID *)&SavedBcbs[Page][1],
                                                   TRUE,
                                                   FALSE );

                        if (Page == 0) {

                            FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
                                                (StartOffsetInVolume % PAGE_SIZE));
                        }
                    }
                }

                //
                //  Mark the run dirty
                //

                StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
                FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

                for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                    FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
                }

                //
                //  Store the entries
                //
                //  We need extra synchronization here for broken architectures
                //  like the ALPHA that don't support atomic 16 bit writes.
                //

#ifdef ALPHA
                FatLockFreeClusterBitMap( Vcb );
                ReleaseMutex = TRUE;
#endif // ALPHA

                FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
                Page = 0;

                for (Cluster = StartingFatIndex;
                     Cluster <= FinalCluster;
                     Cluster++, FatEntry++) {

                    //
                    //  If we just crossed a page boundary (as opposed to starting
                    //  on one), update our idea of FatEntry.

                    if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
                         (Cluster != StartingFatIndex) ) {

                        Page += 1;
                        FatEntry = (PULONG)SavedBcbs[Page][1];
                    }

                    *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
                                                FAT_CLUSTER_AVAILABLE;
                }

                //
                //  Fix up the last entry if we were chaining together
                //

                if ((ClusterCount <= MAXCOUNTCLUS) &&
                    ChainTogether ) {

                    *(FatEntry - 1) = FAT_CLUSTER_LAST;
                }

#ifdef ALPHA
                FatUnlockFreeClusterBitMap( Vcb );
                ReleaseMutex = FALSE;
#endif // ALPHA

                {
                    ULONG i;

                    //
                    //  Unpin the Bcbs
                    //

                    for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {

                        FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
                        SavedBcbs[i][0] = NULL;
                    }
                }

                if (ClusterCount <= MAXCOUNTCLUS) {

                    break;

                } else {

                    StartingFatIndex += MAXCOUNTCLUS;
                    ClusterCount -= MAXCOUNTCLUS;
                }
            }

        } else {

            //
            //  DEAL WITH 16 BIT CASE
            //

            VBO StartOffsetInVolume;
            VBO FinalOffsetInVolume;

            ULONG Page;
            ULONG FinalCluster;
            PUSHORT FatEntry = NULL;

            StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
                                  StartingFatIndex * sizeof(USHORT);

            FinalOffsetInVolume = StartOffsetInVolume +
                                  (ClusterCount - 1) * sizeof(USHORT);

            //
            //  Read in one page of fat at a time.  We cannot read in the
            //  all of the fat we need because of cache manager limitations.
            //
            //  SavedBcb was initialized to be able to hold the largest
            //  possible number of pages in a fat plus an extra one to
            //  accommodate the boot sector, plus one more to make sure there
            //  is enough room for the RtlZeroMemory below that needs to mark
            //  the first Bcb after all the ones we will use as an end marker.
            //

            {
                ULONG NumberOfPages;
                ULONG Offset;

                NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
                                (StartOffsetInVolume / PAGE_SIZE) + 1;

                RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );

                for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
                      Page < NumberOfPages;
                      Page++, Offset += PAGE_SIZE ) {

                    FatPrepareWriteVolumeFile( IrpContext,
                                               Vcb,
                                               Offset,
                                               PAGE_SIZE,
                                               &SavedBcbs[Page][0],
                                               (PVOID *)&SavedBcbs[Page][1],
                                               TRUE,
                                               FALSE );

                    if (Page == 0) {

                        FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
                                             (StartOffsetInVolume % PAGE_SIZE));
                    }
                }
            }

            //
            //  Mark the run dirty
            //

            StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
            FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);

            for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {

                FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
            }

            //
            //  Store the entries
            //
            //  We need extra synchronization here for broken architectures
            //  like the ALPHA that don't support atomic 16 bit writes.
            //

#ifdef ALPHA
            FatLockFreeClusterBitMap( Vcb );
            ReleaseMutex = TRUE;
#endif // ALPHA

            FinalCluster = StartingFatIndex + ClusterCount - 1;
            Page = 0;

            for (Cluster = StartingFatIndex;
                 Cluster <= FinalCluster;
                 Cluster++, FatEntry++) {

                //
                //  If we just crossed a page boundary (as opposed to starting
                //  on one), update our idea of FatEntry.

                if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
                     (Cluster != StartingFatIndex) ) {

                    Page += 1;
                    FatEntry = (PUSHORT)SavedBcbs[Page][1];
                }

                *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
                                                      FAT_CLUSTER_AVAILABLE);
            }

            //
            //  Fix up the last entry if we were chaining together
            //

            if ( ChainTogether ) {

#ifdef _MSC_VER
#pragma warning( suppress: 4310 )
#endif
                *(FatEntry - 1) = (USHORT)FAT_CLUSTER_LAST;

            }
#ifdef ALPHA
            FatUnlockFreeClusterBitMap( Vcb );
            ReleaseMutex = FALSE;
#endif // ALPHA
        }

    } _SEH2_FINALLY {

        ULONG i;

        DebugUnwind( FatSetFatRun );

        //
        //  If we still somehow have the Mutex, release it.
        //

        if (ReleaseMutex) {

            FatUnlockFreeClusterBitMap( Vcb );
        }

        //
        //  Unpin the Bcbs
        //

        for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {

            FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
        }

        //
        //  At this point nothing in this finally clause should have raised.
        //  So, now comes the unsafe (sigh) stuff.
        //

        if ( _SEH2_AbnormalTermination() &&
             (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {

            //
            //  Fat32 unwind
            //
            //  This case is more complex because the FAT12 and FAT16 cases
            //  pin all the needed FAT pages (128K max), after which it
            //  can't fail, before changing any FAT entries.  In the Fat32
            //  case, it may not be practical to pin all the needed FAT
            //  pages, because that could span many megabytes.  So Fat32
            //  attacks in chunks, and if a failure occurs once the first
            //  chunk has been updated, we have to back out the updates.
            //
            //  The unwind consists of walking back over each FAT entry we
            //  have changed, setting it back to the previous value.  Note
            //  that the previous value will either be FAT_CLUSTER_AVAILABLE
            //  (if ChainTogether==TRUE) or a simple link to the successor
            //  (if ChainTogether==FALSE).
            //
            //  We concede that any one of these calls could fail too; our
            //  objective is to make this case no more likely than the case
            //  for a file consisting of multiple disjoint runs.
            //

            while ( StartingFatIndex > SavedStartingFatIndex ) {

                StartingFatIndex--;

                FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
                                ChainTogether ?
                                StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
            }
        }

        DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
    } _SEH2_END;

    return;
}
#define FatSet12BitEntry(FAT, INDEX, ENTRY)
Definition: fat.h:603
#define MAXCOUNTCLUS
#define FatLockFreeClusterBitMap(VCB)
Definition: allocsup.c:99
VOID FatSetFatRun(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartingFatIndex, IN ULONG ClusterCount, IN BOOLEAN ChainTogether)
Definition: allocsup.c:4139
#define FatUnlockFreeClusterBitMap(VCB)
Definition: allocsup.c:112
#define COUNTSAVEDBCBS
#define _SEH2_AbnormalTermination()
Definition: pseh2_64.h:160
BOOL WINAPI DECLSPEC_HOTPATCH ReleaseMutex(IN HANDLE hMutex)
Definition: synch.c:618
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
_In_ ULONG SectorSize
Definition: halfuncs.h:291

Referenced by FatSetFatRun().

◆ FatSetupAllocationSupport()

VOID
FatSetupAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine fills in the Allocation Support structure in the Vcb:
    the derived geometry fields, the FAT window array (FAT32 volumes
    large enough to need more than one bitmap window), and the free
    cluster bitmap for the initially selected window.  It also extends
    the virtual volume file's cache map to cover the FAT.

Arguments:

    Vcb - Supplies the Vcb to fill in.

Return Value:

    VOID - on an exception the partially built state is torn down via
        FatTearDownAllocationSupport before the exception propagates.

--*/

{
    ULONG BitIndex;
    ULONG ClustersDescribableByFat;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, "  Vcb = %p\n", Vcb);

    //
    //  Compute a number of fields for Vcb.AllocationSupport
    //

    Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
    Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );

    Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );

    Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );

    Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );

    Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
    Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
    Vcb->AllocationSupport.NumberOfFreeClusters = 0;


    //
    //  Deal with a bug in DOS 5 format, if the Fat is not big enough to
    //  describe all the clusters on the disk, reduce this number.  We expect
    //  that fat32 volumes will not have this problem.
    //
    //  Turns out this was not a good assumption.  We have to do this always now.
    //

    ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
                                                    Vcb->Bpb.SectorsPerFat) *
                                  Vcb->Bpb.BytesPerSector * 8)
                                 / FatIndexBitSize(&Vcb->Bpb) ) - 2;

    if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {

        Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
    }

    //
    //  Extend the virtual volume file to include the Fat
    //

    {
        CC_FILE_SIZES FileSizes;

        FileSizes.AllocationSize.QuadPart =
        FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
                                       FatBytesPerFat( &Vcb->Bpb ));
        FileSizes.ValidDataLength = FatMaxLarge;

        if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {

            FatInitializeCacheMap( Vcb->VirtualVolumeFile,
                                   &FileSizes,
                                   TRUE,
                                   &FatData.CacheManagerNoOpCallbacks,
                                   Vcb );

        } else {

            CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
        }
    }

    _SEH2_TRY {

        //
        //  Large FAT32 volumes are split into bitmap windows of
        //  MAX_CLUSTER_BITMAP_SIZE clusters each; everything else uses a
        //  single window covering the whole FAT.
        //

        if (FatIsFat32(Vcb) &&
            Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {

            Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
                                    MAX_CLUSTER_BITMAP_SIZE - 1) /
                                   MAX_CLUSTER_BITMAP_SIZE;

        } else {

            Vcb->NumberOfWindows = 1;
        }

        Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
                                                 Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
                                                 TAG_FAT_WINDOW );

        RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
                             NULL,
                             0 );

        //
        //  Choose a FAT window to begin operation in.
        //

        if (Vcb->NumberOfWindows > 1) {

            //
            //  Read the fat and count up free clusters.  We bias by the two reserved
            //  entries in the FAT.
            //

            FatExamineFatEntries( IrpContext, Vcb,
                                  2,
                                  Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
                                  TRUE,
                                  NULL,
                                  NULL);


            //
            //  Pick a window to begin allocating from
            //

            Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];

        } else {

            Vcb->CurrentWindow = &Vcb->Windows[0];

            //
            //  Carefully bias ourselves by the two reserved entries in the FAT.
            //

            Vcb->CurrentWindow->FirstCluster = 2;
            Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
        }

        //
        //  Now transition to the FAT window we have chosen.
        //

        FatExamineFatEntries( IrpContext, Vcb,
                              0,
                              0,
                              FALSE,
                              Vcb->CurrentWindow,
                              NULL);

        //
        //  Now set the ClusterHint to the first free bit in our favorite
        //  window (except the ClusterHint is off by two).
        //

        Vcb->ClusterHint =
            (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
                BitIndex + 2 : 2;

    } _SEH2_FINALLY {

        DebugUnwind( FatSetupAllocationSupport );

        //
        //  If we hit an exception, back out.
        //

        if (_SEH2_AbnormalTermination()) {

            FatTearDownAllocationSupport( IrpContext, Vcb );
        }
    } _SEH2_END;

    return;
}
static CC_FILE_SIZES FileSizes
#define FatNumberOfClusters(B)
Definition: fat.h:482
#define FatRootDirectoryLbo(B)
Definition: fat.h:445
#define FatFileAreaLbo(B)
Definition: fat.h:458
#define FatBytesPerCluster(B)
Definition: fat.h:408
#define FatRootDirectorySize(B)
Definition: fat.h:427
#define TAG_FAT_WINDOW
Definition: nodetype.h:166
VOID FatExamineFatEntries(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb, IN ULONG StartIndex OPTIONAL, IN ULONG EndIndex OPTIONAL, IN BOOLEAN SetupWindows, IN PFAT_WINDOW SwitchToWindow OPTIONAL, IN PULONG BitMapBuffer OPTIONAL)
Definition: allocsup.c:4720
INLINE ULONG FatSelectBestWindow(IN PVCB Vcb)
Definition: allocsup.c:279
UCHAR FatLogOf(IN ULONG Value)
Definition: allocsup.c:4655
VOID FatTearDownAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:549
VOID FatSetupAllocationSupport(IN PIRP_CONTEXT IrpContext, IN PVCB Vcb)
Definition: allocsup.c:359
VOID FatInitializeCacheMap(_In_ PFILE_OBJECT FileObject, _In_ PCC_FILE_SIZES FileSizes, _In_ BOOLEAN PinAccess, _In_ PCACHE_MANAGER_CALLBACKS Callbacks, _In_ PVOID LazyWriteContext)
Definition: cachesup.c:62
LARGE_INTEGER FatMaxLarge
Definition: fatdata.c:63
FAT_DATA FatData
Definition: fatdata.c:56
#define FatIsFat32(VCB)
Definition: fatprocs.h:1446
VOID NTAPI CcSetFileSizes(IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
Definition: fssup.c:356
LARGE_INTEGER FileSize
Definition: cctypes.h:16
LARGE_INTEGER ValidDataLength
Definition: cctypes.h:17
LARGE_INTEGER AllocationSize
Definition: cctypes.h:15
CACHE_MANAGER_CALLBACKS CacheManagerNoOpCallbacks
Definition: fatstruc.h:160
LONGLONG QuadPart
Definition: typedefs.h:114

Referenced by FatSetupAllocationSupport().

◆ FatTearDownAllocationSupport()

VOID
FatTearDownAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    Releases everything FatSetupAllocationSupport built: the FAT window
    array, the free cluster bitmap's backing buffer, and all runs
    recorded in the dirty FAT Mcb.  Safe to call on a partially
    initialized Vcb.

Arguments:

    Vcb - Supplies the Vcb whose allocation support is being torn down.

Return Value:

    VOID

--*/

{
    DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, "  Vcb = %p\n", Vcb);

    PAGED_CODE();

    //
    //  Release the per-window bookkeeping, if any was allocated.
    //

    if (Vcb->Windows != NULL) {

        ExFreePool( Vcb->Windows );
        Vcb->Windows = NULL;
    }

    //
    //  Release the free cluster bitmap's backing store.  The NULL
    //  buffer pointer doubles as the "torn down" flag.
    //

    if (Vcb->FreeClusterBitMap.Buffer != NULL) {

        ExFreePool( Vcb->FreeClusterBitMap.Buffer );
        Vcb->FreeClusterBitMap.Buffer = NULL;
    }

    //
    //  Finally, drop every run recorded in the dirty fat Mcb.
    //

    FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );

    DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);

    UNREFERENCED_PARAMETER( IrpContext );

    return;
}
VOID FatRemoveMcbEntry(IN PVCB Vcb, IN PLARGE_MCB Mcb, IN VBO Vbo, IN ULONG SectorCount)
Definition: fsctrl.c:599
#define UNREFERENCED_PARAMETER(P)
Definition: ntbasedef.h:317

Referenced by FatDeleteVcb(), and FatSetupAllocationSupport().