/* allocsup.c — FAT allocation support (partial extraction) */
Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
Vcb->AllocationSupport.NumberOfFreeClusters = 0;
//
// Deal with a bug in DOS 5 format, if the Fat is not big enough to
// describe all the clusters on the disk, reduce this number. We expect
// that fat32 volumes will not have this problem.
//
// Turns out this was not a good assumption. We have to do this always now.
//
ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
Vcb->Bpb.SectorsPerFat) *
Vcb->Bpb.BytesPerSector * 8)
/ FatIndexBitSize(&Vcb->Bpb) ) - 2;
if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
}
//
// Extend the virtual volume file to include the Fat
//
{
CC_FILE_SIZES FileSizes;
FileSizes.AllocationSize.QuadPart =
FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
FatBytesPerFat( &Vcb->Bpb ));
FileSizes.ValidDataLength = FatMaxLarge;
if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
CcInitializeCacheMap( Vcb->VirtualVolumeFile,
&FileSizes,
TRUE,
&FatData.CacheManagerNoOpCallbacks,
Vcb );
} else {
CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
}
}
try {
if (FatIsFat32(Vcb) &&
Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
MAX_CLUSTER_BITMAP_SIZE - 1) /
MAX_CLUSTER_BITMAP_SIZE;
BitMapSize = MAX_CLUSTER_BITMAP_SIZE;
} else {
Vcb->NumberOfWindows = 1;
BitMapSize = Vcb->AllocationSupport.NumberOfClusters;
}
Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
TAG_FAT_WINDOW );
RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
NULL,
0 );
//
// Chose a FAT window to begin operation in.
//
if (Vcb->NumberOfWindows > 1) {
//
// Read the fat and count up free clusters. We bias by the two reserved
// entries in the FAT.
//
FatExamineFatEntries( IrpContext, Vcb,
2,
Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
TRUE,
NULL,
NULL);
//
// Pick a window to begin allocating from
//
Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
} else {
Vcb->CurrentWindow = &Vcb->Windows[0];
//
// Carefully bias ourselves by the two reserved entries in the FAT.
//
Vcb->CurrentWindow->FirstCluster = 2;
Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
}
//
// Now transition to the FAT window we have chosen.
//
FatExamineFatEntries( IrpContext, Vcb,
0,
0,
FALSE,
Vcb->CurrentWindow,
NULL);
//
// Now set the ClusterHint to the first free bit in our favorite
// window (except the ClusterHint is off by two).
//
Vcb->ClusterHint =
(BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
BitIndex + 2 : 2;
} finally {
DebugUnwind( FatSetupAllocationSupport );
//
// If we hit an exception, back out.
//
if (AbnormalTermination()) {
FatTearDownAllocationSupport( IrpContext, Vcb );
}
}
return;
}
VOID
FatTearDownAllocationSupport (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine releases the allocation-support state attached to a Vcb
    at volume-close time: the free cluster bitmap buffer, the FAT window
    array, and every run recorded in the dirty fat Mcb.

Arguments:

    Vcb - Supplies the Vcb whose allocation support is being torn down.

Return Value:

    VOID

--*/

{
    DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
    DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);

    PAGED_CODE();

    //
    //  Release the free cluster bitmap buffer.  The buffer pointer is
    //  re-nulled afterwards so it doubles as an "already freed" flag.
    //

    if (Vcb->FreeClusterBitMap.Buffer != NULL) {

        ExFreePool( Vcb->FreeClusterBitMap.Buffer );
        Vcb->FreeClusterBitMap.Buffer = NULL;
    }

    //
    //  Release the FAT window array, if one was ever allocated, and
    //  null the pointer to defend against a double teardown.
    //

    if (Vcb->Windows != NULL) {

        ExFreePool( Vcb->Windows );
        Vcb->Windows = NULL;
    }

    //
    //  Finally, purge every run from the dirty fat Mcb.
    //

    FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );

    DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);

    UNREFERENCED_PARAMETER( IrpContext );

    return;
}
VOID
FatLookupFileAllocation (
IN PIRP_CONTEXT IrpContext,
IN PFCB FcbOrDcb,
IN VBO Vbo,
OUT PLBO Lbo,
OUT PULONG ByteCount,
OUT PBOOLEAN Allocated,
OUT PBOOLEAN EndOnMax,
OUT PULONG Index
)
/*++
Routine Description:
This routine looks up the existing mapping of VBO to LBO for a
file/directory. The information it queries is either stored in the
mcb field of the fcb/dcb or it is stored on in the fat table and
needs to be retrieved and decoded, and updated in the mcb.
Arguments:
FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
Vbo - Supplies the VBO whose LBO we want returned
Lbo - Receives the LBO corresponding to the input Vbo if one exists
ByteCount - Receives the number of bytes within the run the run
that correpond between the input vbo and output lbo.
Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
and FALSE otherwise.
EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
which results in a fractional bytecount.
Index - Receives the Index of the run
--*/
{
VBO CurrentVbo;
LBO CurrentLbo;
LBO PriorLbo;
VBO FirstVboOfCurrentRun;
LBO FirstLboOfCurrentRun;
BOOLEAN LastCluster;
ULONG Runs;
PVCB Vcb;
FAT_ENTRY FatEntry;
ULONG BytesPerCluster;
ULARGE_INTEGER BytesOnVolume;
FAT_ENUMERATION_CONTEXT Context;
PAGED_CODE();
DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
DebugTrace( 0, Dbg, " Lbo = %8lx\n", Lbo);
DebugTrace( 0, Dbg, " ByteCount = %8lx\n", ByteCount);
DebugTrace( 0, Dbg, " Allocated = %8lx\n", Allocated);
Context.Bcb = NULL;
Vcb = FcbOrDcb->Vcb;
*EndOnMax = FALSE;
//
// Check the trivial case that the mapping is already in our
// Mcb.
//
if ( FatLookupMcbEntry(Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {
*Allocated = TRUE;
ASSERT( ByteCount != 0);
//
// Detect the overflow case, trim and claim the condition.
//
if (Vbo + *ByteCount == 0) {
*EndOnMax = TRUE;
}
DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
return;
}
//
// Initialize the Vcb, the cluster size, LastCluster, and
// FirstLboOfCurrentRun (to be used as an indication of the first
// iteration through the following while loop).
//
BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
LastCluster = FALSE;
FirstLboOfCurrentRun = 0;
//
// Discard the case that the request extends beyond the end of
// allocation. Note that if the allocation size if not known
// AllocationSize is set to 0xffffffff.
//
if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
*Allocated = FALSE;
DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
return;
}
//
// The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
// and FatEntry to describe the beginning of the last entry in the Mcb.
// This is used as initialization for the following loop.
//
// If the Mcb was empty, we start at the beginning of the file with
// CurrentVbo set to 0 to indicate a new run.
//
if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
CurrentVbo -= (BytesPerCluster - 1);
CurrentLbo -= (BytesPerCluster - 1);
//
// Convert an index to a count.
//
Runs += 1;
} else {
DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
//
// Check for an FcbOrDcb that has no allocation
//
if (FcbOrDcb->FirstClusterOfFile == 0) {
*Allocated = FALSE;
DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
return;
} else {
CurrentVbo = 0;
CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
FirstVboOfCurrentRun = CurrentVbo;
FirstLboOfCurrentRun = CurrentLbo;
Runs = 0;
DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
}
}
//
// Now we know that we are looking up a valid Vbo, but it is
// not in the Mcb, which is a monotonically increasing list of
// Vbo's. Thus we have to go to the Fat, and update
// the Mcb as we go. We use a try-finally to unpin the page
// of fat hanging around. Also we mark *Allocated = FALSE, so that
// the caller wont try to use the data if we hit an exception.
//
*Allocated = FALSE;
try {
        /*
         *  NOTE(extraction): the remainder of FatLookupFileAllocation
         *  (the body of this try block and the rest of the file) was
         *  lost in extraction; the original text here was web-page UI
         *  residue, not source code.  Restore from the full file.
         */