📄 vmcbsup.c
字号:
BOOLEAN - TRUE if the mapping is valid and FALSE otherwise.
--*/
{
BOOLEAN Result;
PAGED_CODE();
DebugTrace(( +1, Dbg, "UdfVmcbLbnToVbn, Lbn = %08x\n", Lbn ));
//
// If the requested Lbn is greater than the maximum allowed Lbn
// then the result is FALSE
//
if (Lbn > Vmcb->MaximumLbn) {
DebugTrace(( -1, Dbg, "Lbn too large, UdfVmcbLbnToVbn -> FALSE\n" ));
return FALSE;
}
//
// Now grab the mutex for the vmcb
//
(VOID)KeWaitForSingleObject( &Vmcb->Mutex,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER) NULL );
try {
Result = UdfVmcbLookupMcbEntry( &Vmcb->LbnIndexed,
Lbn,
Vbn,
SectorCount,
NULL );
DebugTrace(( 0, Dbg, "*Vbn = %08x\n", *Vbn ));
} finally {
(VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );
DebugUnwind("UdfVmcbLbnToVbn");
DebugTrace(( -1, Dbg, "UdfVmcbLbnToVbn -> Result = %08x\n", Result ));
}
return Result;
}
BOOLEAN
UdfAddVmcbMapping (
IN PVMCB Vmcb,
IN LBN Lbn,
IN ULONG SectorCount,
IN BOOLEAN ExactEnd,
OUT PVBN Vbn,
OUT PULONG AlignedSectorCount
)
/*++
Routine Description:
This routine adds a new LBN to VBN mapping to the VMCB structure. When
a new LBN is added to the structure it does it only on page aligned
boundaries.
If pool is not available to store the information this routine will
raise a status value indicating insufficient resources.
Arguments:
Vmcb - Supplies the VMCB being updated.
Lbn - Supplies the starting LBN to add to VMCB.
SectorCount - Supplies the number of Sectors in the run
ExactEnd - Indicates that instead of aligning to map sectors beyond
the end of the request, use a hole. Implies trying to look at
these sectors could be undesireable.
Vbn - Receives the assigned VBN
AlignedSectorCount - Receives the actual sector count created in the
Vmcb for page alignment purposes. Vbn+AlignedSectorCount-1 == LastVbn.
Return Value:
BOOLEAN - TRUE if this is a new mapping and FALSE if the mapping
for the LBN already exists. If it already exists then the
sector count for this new addition must already be in the
VMCB structure
--*/
{
BOOLEAN Result;
//
// Track which of the two mcbs we have modified so the finally clause
// can undo a partially-completed double insertion on abnormal
// termination (e.g. FsRtlAddMcbEntry raising on insufficient pool).
//
BOOLEAN VbnMcbAdded;
BOOLEAN LbnMcbAdded;
LBN LocalLbn;
VBN LocalVbn;
ULONG LocalCount;
PAGED_CODE();
DebugTrace(( +1, Dbg, "UdfAddVmcbMapping, Lbn = %08x\n", Lbn ));
DebugTrace(( 0, Dbg, " SectorCount = %08x\n", SectorCount ));
ASSERT( SectorCount != 0 );
VbnMcbAdded = FALSE;
LbnMcbAdded = FALSE;
//
// Now grab the mutex for the vmcb
//
(VOID)KeWaitForSingleObject( &Vmcb->Mutex,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER) NULL );
try {
//
// Check if the Lbn is already mapped, which means we find an entry
// with a non zero mapping Vbn value.
//
if (UdfVmcbLookupMcbEntry( &Vmcb->LbnIndexed,
Lbn,
Vbn,
&LocalCount,
NULL )) {
//
// It is already mapped so now the sector count must not exceed
// the count already in the run
//
// If the existing run fully covers the request, return the
// existing Vbn (already stored in *Vbn by the lookup above).
// NOTE(review): in this early-return path *AlignedSectorCount
// is never written - callers presumably ignore it when FALSE
// is returned; confirm.
//
if (SectorCount <= LocalCount) {
try_leave( Result = FALSE );
}
}
//
// At this point, we did not find a full existing mapping for the
// Lbn and count. But there might be some overlapping runs that we'll
// need to now remove from the vmcb structure. So for each Lbn in
// the range we're after, check to see if it is mapped and remove the
// mapping. We only need to do this test if the sector count is less
// than or equal to a page size. Because those are the only
// structures that we know we'll try an remove/overwrite.
//
// NOTE(review): PageAlign(Vmcb, 1) is presumably the number of
// sectors per page for this vmcb - confirm against the PageAlign
// definition.
//
if (SectorCount <= PageAlign(Vmcb, 1)) {
if (UdfVmcbLookupMcbEntry( &Vmcb->LbnIndexed,
Lbn,
Vbn,
&LocalCount,
NULL )) {
//
// A partial overlap exists; tear out a full page worth of
// mapping starting at the overlapping Vbn so the run can be
// re-created below on fresh page-aligned boundaries.
//
UdfRemoveVmcbMapping( Vmcb, *Vbn, PageAlign(Vmcb, 1) );
}
}
//
// We need to add this new run at the end of the Vbns. To do this we
// need to look up the last mcb entry or use a vbn for the second
// page, if the mcb is empty. We'll also special case the situation
// where the last lbn of the mapping and the mapping we're adding
// simply flow into each other in which case we'll not bother bumping
// the vbn to a page alignment
//
if (FsRtlLookupLastMcbEntry( &Vmcb->VbnIndexed, &LocalVbn, &LocalLbn )) {
if (LocalLbn + 1 == Lbn) {
//
// The new run is physically contiguous with the tail of the
// existing mapping, so extend it without page alignment.
//
LocalVbn = LocalVbn + 1;
LocalLbn = LocalLbn + 1;
} else {
//
// Get the next available Vbn Page, and calculate the
// Lbn for the page containing the Lbn
//
LocalVbn = PageAlign( Vmcb, LocalVbn + 1 );
LocalLbn = PageAlign( Vmcb, Lbn + 1 ) - PageAlign( Vmcb, 1 );
}
} else {
//
// Get the first available Vbn page, and calculate the
// Lbn for the page containing the Lbn.
//
// NOTE(review): the routine comment above says "use a vbn for the
// second page, if the mcb is empty", but the code starts at Vbn 0
// here - confirm which is intended.
//
LocalVbn = 0;
LocalLbn = PageAlign( Vmcb, Lbn + 1 ) - PageAlign( Vmcb, 1 );
}
//
// Calculate the number of sectors that we need to map to keep
// everything on a page granularity.
//
LocalCount = PageAlign( Vmcb, SectorCount + (Lbn - LocalLbn) );
//
// See if we should use a hole to map the alignment at the end of the request.
//
if (ExactEnd && Lbn + SectorCount < LocalLbn + LocalCount) {
LocalCount = SectorCount + (Lbn - LocalLbn);
}
//
// Add the double mapping
//
FsRtlAddMcbEntry( &Vmcb->VbnIndexed,
LocalVbn,
LocalLbn,
LocalCount );
VbnMcbAdded = TRUE;
FsRtlAddMcbEntry( &Vmcb->LbnIndexed,
LocalLbn,
LocalVbn,
LocalCount );
LbnMcbAdded = TRUE;
//
// Return the caller's exact Vbn (offset into the page-aligned run)
// and the sector count measured from that Vbn to the end of the run.
//
*Vbn = LocalVbn + (Lbn - LocalLbn);
*AlignedSectorCount = LocalCount - (Lbn - LocalLbn);
try_leave( Result = TRUE );
} finally {
//
// If this is an abnormal termination then clean up any mcb's that we
// might have modified.
//
if (AbnormalTermination()) {
if (VbnMcbAdded) { FsRtlRemoveMcbEntry( &Vmcb->VbnIndexed, LocalVbn, LocalCount ); }
if (LbnMcbAdded) { FsRtlRemoveMcbEntry( &Vmcb->LbnIndexed, LocalLbn, LocalCount ); }
}
(VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );
DebugUnwind("UdfAddVmcbMapping");
DebugTrace(( 0, Dbg, " LocalVbn = %08x\n", LocalVbn ));
DebugTrace(( 0, Dbg, " LocalLbn = %08x\n", LocalLbn ));
DebugTrace(( 0, Dbg, " LocalCount = %08x\n", LocalCount ));
DebugTrace(( 0, Dbg, " *Vbn = %08x\n", *Vbn ));
DebugTrace(( 0, Dbg, " *AlignedSectorCount = %08x\n", *AlignedSectorCount ));
DebugTrace((-1, Dbg, "UdfAddVmcbMapping -> %08x\n", Result ));
}
return Result;
}
VOID
UdfRemoveVmcbMapping (
IN PVMCB Vmcb,
IN VBN Vbn,
IN ULONG SectorCount
)
/*++
Routine Description:
This routine removes a Vmcb mapping.
If pool is not available to store the information this routine will
raise a status value indicating insufficient resources.
Arguments:
Vmcb - Supplies the Vmcb being updated.
Vbn - Supplies the VBN to remove
SectorCount - Supplies the number of sectors to remove.
Return Value:
None.
--*/
{
LBN MappedLbn;
ULONG RunSize;
ULONG Offset;
PAGED_CODE();
DebugTrace((+1, Dbg, "UdfRemoveVmcbMapping, Vbn = %08x\n", Vbn ));
DebugTrace(( 0, Dbg, " SectorCount = %08x\n", SectorCount ));
//
// Serialize access to the vmcb under its mutex
//
(VOID)KeWaitForSingleObject( &Vmcb->Mutex,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER) NULL );
try {
//
// Walk the range one sector at a time: translate each Vbn to its
// current Lbn, then drop that single sector from both directions
// of the double map.
//
for (Offset = 0; Offset < SectorCount; Offset += 1) {
//
// Every Vbn in the range is expected to be mapped; finding an
// unmapped sector here means the vmcb is inconsistent, so
// bugcheck rather than continue.
//
if (!UdfVmcbLookupMcbEntry( &Vmcb->VbnIndexed,
Vbn + Offset,
&MappedLbn,
&RunSize,
NULL )) {
UdfBugCheck( 0, 0, 0 );
}
FsRtlRemoveMcbEntry( &Vmcb->VbnIndexed,
Vbn + Offset,
1 );
FsRtlRemoveMcbEntry( &Vmcb->LbnIndexed,
MappedLbn,
1 );
}
{
DebugTrace(( 0, Dbg, "VbnIndex:\n", 0 ));
DebugTrace(( 0, Dbg, "LbnIndex:\n", 0 ));
}
} finally {
//
// Always release the mutex, even on an abnormal unwind.
//
(VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );
DebugUnwind( "UdfRemoveVmcbMapping" );
DebugTrace(( -1, Dbg, "UdfRemoveVmcbMapping -> VOID\n" ));
}
return;
}
//
// Local support routine
//
BOOLEAN
UdfVmcbLookupMcbEntry (
IN PMCB Mcb,
IN VBN Vbn,
OUT PLBN Lbn,
OUT PULONG SectorCount OPTIONAL,
OUT PULONG Index OPTIONAL
)
/*++
Routine Description:
This routine retrieves the mapping of a Vbn to an Lbn from an Mcb.
It indicates if the mapping exists and the size of the run.
The only difference betweent this and the regular FsRtlLookupMcbEntry
is that we undo the behavior of returning TRUE in holes in the allocation.
This is because we don't want to avoid mapping at Lbn 0, which is how the
emulated behavior of the small Mcb package tells callers that there is no
mapping at that location in a hole. We have holes all over our Vbn space
in the VbnIndexed map.
The small Mcb package was able to get away with this because Lbn 0 was the
boot sector (or similar magic location) on the disc. In our metadata stream,
we wish to use Vbn 0 (remember this is a double map).
Arguments:
Mcb - Supplies the Mcb being examined.
Vbn - Supplies the Vbn to lookup.
Lbn - Receives the Lbn corresponding to the Vbn. A value of 0 is
returned if the Vbn does not have a corresponding Lbn.
SectorCount - Receives the number of sectors that map from the Vbn to
contiguous Lbn values beginning with the input Vbn.
Index - Receives the index of the run found.
Return Value:
BOOLEAN - TRUE if the Vbn is within the range of VBNs mapped by the
MCB (not if it corresponds to a hole in the mapping), and FALSE
if the Vbn is beyond the range of the MCB's mapping.
For example, if an MCB has a mapping for VBNs 5 and 7 but not for
6, then a lookup on Vbn 5 or 7 will yield a non zero Lbn and a sector
count of 1. A lookup for Vbn 6 will return FALSE with an Lbn value of
0, and lookup for Vbn 8 or above will return FALSE.
--*/
{
BOOLEAN Results;
//
// Pre-set the large-integer outputs to the "hole" values. When the
// lookup returns FALSE (Vbn beyond the end of the Mcb) it may not
// write through these pointers, and the tests below would otherwise
// read uninitialized stack.
//
LONGLONG LiLbn = -1;
LONGLONG LiSectorCount = 0;
Results = FsRtlLookupLargeMcbEntry( (PLARGE_MCB)Mcb,
(LONGLONG)(Vbn),
&LiLbn,
ARGUMENT_PRESENT(SectorCount) ? &LiSectorCount : NULL,
NULL,
NULL,
Index );
//
// An Lbn of -1 is how the large Mcb package reports a hole, and it is
// also our pre-set value for "lookup never wrote the output" - both
// mean there is no mapping at this Vbn.
//
if ((ULONG)LiLbn == (ULONG)-1) {
*Lbn = 0;
Results = FALSE;
} else {
*Lbn = (ULONG)LiLbn;
}
if (ARGUMENT_PRESENT(SectorCount)) { *SectorCount = ((ULONG)LiSectorCount); }
return Results;
}
#if VMCB_WRITE_SUPPORT
VOID
PbSetDirtyVmcb (
IN PVMCB Vmcb,
IN ULONG LbnPageNumber,
IN ULONG Mask
)
/*++
Routine Description:
This routine sets the sectors within a page as dirty based on the input
mask.
If pool is not available to store the information this routine will
raise a status value indicating insufficient resources.
Arguments:
Vmcb - Supplies the Vmcb being manipulated.
LbnPageNumber - Supplies the Page Number (LBN based) of the page being
modified. For example, with a page size of 8 a page number of 0
corresponds to LBN values 0 through 7, a page number of 1 corresponds
to 8 through 15, and so on.
Mask - Supplies the mask of dirty sectors to set for the Page (a 1 bit
means to set it dirty). For example to set LBN 9 dirty on a system
with a page size of 8 the LbnPageNumber will be 1, and the mask will
be 0x00000002.
Return Value:
None.
--*/
{
DIRTY_PAGE Key;
PDIRTY_PAGE Entry;
PAGED_CODE();
DebugTrace(+1, Dbg, ( "UdfSetDirtyVmcb\n", 0 ) );
DebugTrace( 0, Dbg, ( " LbnPageNumber = %08x\n", LbnPageNumber ) );
DebugTrace( 0, Dbg, ( " Mask = %08x\n", Mask ) );
//
// Build the lookup/insert key. A freshly inserted page starts with an
// empty dirty mask; the requested bits are OR'd in below.
//
Key.LbnPageNumber = LbnPageNumber;
Key.DirtyMask = 0;
//
// Now grab the mutex for the vmcb
//
(VOID)KeWaitForSingleObject( &Vmcb->Mutex,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER) NULL );
try {
//
// Insert (or find) the page's entry in the dirty table.
// NOTE(review): presumably RtlInsertElementGenericTable returns the
// existing element when the key is already present, and the table's
// allocate routine raises on insufficient pool (per the routine
// header) rather than letting this return NULL - confirm; otherwise
// the dereference below can fault on allocation failure.
//
Entry = RtlInsertElementGenericTable( &Vmcb->DirtyTable,
&Key,
sizeof(DIRTY_PAGE),
NULL );
Entry->DirtyMask = (Entry->DirtyMask | Mask) & (SECTOR_MASK); //**** change to manifest constant
DebugTrace(0, Dbg, ( "DirtyMask = %08x\n", Entry->DirtyMask ) );
{
DebugTrace(0, Dbg, ( "", PbDumpDirtyVmcb(Vmcb) ) );
}
} finally {
(VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );
DebugUnwind("UdfSetDirtyVmcb");
DebugTrace(-1, Dbg, ( "UdfSetDirtyVmcb -> VOID\n", 0 ) );
}
return;
}
#endif // VMCB_WRITE_SUPPORT
#if VMCB_WRITE_SUPPORT
VOID
PbSetCleanVmcb (
IN PVMCB Vmcb,
IN ULONG LbnPageNumber,
IN ULONG Mask
)
/*++
Routine Description:
This routine sets all of the sectors within a page as clean. All
of the sectors in a page whether they are dirty or not are set clean
by this procedure.
Arguments:
Vmcb - Supplies the Vmcb being manipulated.
LbnPageNumber - Supplies the Page Number (Lbn based) of page being
modified. For example, with a page size of 8 a page number of 0
corresponds to LBN values 0 through 7, a page number of 1 corresponds
to 8 through 15, and so on.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -