/* vmcbsup.c */
Mask - Supplies the mask of clean sectors to set for the Page (a 1 bit
means to set it clean). For example to set LBN 9 clean on a system
with a page size of 8 the LbnPageNumber will be 1, and the mask will
be 0x00000002.
Return Value:
None.
--*/
{
DIRTY_PAGE Key;
PDIRTY_PAGE Entry;
PAGED_CODE();
DebugTrace(+1, Dbg, ( "UdfSetCleanVmcb\n", 0 ) );
DebugTrace( 0, Dbg, ( " LbnPageNumber = %08x\n", LbnPageNumber ) );
DebugTrace( 0, Dbg, ( " Mask = %08x\n", Mask ) );
Key.LbnPageNumber = LbnPageNumber;
//
// Now grab the mutex for the vmcb
//
(VOID)KeWaitForSingleObject( &Vmcb->Mutex,
Executive,
KernelMode,
FALSE,
(PLARGE_INTEGER) NULL );
try {
//
// If the page is not in the table, it is already all clean
//
if (Entry = RtlLookupElementGenericTable( &Vmcb->DirtyTable, &Key )) {
Entry->DirtyMask &= ~Mask;
DebugTrace(0, Dbg, ( "DirtyMask = %08x\n", Entry->DirtyMask ) );
//
// If the mask is all clean now, delete the entry
//
if (Entry->DirtyMask == 0) {
(VOID)RtlDeleteElementGenericTable( &Vmcb->DirtyTable, &Key );
}
}
{
DebugTrace(0, Dbg, ( "", PbDumpDirtyVmcb(Vmcb) ) );
}
} finally {
(VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );
DebugTrace(-1, Dbg, ( "UdfSetCleanVcmb -> VOID\n", 0 ) );
}
return;
}
#endif // VMCB_WRITE_SUPPORT
#if VMCB_WRITE_SUPPORT
ULONG
PbGetDirtySectorsVmcb (
    IN PVMCB Vmcb,
    IN ULONG LbnPageNumber
    )

/*++

Routine Description:

    This routine returns to its caller a mask of dirty sectors within a page.

Arguments:

    Vmcb - Supplies the Vmcb being manipulated

    LbnPageNumber - Supplies the Page Number (Lbn based) of page being
        modified.  For example, with a page size of 8 a page number of 0
        corresponds to LBN values 0 through 7, a page number of 1 corresponds
        to 8 through 15, and so on.

Return Value:

    ULONG - Receives a mask of dirty sectors within the specified page.
        (a 1 bit indicates that the sector is dirty).

--*/

{
    DIRTY_PAGE Key;
    PDIRTY_PAGE Entry;
    ULONG Mask;

    //
    //  This routine waits on the Vmcb mutex and walks a paged generic
    //  table, so like its sibling Vmcb routines it must run at passive
    //  level.  (The PAGED_CODE() was missing here but present in every
    //  other routine in this file.)
    //

    PAGED_CODE();

    DebugTrace(+1, Dbg, ( "UdfGetDirtySectorsVmcb\n", 0 ) );
    DebugTrace( 0, Dbg, ( " LbnPageNumber = %08x\n", LbnPageNumber ) );

    Key.LbnPageNumber = LbnPageNumber;

    //
    //  Now grab the mutex for the vmcb
    //

    (VOID)KeWaitForSingleObject( &Vmcb->Mutex,
                                 Executive,
                                 KernelMode,
                                 FALSE,
                                 (PLARGE_INTEGER) NULL );

    try {

        //
        //  If the page is not present in the dirty table then every
        //  sector in it is clean and the mask is simply zero.
        //

        if ((Entry = RtlLookupElementGenericTable( &Vmcb->DirtyTable,
                                                   &Key )) == NULL) {

            DebugTrace(0, Dbg, ( "Entry not found\n", 0 ) );

            try_leave( Mask = 0 );
        }

        Mask = Entry->DirtyMask & (SECTOR_MASK); //**** change to manifest constant

    } finally {

        (VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );

        DebugTrace(-1, Dbg, ( "UdfGetDirtySectorsVmcb -> %08x\n", Mask ) );
    }

    return Mask;
}
#endif // VMCB_WRITE_SUPPORT
#if VMCB_WRITE_SUPPORT
ULONG
PbGetAndCleanDirtyVmcb (
    IN PVMCB Vmcb,
    IN ULONG LbnPageNumber
    )

/*++

Routine Description:

    This routine returns to its caller a mask of dirty sectors within a page,
    and atomically clears those bits.  The capture of the mask and the removal
    of the page's dirty-table entry both happen under the Vmcb mutex.

Arguments:

    Vmcb - Supplies the Vmcb being manipulated

    LbnPageNumber - Supplies the Page Number (Lbn based) of page being
        modified.  For example, with a page size of 8 a page number of 0
        corresponds to LBN values 0 through 7, a page number of 1 corresponds
        to 8 through 15, and so on.

Return Value:

    ULONG - Receives a mask of dirty sectors within the specified page.
        (a 1 bit indicates that the sector is dirty).

--*/

{
    DIRTY_PAGE SearchKey;
    PDIRTY_PAGE DirtyPage;
    ULONG DirtyMask;

    PAGED_CODE();

    DebugTrace(+1, Dbg, ( "UdfGetAndCleanDirtyVmcb\n", 0 ) );
    DebugTrace( 0, Dbg, ( " LbnPageNumber = %08x\n", LbnPageNumber ) );

    SearchKey.LbnPageNumber = LbnPageNumber;

    //
    //  Serialize with everyone else who manipulates the dirty table.
    //

    (VOID)KeWaitForSingleObject( &Vmcb->Mutex,
                                 Executive,
                                 KernelMode,
                                 FALSE,
                                 (PLARGE_INTEGER) NULL );

    try {

        //
        //  Locate the dirty page within the dirty table.
        //

        DirtyPage = RtlLookupElementGenericTable( &Vmcb->DirtyTable,
                                                  &SearchKey );

        if (DirtyPage == NULL) {

            //
            //  No table entry means the page has no dirty sectors at all.
            //

            DebugTrace(0, Dbg, ( "Entry not found\n", 0 ) );

            DirtyMask = 0;

        } else {

            //
            //  Capture the dirty bits for the page, then clean the page
            //  by deleting its entry from the table.
            //

            DirtyMask = DirtyPage->DirtyMask & (SECTOR_MASK); //**** change to manifest constant

            (VOID) RtlDeleteElementGenericTable( &Vmcb->DirtyTable, &SearchKey );
        }

    } finally {

        (VOID) KeReleaseMutex( &Vmcb->Mutex, FALSE );

        DebugTrace(-1, Dbg, ( "UdfGetAndCleanDirtyVmcb -> %08x\n", DirtyMask ) );
    }

    return DirtyMask;
}
#endif // VMCB_WRITE_SUPPORT
//
// Local support routines
//
#if VMCB_WRITE_SUPPORT
RTL_GENERIC_COMPARE_RESULTS
PbCompareDirtyVmcb (
    IN PRTL_GENERIC_TABLE DirtyTable,
    IN PVOID FirstStruct,
    IN PVOID SecondStruct
    )

/*++

Routine Description:

    This generic table support routine compares two dirty page structures
    by their Lbn page numbers.

Arguments:

    DirtyTable - Supplies the generic table being queried

    FirstStruct - Really supplies the first structure to compare

    SecondStruct - Really supplies the second structure to compare

Return Value:

    RTL_GENERIC_COMPARE_RESULTS - The results of comparing the two
        input structures

--*/

{
    ULONG FirstPage = ((PDIRTY_PAGE)FirstStruct)->LbnPageNumber;
    ULONG SecondPage = ((PDIRTY_PAGE)SecondStruct)->LbnPageNumber;

    UNREFERENCED_PARAMETER( DirtyTable );

    PAGED_CODE();

    if (FirstPage == SecondPage) {

        return GenericEqual;
    }

    return (FirstPage < SecondPage) ? GenericLessThan : GenericGreaterThan;
}
#endif // VMCB_WRITE_SUPPORT
//
// Local support routines
//
#if VMCB_WRITE_SUPPORT
PVOID
PbAllocateDirtyVmcb (
    IN PRTL_GENERIC_TABLE DirtyTable,
    IN CLONG ByteSize
    )

/*++

Routine Description:

    This generic table support routine allocates memory for the dirty
    page table, using the pool tag 'Vmcb'.

Arguments:

    DirtyTable - Supplies the generic table being modified

    ByteSize - Supplies the size, in bytes, to allocate

Return Value:

    PVOID - Returns a pointer to the allocated data

--*/

{
    POOL_TYPE PoolType;

    PAGED_CODE();

    //
    //  The pool type to allocate from was stashed in the table context
    //  field when the generic table was initialized.
    //

    PoolType = (POOL_TYPE)DirtyTable->TableContext;

    return FsRtlAllocatePoolWithTag( PoolType, ByteSize, 'bcmV' );
}
#endif // VMCB_WRITE_SUPPORT
//
// Local support routines
//
#if VMCB_WRITE_SUPPORT
VOID
PbDeallocateDirtyVmcb (
    IN PRTL_GENERIC_TABLE DirtyTable,
    IN PVOID Buffer
    )

/*++

Routine Description:

    This generic table support routine deallocates memory previously
    obtained via PbAllocateDirtyVmcb.

Arguments:

    DirtyTable - Supplies the generic table being modified

    Buffer - Supplies the buffer being deallocated

Return Value:

    None.

--*/

{
    PAGED_CODE();

    UNREFERENCED_PARAMETER( DirtyTable );

    //
    //  Simply hand the buffer back to the system pool.
    //

    ExFreePool( Buffer );
}
#endif // VMCB_WRITE_SUPPORT
//
// Local support routines
//
#if VMCB_WRITE_SUPPORT
ULONG
PbDumpDirtyVmcb (
IN PVMCB Vmcb
)
/*++
Routine Description:
    This debugging routine enumerates the dirty table of a Vmcb and prints
    (via KdPrint) the Lbn page number and dirty sector mask of every entry.
Arguments:
    Vmcb - Supplies the Vmcb whose dirty table is to be dumped
Return Value:
    ULONG - Always returns 0, which allows the routine to be invoked from
        inside a DebugTrace argument list.
--*/
{
PDIRTY_PAGE Ptr;
PAGED_CODE();
KdPrint((" Dump Dirty Vmcb\n"));
// Walk the generic table from the first element (Restart == TRUE) to the
// last (Restart == FALSE continues the enumeration).
for (Ptr = RtlEnumerateGenericTable( &Vmcb->DirtyTable, TRUE );
Ptr != NULL;
Ptr = RtlEnumerateGenericTable( &Vmcb->DirtyTable, FALSE )) {
KdPrint((" LbnPageNumber = %08x, ", Ptr->LbnPageNumber ));
KdPrint(("DirtyMask = %08x\n", Ptr->DirtyMask ));
}
return 0;
}
#endif // VMCB_WRITE_SUPPORT
#if VMCB_WRITE_SUPPORT
NTSTATUS
PbFlushVolumeFile (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb
    )

/*++

Routine Description:

    The function carefully flushes the entire volume file.  It is necessary
    to dance around a bit because of complicated synchronization reasons.

    Under the Vmcb mutex we snapshot the dirty pages into an array of Vbns;
    the mutex is then dropped before any IO is issued, because holding it
    across cache manager calls can deadlock.

Arguments:

    IrpContext - Supplies the context of the request driving the flush

    Vcb - Supplies the Vcb being flushed

Return Value:

    NTSTATUS - The status of the flush operation

--*/

{
    ULONG ElementNumber;
    ULONG NumberOfDirtyPages;
    PULONG VbnsToFlush;
    LBN Lbn;
    PDIRTY_PAGE Ptr;
    NTSTATUS ReturnStatus = STATUS_SUCCESS;
    PVMCB Vmcb = (PNONOPAQUE_VMCB)&Vcb->Vmcb;

    //
    //  The only way we have to correctly synchronize things is to
    //  repin stuff, and then unpin repin it with WriteThrough as TRUE.
    //
    //  Grab the mutex for the vmcb
    //

    (VOID)KeWaitForSingleObject( &Vmcb->Mutex,
                                 Executive,
                                 KernelMode,
                                 FALSE,
                                 (PLARGE_INTEGER) NULL );

    NumberOfDirtyPages = RtlNumberGenericTableElements(&Vmcb->DirtyTable);

    //
    //  If there are no dirty sectors, no need to flush.
    //

    if (NumberOfDirtyPages == 0) {

        (VOID)KeReleaseMutex( &Vmcb->Mutex, FALSE );

        return STATUS_SUCCESS;
    }

    //
    //  FsRtlAllocatePoolWithTag raises on failure, so make sure the mutex
    //  gets released if the allocation does not succeed.
    //

    try {

        VbnsToFlush = FsRtlAllocatePoolWithTag( PagedPool, NumberOfDirtyPages * sizeof(ULONG), 'bcmV' );

    } finally {

        if (AbnormalTermination()) {

            (VOID)KeReleaseMutex( &Vmcb->Mutex, FALSE );
        }
    }

    for (Ptr = RtlEnumerateGenericTable( &Vmcb->DirtyTable, TRUE ),
         ElementNumber = 0;
         Ptr != NULL;
         Ptr = RtlEnumerateGenericTable( &Vmcb->DirtyTable, FALSE ),
         ElementNumber += 1) {

        VBN Vbn;
        BOOLEAN Result;

        //
        //  Lbn pages always map to Vbn pages.  Thus any sector in an Lbn
        //  page will map to the same Vbn page.  So it suffices to map the
        //  first Lbn in the page to a Vbn and flush that page.
        //

        Lbn = Ptr->LbnPageNumber * (PAGE_SIZE / 512);

        ASSERT(Ptr->DirtyMask != 0);

        Result = PbVmcbLbnToVbn( &Vcb->Vmcb, Lbn, &Vbn, NULL );

        //
        //  This lookup must work as the LBN page was dirty.
        //

        if (!Result) {

            PbBugCheck( 0, 0, 0 );
        }

        //
        //  Store this Vbn away for flushing later.
        //

        ASSERT( ElementNumber < NumberOfDirtyPages );
        ASSERT( (Vbn & (PAGE_SIZE/512 - 1)) == 0 );

        VbnsToFlush[ElementNumber] = Vbn;
    }

    ASSERT( ElementNumber == NumberOfDirtyPages );

    //
    //  Now drop the mutex and walk through the dirty Vbn list generated
    //  above.  We cannot hold the mutex while doing IO as this will cause
    //  a deadlock with the cache manager.
    //

    (VOID)KeReleaseMutex( &Vmcb->Mutex, FALSE );

    for ( ElementNumber = 0;
          ElementNumber < NumberOfDirtyPages;
          ElementNumber += 1) {

        PBCB Bcb;
        PVOID DontCare;
        LARGE_INTEGER Offset;
        IO_STATUS_BLOCK Iosb;

        //
        //  This page is dirty.  Flush it by writing it through.
        //
        //  Widen the Vbn to 64 bits *before* shifting: the original
        //  32-bit shift silently truncated byte offsets at 4GB
        //  (Vbn >= 0x800000).
        //

        Offset.QuadPart = ((LONGLONG)VbnsToFlush[ElementNumber]) << 9;

        try {

            (VOID)CcPinRead( Vcb->VirtualVolumeFile,
                             &Offset,
                             PAGE_SIZE,
                             TRUE,
                             &Bcb,
                             &DontCare );

            CcSetDirtyPinnedData( Bcb, NULL );
            CcRepinBcb( Bcb );
            CcUnpinData( Bcb );
            CcUnpinRepinnedBcb( Bcb, TRUE, &Iosb );

            if (!NT_SUCCESS(Iosb.Status)) {

                ReturnStatus = Iosb.Status;
            }

        } except(PbExceptionFilter(IrpContext, GetExceptionInformation())) {

            ReturnStatus = IrpContext->ExceptionStatus;
        }
    }

    ExFreePool( VbnsToFlush );

    return ReturnStatus;
}
#endif // VMCB_WRITE_SUPPORT