/* irp.c */
        StackPtr++;
    } while (Irp->CurrentLocation <= (Irp->StackCount + 1));

    /* Check if the IRP is an associated IRP */
    if (Irp->Flags & IRP_ASSOCIATED_IRP)
    {
        /* Get the master IRP and count */
        MasterIrp = Irp->AssociatedIrp.MasterIrp;
        MasterCount = InterlockedDecrement(&MasterIrp->AssociatedIrp.IrpCount);

        /* Free the MDLs */
        for (Mdl = Irp->MdlAddress; Mdl; Mdl = NextMdl)
        {
            /* Go to the next one */
            NextMdl = Mdl->Next;
            IoFreeMdl(Mdl);
        }

        /* Free the IRP itself */
        IoFreeIrp(Irp);

        /* Complete the Master IRP */
        if (!MasterCount) IofCompleteRequest(MasterIrp, PriorityBoost);
        return;
    }

    /* We don't support this yet */
    ASSERT(Irp->IoStatus.Status != STATUS_REPARSE);

    /* Check if we have an auxiliary buffer */
    if (Irp->Tail.Overlay.AuxiliaryBuffer)
    {
        /* Free it */
        ExFreePool(Irp->Tail.Overlay.AuxiliaryBuffer);
        Irp->Tail.Overlay.AuxiliaryBuffer = NULL;
    }

    /* Check if this is a Paging I/O or Close Operation */
    if (Irp->Flags & (IRP_PAGING_IO | IRP_CLOSE_OPERATION))
    {
        /* Handle a Close Operation or Sync Paging I/O */
        if (Irp->Flags & (IRP_SYNCHRONOUS_PAGING_IO | IRP_CLOSE_OPERATION))
        {
            /* Set the I/O Status and signal the event */
            Flags = Irp->Flags & (IRP_SYNCHRONOUS_PAGING_IO | IRP_PAGING_IO);
            *Irp->UserIosb = Irp->IoStatus;
            KeSetEvent(Irp->UserEvent, PriorityBoost, FALSE);

            /* Free the IRP for Paging I/O only; Close is handled by us */
            if (Flags) IoFreeIrp(Irp);
        }
        else
        {
#if 0
            /* Page 166 */
            KeInitializeApc(&Irp->Tail.Apc,
                            &Irp->Tail.Overlay.Thread->Tcb,
                            Irp->ApcEnvironment,
                            IopCompletePageWrite,
                            NULL,
                            NULL,
                            KernelMode,
                            NULL);
            KeInsertQueueApc(&Irp->Tail.Apc,
                             NULL,
                             NULL,
                             PriorityBoost);
#else
            /* Not implemented yet */
            DPRINT1("Not supported!\n");
            while (TRUE);
#endif
        }

        /* Get out of here */
        return;
    }
    /* Unlock the MDL pages, page 167 */
    Mdl = Irp->MdlAddress;
    while (Mdl)
    {
        MmUnlockPages(Mdl);
        Mdl = Mdl->Next;
    }

    /* Check if we should exit because of Deferred I/O (page 168) */
    if ((Irp->Flags & IRP_DEFER_IO_COMPLETION) && !(Irp->PendingReturned))
    {
        /*
         * Return without queuing the completion APC, since the caller will
         * take care of doing its own optimized completion at PASSIVE_LEVEL.
         */
        return;
    }

    /* Get the thread and file object */
    Thread = Irp->Tail.Overlay.Thread;
    FileObject = Irp->Tail.Overlay.OriginalFileObject;

    /* Make sure the IRP isn't canceled */
    if (!Irp->Cancel)
    {
        /* Initialize the APC */
        KeInitializeApc(&Irp->Tail.Apc,
                        &Thread->Tcb,
                        Irp->ApcEnvironment,
                        IopCompleteRequest,
                        NULL,
                        NULL,
                        KernelMode,
                        NULL);

        /* Queue it */
        KeInsertQueueApc(&Irp->Tail.Apc,
                         FileObject,
                         NULL, /* This is used for REPARSE stuff */
                         PriorityBoost);
    }
    else
    {
        /* The IRP just got canceled... does a thread still own it? */
        Thread = Irp->Tail.Overlay.Thread;
        if (Thread)
        {
            /* Yes! There is still hope! Initialize the APC */
            KeInitializeApc(&Irp->Tail.Apc,
                            &Thread->Tcb,
                            Irp->ApcEnvironment,
                            IopCompleteRequest,
                            NULL,
                            NULL,
                            KernelMode,
                            NULL);

            /* Queue it */
            KeInsertQueueApc(&Irp->Tail.Apc,
                             FileObject,
                             NULL, /* This is used for REPARSE stuff */
                             PriorityBoost);
        }
        else
        {
            /* Nothing left for us to do, kill it */
            ASSERT(Irp->Cancel);
            IopCleanupIrp(Irp, FileObject);
        }
    }
}
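
/*
 * Illustrative sketch (not part of the original file): how a driver's
 * dispatch routine typically hands a finished IRP to the completion path
 * above. MyDispatchCreate is a hypothetical routine name; the status and
 * Information values are assumptions for the example.
 */
#if 0
NTSTATUS
NTAPI
MyDispatchCreate(IN PDEVICE_OBJECT DeviceObject,
                 IN PIRP Irp)
{
    /* Fill in the I/O status block before completing */
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = 0;

    /* Hand the IRP back to the I/O manager; IoCompleteRequest ends up in
       IofCompleteRequest, whose tail is shown above */
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}
#endif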

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
IoForwardIrpSynchronously(IN PDEVICE_OBJECT DeviceObject,
                          IN PIRP Irp)
{
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
VOID
NTAPI
IoFreeIrp(IN PIRP Irp)
{
    PNPAGED_LOOKASIDE_LIST List;
    PP_NPAGED_LOOKASIDE_NUMBER ListType = LookasideSmallIrpList;
    PKPRCB Prcb;
    IOTRACE(IO_IRP_DEBUG,
            "%s - Freeing IRP %p\n",
            __FUNCTION__,
            Irp);

    /* Make sure the Thread IRP list is empty and that it is OK to free it */
    ASSERT(Irp->Type == IO_TYPE_IRP);
    ASSERT(IsListEmpty(&Irp->ThreadListEntry));
    ASSERT(Irp->CurrentLocation >= Irp->StackCount);

    /* If this was a pool allocation, free it back to the pool */
    if (!(Irp->AllocationFlags & IRP_ALLOCATED_FIXED_SIZE))
    {
        /* Free it */
        ExFreePool(Irp);
    }
    else
    {
        /* Check if this was a Big IRP */
        if (Irp->StackCount != 1) ListType = LookasideLargeIrpList;

        /* Get the PRCB */
        Prcb = KeGetCurrentPrcb();

        /* Use the P List */
        List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].P;
        List->L.TotalFrees++;

        /* Check if the free was within the Depth or not */
        if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
        {
            /* Let the balancer know */
            List->L.FreeMisses++;

            /* Use the L List */
            List = (PNPAGED_LOOKASIDE_LIST)Prcb->PPLookasideList[ListType].L;
            List->L.TotalFrees++;

            /* Check if the free was within the Depth or not */
            if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
            {
                /* All lists failed, use the pool */
                List->L.FreeMisses++;
                ExFreePool(Irp);
                Irp = NULL;
            }
        }

        /* The free was within the Depth */
        if (Irp)
        {
            InterlockedPushEntrySList(&List->L.ListHead,
                                      (PSINGLE_LIST_ENTRY)Irp);
        }
    }
}
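
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * usual pairing with IoAllocateIrp for a driver-created IRP that was never
 * handed off. TargetDevice is a hypothetical device object pointer.
 */
#if 0
PIRP NewIrp = IoAllocateIrp(TargetDevice->StackSize, FALSE);
if (NewIrp)
{
    /* ... the send failed or was never attempted, so we still own the IRP
       and must return it to the lookaside list or pool ourselves ... */
    IoFreeIrp(NewIrp);
}
#endif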

/*
 * @implemented
 */
PEPROCESS
NTAPI
IoGetRequestorProcess(IN PIRP Irp)
{
    return Irp->Tail.Overlay.Thread->ThreadsProcess;
}

/*
 * @implemented
 */
ULONG
NTAPI
IoGetRequestorProcessId(IN PIRP Irp)
{
    /* UniqueProcessId is a HANDLE; avoid pointer truncation on 64-bit */
    return PtrToUlong(IoGetRequestorProcess(Irp)->UniqueProcessId);
}

/*
 * @implemented
 */
NTSTATUS
NTAPI
IoGetRequestorSessionId(IN PIRP Irp,
                        OUT PULONG pSessionId)
{
    /* Return the session */
    *pSessionId = IoGetRequestorProcess(Irp)->Session;
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
PIRP
NTAPI
IoGetTopLevelIrp(VOID)
{
    return (PIRP)PsGetCurrentThread()->TopLevelIrp;
}

/*
 * @implemented
 */
VOID
NTAPI
IoInitializeIrp(IN PIRP Irp,
                IN USHORT PacketSize,
                IN CCHAR StackSize)
{
    /* Clear it */
    IOTRACE(IO_IRP_DEBUG,
            "%s - Initializing IRP %p\n",
            __FUNCTION__,
            Irp);
    RtlZeroMemory(Irp, PacketSize);

    /* Set the Header and other data */
    Irp->Type = IO_TYPE_IRP;
    Irp->Size = PacketSize;
    Irp->StackCount = StackSize;
    Irp->CurrentLocation = StackSize + 1;
    Irp->ApcEnvironment = KeGetCurrentThread()->ApcStateIndex;
    Irp->Tail.Overlay.CurrentStackLocation = (PIO_STACK_LOCATION)(Irp + 1) + StackSize;

    /* Initialize the Thread List */
    InitializeListHead(&Irp->ThreadListEntry);
}
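
/*
 * Illustrative sketch (assumption, not part of the original file):
 * initializing a caller-allocated IRP, the documented use of
 * IoInitializeIrp. TargetDevice is a hypothetical device object pointer
 * and the pool tag is arbitrary for the example.
 */
#if 0
CCHAR StackSize = TargetDevice->StackSize;
USHORT PacketSize = IoSizeOfIrp(StackSize);
PIRP NewIrp = ExAllocatePoolWithTag(NonPagedPool, PacketSize, 'prIX');
if (NewIrp) IoInitializeIrp(NewIrp, PacketSize, StackSize);
#endif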

/*
 * @implemented
 */
BOOLEAN
NTAPI
IoIsOperationSynchronous(IN PIRP Irp)
{
    /* Asynchronous Paging I/O is always asynchronous, even on a file
       object opened for synchronous I/O */
    if ((Irp->Flags & IRP_PAGING_IO) &&
        !(Irp->Flags & IRP_SYNCHRONOUS_PAGING_IO))
    {
        return FALSE;
    }

    /* Check the flags */
    if ((Irp->Flags & IRP_SYNCHRONOUS_PAGING_IO) ||
        (Irp->Flags & IRP_SYNCHRONOUS_API) ||
        (IoGetCurrentIrpStackLocation(Irp)->FileObject->Flags &
         FO_SYNCHRONOUS_IO))
    {
        /* Synch API or Paging I/O is OK, as is Sync File I/O */
        return TRUE;
    }

    /* Otherwise, it is an asynchronous operation. */
    return FALSE;
}
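
/*
 * Illustrative sketch (assumption, not part of the original file): file
 * systems use this routine to decide whether a dispatch routine may block
 * or must post the request elsewhere.
 */
#if 0
BOOLEAN CanWait = IoIsOperationSynchronous(Irp);
if (!CanWait)
{
    /* Queue the request to a worker thread instead of blocking here */
}
#endif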

/*
 * @unimplemented
 */
BOOLEAN
NTAPI
IoIsValidNameGraftingBuffer(IN PIRP Irp,
                            IN PREPARSE_DATA_BUFFER ReparseBuffer)
{
    UNIMPLEMENTED;
    return FALSE;
}

/*
 * @implemented
 */
PIRP
NTAPI
IoMakeAssociatedIrp(IN PIRP Irp,
                    IN CCHAR StackSize)
{
    PIRP AssocIrp;
    IOTRACE(IO_IRP_DEBUG,
            "%s - Associating IRP %p\n",
            __FUNCTION__,
            Irp);

    /* Allocate the IRP */
    AssocIrp = IoAllocateIrp(StackSize, FALSE);
    if (!AssocIrp) return NULL;

    /* Set the Flags */
    AssocIrp->Flags |= IRP_ASSOCIATED_IRP;

    /* Set the Thread */
    AssocIrp->Tail.Overlay.Thread = Irp->Tail.Overlay.Thread;

    /* Associate them */
    AssocIrp->AssociatedIrp.MasterIrp = Irp;
    return AssocIrp;
}
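
/*
 * Illustrative sketch (assumption, not part of the original file):
 * splitting a master IRP into two associated IRPs. FirstIrp, SecondIrp,
 * and DeviceObject are hypothetical. The caller sets
 * AssociatedIrp.IrpCount on the master; when the count drops to zero in
 * the completion path at the top of this file, the master completes.
 */
#if 0
MasterIrp->AssociatedIrp.IrpCount = 2;
FirstIrp = IoMakeAssociatedIrp(MasterIrp, DeviceObject->StackSize);
SecondIrp = IoMakeAssociatedIrp(MasterIrp, DeviceObject->StackSize);
#endif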

/*
 * @implemented
 */
VOID
NTAPI
IoQueueThreadIrp(IN PIRP Irp)
{
    IOTRACE(IO_IRP_DEBUG,
            "%s - Queueing IRP %p\n",
            __FUNCTION__,
            Irp);

    /* Use our inlined routine */
    IopQueueIrpToThread(Irp);
}

/*
 * @implemented
 * Reference: Chris Cant's "Writing WDM Device Drivers"
 */
VOID
NTAPI
IoReuseIrp(IN OUT PIRP Irp,
           IN NTSTATUS Status)
{
    UCHAR AllocationFlags;
    IOTRACE(IO_IRP_DEBUG,
            "%s - Reusing IRP %p\n",
            __FUNCTION__,
            Irp);

    /* Make sure it's OK to reuse it */
    ASSERT(!Irp->CancelRoutine);
    ASSERT(IsListEmpty(&Irp->ThreadListEntry));

    /* Get the old flags */
    AllocationFlags = Irp->AllocationFlags;

    /* Reinitialize the IRP */
    IoInitializeIrp(Irp, Irp->Size, Irp->StackCount);

    /* Duplicate the data */
    Irp->IoStatus.Status = Status;
    Irp->AllocationFlags = AllocationFlags;
}
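
/*
 * Illustrative sketch (assumption, not part of the original file):
 * recycling a driver-owned IRP between I/O submissions instead of freeing
 * and reallocating it. RecycledIrp is a hypothetical IRP the driver
 * allocated with IoAllocateIrp and keeps for repeated use.
 */
#if 0
IoReuseIrp(RecycledIrp, STATUS_SUCCESS);
/* ... set up the next stack location and send it again ... */
#endif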

/*
 * @implemented
 */
VOID
NTAPI
IoSetTopLevelIrp(IN PIRP Irp)
{
    /* Set the IRP; use ULONG_PTR so the pointer survives on 64-bit */
    PsGetCurrentThread()->TopLevelIrp = (ULONG_PTR)Irp;
}
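
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * usual file-system pattern, marking the current thread as the top-level
 * owner of a request for recursion detection, then clearing it.
 */
#if 0
if (IoGetTopLevelIrp() == NULL)
{
    IoSetTopLevelIrp(Irp);
    /* ... process the request ... */
    IoSetTopLevelIrp(NULL);
}
#endif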
/* EOF */