/*
 * ke_x.h -- internal inlined kernel locking helpers: spinlocks,
 * dispatcher object/lock routines, and PRCB/thread/APC/process
 * queued-spinlock wrappers.
 */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
/* Nothing to do on UP */
UNREFERENCED_PARAMETER(LockQueue);
}
#else
//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* Spin until this CPU atomically sets the lock's low bit. Caller must
       already be at IRQL >= DISPATCH_LEVEL. */
    for (;;)
    {
        /* Try to acquire it: returns the PREVIOUS state of bit 0 */
        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
        {
            /* Bit was already set: spin with plain reads until released.
               On debug builds the owner stores (thread | 1), so test the
               lock bit instead of comparing the whole word against 1. */
            while ((*(volatile KSPIN_LOCK *)SpinLock) & 1)
            {
#ifdef DBG
                /* On debug builds, we use a much slower but useful routine */
                Kii386SpinOnSpinLock(SpinLock, 5);
#else
                /* Otherwise, just yield and keep looping */
                YieldProcessor();
#endif
            }
        }
        else
        {
#ifdef DBG
            /* On debug builds, record the owner: KTHREAD pointer OR'ed with
               the lock bit (cast needed; can't OR a pointer directly) */
            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
            /* All is well, break out */
            break;
        }
    }
}
//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
#ifdef DBG
    /* The acquirer stored (thread | 1); verify the caller is that owner */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
    {
        /* They don't match, bugcheck (parameters are ULONG_PTR, cast it) */
        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif
    /* Atomically clear the lock word (InterlockedAnd takes a PLONG) */
    InterlockedAnd((PLONG)SpinLock, 0);
}
KIRQL
FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
LONG OldValue, NewValue;
/* Make sure we're at a safe level to touch the lock */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
/* Start acquire loop */
do
{
/* Loop until the other CPU releases it */
while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
{
/* Let the CPU know that this is a loop */
YieldProcessor();
};
/* Try acquiring the lock now */
NewValue = InterlockedCompareExchange(&Object->Lock,
OldValue | KOBJECT_LOCK_BIT,
OldValue);
} while (NewValue != OldValue);
}
KIRQL
FORCEINLINE
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
/* Make sure we're at a safe level to touch the lock */
ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
/* Release it */
InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}
KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
/* Raise to synchronization level and acquire the dispatcher lock */
return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}
VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
/* First release the lock */
KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
LockQueue[LockQueueDispatcherLock]);
/* Then exit the dispatcher */
KiExitDispatcher(OldIrql);
}
//
// This routine inserts a thread into the deferred ready list of the given CPU
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Queue the thread on the current CPU's deferred ready list */
    PKPRCB CurrentPrcb = KeGetCurrentPrcb();

    /* Mark it deferred-ready and remember which processor deferred it */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = CurrentPrcb->Number;

    /* Push it onto this CPU's singly-linked deferred ready list */
    PushEntryList(&CurrentPrcb->DeferredReadyListHead, &Thread->SwapListEntry);
}
//
// Nudges a remote CPU to reschedule when a new thread became ready there.
// A local CPU needs no IPI; only a different target processor is signaled.
//
FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Only a new thread on a CPU other than ours needs an explicit nudge */
    if (NewThread && (KeGetPcr()->Number != Cpu))
    {
        /* Send a DPC IPI to the target processor */
        KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}
//
// This routine sets the current thread in a swap busy state, which ensure that
// nobody else tries to swap it concurrently.
//
//
// Flags the thread as swap-busy so no other party attempts a concurrent
// context swap of it.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Two parties marking the same thread swap-busy is a bug */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Claim it for ourselves */
    Thread->SwapBusy = TRUE;
}
//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must be be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* The PRCB lock may only be taken at DISPATCH_LEVEL or above */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Keep attempting the atomic exchange until it observes the lock
       as previously free (old value 0) */
    while (InterlockedExchange(&Prcb->PrcbLock, 1) != 0)
    {
        /* Contended: back off with plain reads so we don't hammer the
           bus, yielding until the holder lets go */
        do
        {
            YieldProcessor();
        } while (Prcb->PrcbLock != 0);
    }
}
//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must be be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Releasing an unheld PRCB lock is a caller bug */
    ASSERT(Prcb->PrcbLock != 0);

    /* Atomically zero the lock word so other CPUs may take it */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}
//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must be be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* The thread lock may only be taken at DISPATCH_LEVEL or above */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Keep attempting the atomic exchange until it observes the lock
       as previously free (old value 0) */
    while (InterlockedExchange(&Thread->ThreadLock, 1) != 0)
    {
        /* Contended: back off with plain reads so we don't hammer the
           bus, yielding until the holder lets go */
        do
        {
            YieldProcessor();
        } while (Thread->ThreadLock != 0);
    }
}
//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must be be only acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Atomically zero the lock word so other CPUs may take it */
    InterlockedAnd(&Thread->ThreadLock, 0);
}
//
// Attempts to take the thread lock without spinning.
// Returns TRUE if the lock was acquired, FALSE if it is held elsewhere.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    LONG Value;

    /* If the lock is already held, fail fast without touching the bus.
       (The previous code inverted this test and bailed when it was FREE.) */
    if (Thread->ThreadLock) return FALSE;

    /* Attempt the acquire; InterlockedExchange returns the PREVIOUS value.
       (The previous code passed &Value -- a pointer -- as the new value.) */
    Value = InterlockedExchange(&Thread->ThreadLock, 1);

    /* We own the lock only if it was previously free */
    return (Value == 0);
}
//
// Drains the CPU's deferred ready list, but only when it is non-empty.
//
FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    if (Prcb->DeferredReadyListHead.Next != NULL)
    {
        KiProcessDeferredReadyList(Prcb);
    }
}
//
// Requests APC delivery on the given processor: a DPC IPI when the target
// is another CPU, a local APC_LEVEL software interrupt otherwise.
// (The previous body referenced an undeclared 'Cpu' -- the parameter is
// named 'Processor' -- and could not compile.)
//
FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* Check if we need to request APC delivery */
    if (NeedApc)
    {
        /* Check if it's on another CPU */
        if (KeGetPcr()->Number != Processor)
        {
            /* Send an IPI to request delivery */
            KiIpiSendRequest(AFFINITY_MASK(Processor), IPI_DPC);
        }
        else
        {
            /* Request a software interrupt */
            HalRequestSoftwareInterrupt(APC_LEVEL);
        }
    }
}
#endif
//
// Locks a thread's APC queue, raising IRQL to synchronization level.
// Handle records the in-stack queue entry for the matching release.
//
FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}
//
// Locks a thread's APC queue when the caller is already at DPC level,
// so no IRQL raise is performed.
//
FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}
//
// Locks a thread's APC queue for a caller at APC level.
// NOTE(review): despite the name, this uses the plain acquire, which
// raises IRQL to DISPATCH_LEVEL -- confirm this is the intended contract.
//
FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}
//
// Unlocks an APC queue taken with KiAcquireApcLock, restoring the IRQL
// recorded in the handle.
//
FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeReleaseInStackQueuedSpinLock(Handle);
}
//
// Unlocks an APC queue without lowering IRQL; for callers that remain
// at DPC level afterwards.
//
FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
//
// Locks a process's kernel lock, raising IRQL to synchronization level.
// Handle records the in-stack queue entry for the matching release.
//
FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}
//
// Unlocks a process lock taken with KiAcquireProcessLock, restoring the
// IRQL recorded in the handle.
//
FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeReleaseInStackQueuedSpinLock(Handle);
}
//
// Unlocks a process lock without lowering IRQL; for callers that remain
// at DPC level afterwards.
//
FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
//
// Locks a device queue, choosing the acquire variant by caller context:
// a threaded DPC runs below DPC level and needs the raising acquire,
// otherwise we are already at DPC level.
//
FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* NOTE(review): the original else-branch was destroyed by text
           extraction (replaced by page UI residue); reconstructed from the
           at-DPC-level pattern used by the other wrappers in this file --
           confirm against the upstream source. */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}