// umsfreevirtualprocessorroot.cpp
/// <summary>
/// Invokes the scheduling context on this virtual processor root.
/// </summary>
/// <param name="fAsynchronous">
/// True is passed in if the previously executing context blocked asynchronously.
/// Otherwise, false is passed in.
/// </param>
void UMSFreeVirtualProcessorRoot::InvokeSchedulingContext(bool fAsynchronous)
{
    DispatchState dispatchState;
    dispatchState.m_fIsPreviousContextAsynchronouslyBlocked = fAsynchronous;

    m_pSchedulingContext->Dispatch(&dispatchState);
}
/// <summary>
/// Returns a process unique identifier for the thread proxy.
/// </summary>
/// <returns>
/// The IThreadProxy id.
/// </returns>
unsigned int UMSFreeVirtualProcessorRoot::GetId() const
{
    return m_id;
}
/// <summary>
/// Called in order to perform a cooperative context switch between one context and another. After this call, pContext will
/// be running atop the virtual processor root and the context which was running will not. What happens to the context that
/// was running depends on the value of the reason argument.
/// </summary>
/// <param name="pContext">
/// The context to cooperatively switch to.
/// </param>
/// <param name="switchState">
/// Indicates the state of the thread proxy that is executing the switch. This can determine ownership of the underlying thread
/// proxy and context.
/// </param>
void UMSFreeVirtualProcessorRoot::SwitchTo(IExecutionContext *pContext, SwitchingProxyState switchState)
{
    //
    // The semantics around the primary context are slightly different.
    //
    CORE_ASSERT(switchState == Blocking);
    if (switchState != Blocking)
    {
        throw invalid_operation();
    }

    UMSFreeThreadProxy *pProxy = static_cast<UMSFreeThreadProxy *>(pContext->GetProxy());
    CORE_ASSERT(pProxy != NULL);

    if (pProxy != NULL)
    {
        Execute(pProxy, true, false);
    }
}
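//
// A minimal usage sketch (hypothetical; pRoot and pNextContext are assumptions for
// illustration, with pNextContext being a runnable context selected by the scheduler):
//
//     // Running atop this root: cooperatively hand the virtual processor to
//     // pNextContext. The calling context stops running on this root.
//     pRoot->SwitchTo(pNextContext, Blocking);
//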
/// <summary>
/// Called in order to perform a cooperative context switch out. After this call, the context which was running
/// is no longer running atop the virtual processor root.
/// </summary>
void UMSFreeVirtualProcessorRoot::SwitchOut()
{
    //
    // The scheduling context cannot SwitchOut; there are no meaningful semantics for that operation on the primary.
    //
    CORE_ASSERT(false);
    throw invalid_operation();
}
/// <summary>
/// Called in order to yield to the underlying operating system. This allows the operating system to schedule
/// other work in that time quantum.
/// </summary>
void UMSFreeVirtualProcessorRoot::YieldToSystem()
{
    //
    // SwitchToThread yields the remainder of the current time slice to any other thread that is
    // ready to run on the current processor and returns immediately if there is none.
    //
    SwitchToThread();
}
/// <summary>
/// The UMS primary function. This is invoked when the virtual processor switches into UMS scheduling mode or whenever a given
/// context blocks or yields.
/// </summary>
/// <param name="reason">
/// The reason for the UMS invocation.
/// </param>
/// <param name="activationPayload">
/// The activation payload (its meaning depends on the reason).
/// </param>
/// <param name="pData">
/// The scheduler parameter (the startup parameter block at startup; otherwise, whatever the
/// invoking path supplied).
/// </param>
void NTAPI UMSFreeVirtualProcessorRoot::PrimaryInvocation(UMS_SCHEDULER_REASON reason, ULONG_PTR activationPayload, PVOID pData)
{
    UMSFreeVirtualProcessorRoot *pRoot = NULL;
    UMSFreeThreadProxy *pProxy = NULL;

    PUMS_CONTEXT pPrimaryContext = UMS::GetCurrentUmsThread();
    CORE_ASSERT(pPrimaryContext != NULL);

    if (reason == UmsSchedulerStartup)
    {
        InitialThreadParam *param = reinterpret_cast<InitialThreadParam *>(pData);
        pRoot = param->m_pRoot;

        //
        // Upon startup of the primary, we must stash the *this* pointer somewhere. We can snap this into a TLS slot or the UMS
        // context.
        //
        UMSBaseObject *pObj = pRoot;
        if (!UMS::SetUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pObj, sizeof(pObj)))
            throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));

        //
        // Indicate that the primary is ready to start.
        // The thread parameter lifetime is managed by the caller. Do not touch param (pData) once the
        // event is set below.
        //
        SetEvent(param->m_hEvent);
    }
    else
    {
        //
        // activationPayload and pData might be NULL (blocking), so we're left with storing the UMSFreeVirtualProcessorRoot* in either
        // TLS or the UMS context (the primary does have one). At present, it's in the UMS context.
        //
        UMSBaseObject *pObj = NULL;
        if (!UMS::QueryUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pObj, sizeof(pObj), NULL))
            throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));

        pRoot = static_cast<UMSFreeVirtualProcessorRoot *>(pObj);
        pProxy = static_cast<UMSFreeThreadProxy *>(pRoot->m_pExecutingProxy);

        CORE_ASSERT(pProxy->GetVirtualProcessorRoot() == pRoot);
        CORE_ASSERT(pRoot->m_pSchedulingContext != NULL);
    }
    //
    // **************************************************
    // READ THIS RIGHT NOW:
    // **************************************************
    //
    // Anything this function does is highly sensitive. It's entirely possible that we are the *ONLY* primary within a process
    // and a UT just blocked (pPreviousContext) on some arbitrary object. If we block on the same object, the UT will never be rescheduled
    // and process deadlock will ensue. It is therefore IMPERATIVE that everything done in this function be completely lock free and wait
    // free. Nothing here can block on **ANY** lock that **MIGHT** be held by arbitrary user code running on the scheduler. This means no
    // memory allocation, no scheduler locks, nothing...
    //
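    //
    // For instance, even a single 'new' here could try to take the process heap lock. If the UT that
    // just blocked was preempted while holding that same lock, this primary would deadlock the process.
    //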
    switch (reason)
    {
        case UmsSchedulerStartup:
        {
            //
            // Wait for the root to be activated.
            //
            WaitForSingleObject(pRoot->m_hBlock, INFINITE);

            if (!pRoot->m_fDelete)
            {
                //
                // Activation should have set the scheduling context.
                //
                CORE_ASSERT(pRoot->m_fActivated);
                CORE_ASSERT(pRoot->m_pSchedulingContext != NULL);

                //
                // Upon first start-up, we immediately invoke the scheduling context in order to make a scheduling decision. The factory for creation has made
                // us a guarantee that threads that come from it are already "ready to run".
                //
                pRoot->InvokeSchedulingContext(false);
            }
            break;
        }

        case UmsSchedulerThreadBlocked:
        {
            //
            // The low bit of the activation payload is clear when the thread blocked asynchronously (e.g.: on a
            // page fault or an APC) and set when it blocked synchronously (e.g.: on a system call).
            //
            bool fAsynchronous = (activationPayload & 0x1) == 0;

            //
            // One of two things can have happened here:
            //
            // - pProxy actually blocked and we will wind up switching into the scheduling context
            // - pProxy terminated. In this case, we return from HandleBlocking and fall through.
            //
            pRoot->HandleBlocking(pProxy, fAsynchronous);
            break;
        }

        case UmsSchedulerThreadYield:
        {
            CORE_ASSERT(reinterpret_cast<UMSThreadProxy *>(pData) == pProxy);

            PUMS_CONTEXT pPreviousContext = reinterpret_cast<PUMS_CONTEXT>(activationPayload);
            CORE_ASSERT(UMSFreeThreadProxy::FromUMSContext(pPreviousContext) == pProxy);

            pRoot->HandleYielding(pProxy);
            break;
        }

        default:
            CORE_ASSERT(false);
            break;
    }
    CORE_ASSERT(pRoot->m_fDelete);

    //
    // If the last operation was a yield and we exit this routine, there's a small window during which it's possible to reference invalid memory.
    // We make every effort to get back via a thread exit when necessary, but the RM spec allows someone
    // to get off the virtual processor by exiting the thread and then subsequently free it. This is also a completely normal path for ConcRT when it is shutting
    // down a scheduler entirely. Any virtual processors remaining exit their dispatch loops and then everything gets released in a single call to shut down
    // all resources. At this point, we are left with few options. The only palatable one is to create a thread for the sole purpose of destroying it
    // to get off the virtual processor.
    //
    if (ResourceManager::RequireUMSWorkaround() && reason == UmsSchedulerThreadYield)
    {
        //
        // At this point in time, we are not responsible for scheduling anyone, so we are completely free to do whatever Win32 things are necessary.
        // We need to create a thread with the sole purpose of exiting. Because of things like implicit creation, however,
        // process termination does not wait on ConcRT's background threads. Normally, they will just be exiting and be terminated by the OS on the
        // way out. Unfortunately, it's possible that we're here at that time and that the OS will fail the thread creation. In that particular case,
        // we are in an unfortunate catch-22. If we let this routine exit, we risk memory corruption due to the bug this is working around. We can't
        // create the thread because we are in process termination. In this particular scenario, we will simply spin forever and let the OS bring
        // our thread down. If we do not, the process exit code will get overwritten with some error code and the user will be confused.
        //
        int tripCount = 0;

        for (;;)
        {
            try
            {
                UMSFreeThreadProxy *pBurnedProxy = static_cast<UMSFreeThreadProxy *>(pRoot->SchedulerProxy()->GetNewThreadProxy(NULL));
                CORE_ASSERT(pBurnedProxy->GetCriticalRegionType() == OutsideCriticalRegion);

                pBurnedProxy->ForceEnterHyperCriticalRegion();
                pBurnedProxy->Cancel();

                pRoot->Execute(pBurnedProxy, true, true);

                if (!ResourceManager::IsTerminating() && tripCount >= 10)
                {
                    throw invalid_operation();
                }
            }
            catch (const scheduler_resource_allocation_error& err)
            {
                if (!ResourceManager::IsTerminating() &&
                    (err.get_error_code() != HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED) || tripCount >= 10))
                {
                    throw;
                }
            }
            catch (...)
            {
                if (!ResourceManager::IsTerminating() || tripCount >= 10)
                    throw;
            }

            //
            // If the CRT is statically linked into a DLL, ResourceManager::IsTerminating will not get flagged before thread creations start
            // failing. They fail with ERROR_ACCESS_DENIED in this scenario. If this really is process shutdown, this thread is going
            // to get terminated by the OS pretty soon. Simply remake the attempt with a few sleeps and only throw if the OS hasn't terminated
            // the thread within a specific amount of time.
            //
            if (!ResourceManager::IsTerminating())
            {
                ++tripCount;
                SleepEx(500 + (__rdtsc() % 499), TRUE);
            }
            else
            {
                break;
            }
        }
        if (ResourceManager::IsTerminating())
        {
            //
            // If we get here, one of two things has happened:
            //
            // - The thread creation (or some allocation during it) threw as the process is shutting down.
            // - The execute failed because the thread was terminated by the OS during shutdown after being created but before being
            //   executed.
            //
            // In either case, we cannot work around the bug that the above code deals with and we cannot exit. Spin forever waiting for the OS
            // to terminate us. See above for details.
            //
            _SpinWaitBackoffNone spinWait;

            for (;;)
            {
                //
                // The process is in the middle of terminating, so the OS will terminate this thread. Make sure we're appropriately yielding during our spin
                // so we don't hold it up.
                //
                spinWait._SpinOnce();
            }
        }

        CORE_ASSERT(false);
    }

    CORE_ASSERT(!ResourceManager::RequireUMSWorkaround() || reason != UmsSchedulerThreadYield);
}
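//
// For reference, the UmsSchedulerThreadYield arm above is driven from the UT side by the Win32
// call UmsThreadYield (a sketch; pProxy stands for the yielding thread's proxy):
//
//     // On the UMS worker thread: hand control back to the primary. PrimaryInvocation
//     // then runs with reason == UmsSchedulerThreadYield, activationPayload == the
//     // yielding thread's UMS context, and pData == the parameter passed here.
//     UmsThreadYield(pProxy);
//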
/// <summary>
/// The primary thread for this UMS virtual processor.
/// </summary>
/// <param name="pContext">
/// The UMSFreeVirtualProcessorRoot that the primary manages.
/// </param>
DWORD CALLBACK UMSFreeVirtualProcessorRoot::PrimaryMain(LPVOID pContext)
{
    InitialThreadParam *param = reinterpret_cast<InitialThreadParam *>(pContext);
    UMSFreeVirtualProcessorRoot *pRoot = param->m_pRoot;

    UMS_SCHEDULER_STARTUP_INFO si;
    si.UmsVersion = UMS_VERSION;
    si.CompletionList = pRoot->SchedulerProxy()->GetCompletionList();
    si.SchedulerProc = (PUMS_SCHEDULER_ENTRY_POINT) &PrimaryInvocation;
    si.SchedulerParam = pContext;

    if (!UMS::EnterUmsSchedulingMode(&si))
        throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));

    //
    // Release our reference count on the scheduler proxy. This is the only point it is safe. We require the data structures maintained by the proxy (e.g.:
    // all the completion / transfer lists, etc...) until the primary has actually completed. Hence, all of this is reference counted and the proxy doesn't
    // die until every last primary is out.
    //
    pRoot->SchedulerProxy()->Release();

    //
    // This is the only point at which it is *SAFE* to delete the virtual processor root. Any time we reenter the primary, we need it. The primary thread
    // has to have exited UMS scheduling mode before anything can be done to get rid of these structures.
    //
    delete pRoot;

    FreeLibraryAndDestroyThread(0);
    return 0;
}
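//
// A minimal sketch of how a primary thread might be launched (the creation site is not part of
// this file; the locals below are assumptions for illustration):
//
//     InitialThreadParam param;
//     param.m_pRoot = pRoot;
//     param.m_hEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
//
//     HANDLE hPrimary = CreateThread(NULL, 0, PrimaryMain, &param, 0, NULL);
//
//     // PrimaryInvocation signals m_hEvent from its UmsSchedulerStartup arm; the
//     // parameter block must stay alive until then.
//     WaitForSingleObject(param.m_hEvent, INFINITE);
//     CloseHandle(param.m_hEvent);
//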
/// <summary>
/// Returns our RM.
/// </summary>
ResourceManager *UMSFreeVirtualProcessorRoot::GetResourceManager()
{
    return SchedulerProxy()->GetResourceManager();
}
} // namespace details
} // namespace Concurrency