// resourcemanager.cpp
// Excerpted from a code-sharing paste titled "Prototypes of C library
// functions — take what's useful" · C++ · 1,520 lines total · page 1 of 5.
}
/// <summary>
/// Atomically increments the reference count on this resource manager.
/// </summary>
/// <returns>
/// The reference count after the increment.
/// </returns>
unsigned int ResourceManager::Reference()
{
    long newCount = InterlockedIncrement(&m_referenceCount);
    return static_cast<unsigned int>(newCount);
}
/// <summary>
/// Attempts to increment the reference count on the RM, refusing to
/// resurrect it once the count has dropped to 0 (no 0 -> 1 transition).
/// </summary>
/// <returns>
/// True if the RM was successfully referenced; false if the count was 0.
/// </returns>
bool ResourceManager::SafeReference()
{
    const bool referenced = SafeInterlockedIncrement(&m_referenceCount);
    return referenced;
}
/// <summary>
/// Decrements the reference count of a resource manager.
/// </summary>
/// <returns>
/// The resulting reference count. When the count reaches zero this object
/// deletes itself, so the caller must not use it after this call returns 0.
/// </returns>
unsigned int ResourceManager::Release()
{
long rc = InterlockedDecrement(&m_referenceCount);
if (rc == 0)
{
// Clear the process-wide singleton pointer, but only if it still points at
// this instance; a replacement RM may already have been published.
{ // begin locked region
_StaticLock::_Scoped_lock lock(s_lock);
if (this == (ResourceManager*) Security::DecodePointer(s_pResourceManager))
{
// A new s_pRM could be created in CreateSingleton, we can only set the static pointer to null
// if it is the same as 'this'.
s_pResourceManager = NULL;
}
} // end locked region
if (m_hDynamicRMThreadHandle != NULL)
{
{ // begin locked region
_NonReentrantBlockingLock::_Scoped_lock lock(m_lock);
// (HANDLE)1 is the placeholder stored while the worker thread is being
// created (see RequestInitialVirtualProcessors); by final release it must
// have been replaced with a real thread handle.
ASSERT(m_hDynamicRMThreadHandle != (HANDLE)1);
// Cause the dynamic RM background thread to Exit and wait for it to finish executing
ASSERT(m_dynamicRMWorkerState == Standby);
m_dynamicRMWorkerState = Exit;
} // end locked region
// Wake the worker so it observes the Exit state, and wait for it to
// terminate before destroying the object it references.
WakeupDynamicRMWorker();
WaitForSingleObject(m_hDynamicRMThreadHandle, INFINITE);
}
delete this;
}
return (unsigned int) rc;
}
/// <summary>
/// Debug CRT test hook to create artificial topologies. With the retail CRT, this API simply returns.
/// </summary>
/// <param name="nodeCount">
/// Number of fake nodes to fabricate; must be at least 1.
/// </param>
/// <param name="pCoreCount">
/// Array of nodeCount per-node core counts; must not be NULL.
/// </param>
/// <param name="pNodeDistance">
/// Node distance matrix; not referenced by this implementation.
/// </param>
/// <param name="pProcessorGroups">
/// Optional array of nodeCount processor group numbers. When supplied,
/// processor numbering restarts at zero each time the group number changes
/// from one node to the next.
/// </param>
void ResourceManager::CreateNodeTopology(unsigned int nodeCount, unsigned int *pCoreCount, unsigned int **pNodeDistance, unsigned int *pProcessorGroups)
{
#if defined(_DEBUG)
if (pCoreCount == NULL)
{
throw std::invalid_argument("pCoreCount");
}
if (nodeCount < 1)
{
throw std::invalid_argument("nodeCount");
}
{ // begin locked region
_NonReentrantBlockingLock::_Scoped_lock lock(m_lock);
// The topology may only be replaced while no schedulers exist.
if ( !m_schedulers.Empty())
{
throw invalid_operation();
}
// Destroy the existing node structure.
for (unsigned int i = 0; i < m_nodeCount; ++i)
{
m_pGlobalNodes[i].Cleanup();
}
delete [] m_pGlobalNodes;
delete [] m_pSortedNodeOrder;
#if defined(CONCRT_TRACING)
delete [] m_drmInitialState;
#endif
// Rebuild the global counters from the fake topology.
s_nodeCount = m_nodeCount = nodeCount;
s_packageCount = 0;
s_physicalProcessorCount = 0;
for (unsigned int i = 0; i < m_nodeCount; ++i)
{
s_physicalProcessorCount += pCoreCount[i];
}
unsigned int procNumber = 0;
m_pGlobalNodes = new GlobalNode[m_nodeCount];
memset(m_pGlobalNodes, 0, sizeof(GlobalNode) * m_nodeCount);
m_pSortedNodeOrder = new unsigned int[m_nodeCount];
//
// This is a patch for the test hook to allow schedulers to actually be created with the "fake" underlying
// topology as long as the group numbers are valid for the machine.
//
ULONG_PTR processAffinityMask = 0;
ULONG_PTR systemAffinityMask = 0;
BOOL retVal = GetProcessAffinityMask(GetCurrentProcess(), &processAffinityMask, &systemAffinityMask);
ASSERT(retVal == TRUE);
for (unsigned int i = 0; i < m_nodeCount; ++i)
{
m_pSortedNodeOrder[i] = i;
m_pGlobalNodes[i].m_id = i;
m_pGlobalNodes[i].m_pCores = new GlobalCore[pCoreCount[i]];
m_pGlobalNodes[i].m_coreCount = (LONG) pCoreCount[i];
// Every fake node inherits the real process affinity so that scheduler
// creation against this fake topology still succeeds on the machine.
m_pGlobalNodes[i].m_nodeAffinity = processAffinityMask;
memset(m_pGlobalNodes[i].m_pCores, 0, m_pGlobalNodes[i].m_coreCount * sizeof(GlobalCore));
if (pProcessorGroups != NULL)
{
if (i > 0 && (pProcessorGroups[i] != pProcessorGroups[i - 1]))
{
// Reset the proc number to zero since we've encountered a new group.
procNumber = 0;
}
m_pGlobalNodes[i].m_processorGroup = pProcessorGroups[i];
}
m_pGlobalNodes[i].m_pSortedCoreOrder = new unsigned int[m_pGlobalNodes[i].m_coreCount];
for (unsigned int j = 0; j < m_pGlobalNodes[i].m_coreCount; ++j)
{
m_pGlobalNodes[i].m_pCores[j].m_processorNumber = (BYTE) procNumber++;
m_pGlobalNodes[i].m_pSortedCoreOrder[j] = j;
}
}
#if defined(CONCRT_TRACING)
// Assumes a m x n allocation.
m_numTotalCores = m_nodeCount * m_pGlobalNodes[0].m_coreCount;
m_drmInitialState = new GlobalCoreData[m_numTotalCores];
memset(m_drmInitialState, 0, sizeof(GlobalCoreData) * m_numTotalCores);
#endif
} // end locked region
#endif // if defined(_DEBUG)
}
/// <summary>
/// Associates an IScheduler with the ISchedulerProxy that represents the part
/// of IResourceManager assigned to that IScheduler.
/// </summary>
/// <param name="pScheduler">
/// The scheduler to be associated; must not be NULL.
/// </param>
/// <param name="version">
/// The version of the RM<->Scheduler communication channel that is being utilized.
/// </param>
/// <returns>
/// The scheduler proxy created for the given scheduler.
/// </returns>
ISchedulerProxy *ResourceManager::RegisterScheduler(IScheduler *pScheduler, unsigned int version)
{
    // Validate arguments before doing any work; the scheduler pointer is
    // checked first, then the communication-channel version.
    if (pScheduler == NULL)
    {
        throw std::invalid_argument("pScheduler");
    }

    if (version != CONCRT_RM_VERSION_1)
    {
        throw std::invalid_argument("version");
    }

    return CreateSchedulerProxy(pScheduler);
}
/// <summary>
/// Called by a scheduler in order make an initial request for an allocation of virtual processors. The request
/// is driven by policies within the scheduler queried via the IScheduler::GetPolicy method. If the request
/// can be satisfied via the rules of allocation, it is communicated to the scheduler as a call to
/// IScheduler::AddVirtualProcessors.
/// </summary>
/// <param name="pProxy">
/// The scheduler proxy that is making the allocation request.
/// </param>
/// <param name="doSubscribeCurrentThread">
/// Whether to subscribe the current thread and account for it during resource allocation.
/// </param>
/// <returns>
/// The IExecutionResource instance representing current thread if doSubscribeCurrentThread was true; NULL otherwise.
/// </returns>
IExecutionResource * ResourceManager::RequestInitialVirtualProcessors(SchedulerProxy *pProxy, bool doSubscribeCurrentThread)
{
bool createWorkerThread = false;
ExecutionResource * pExecutionResource = NULL;
bool doExternalThreadAllocation = false;
{ // begin locked region
_NonReentrantBlockingLock::_Scoped_lock lock(m_lock);
// This is the proxy's initial request; it cannot have external threads yet.
ASSERT(pProxy->GetNumExternalThreads() == 0);
if (doSubscribeCurrentThread)
{
// Reuse an existing execution resource for this thread if the proxy
// already tracks one; otherwise ask PerformAllocation to create it below.
pExecutionResource = pProxy->ReferenceCurrentThreadExecutionResource();
if (pExecutionResource == NULL)
{
doExternalThreadAllocation = true;
}
}
// Increment this count before performing the allocation. If the new scheduler activates vprocs at the time
// they are added, we use this information to decide whether core busy/idle notifications need to be sent to other schedulers.
if (pProxy->ShouldReceiveNotifications())
{
++m_numSchedulersNeedingNotifications;
}
++m_numSchedulers;
m_schedulers.AddTail(pProxy);
// Based on the policy of the scheduler proxy, and the load on the system, allocate cores to this proxy.
// The API will invoke a scheduler proxy callback (GrantAllocation) before it returns.
ExecutionResource * pNewExecutionResource = PerformAllocation(pProxy, doExternalThreadAllocation);
// If this external thread did not exist in the RM already, get it from PerformAllocation.
if (pExecutionResource == NULL)
{
pExecutionResource = pNewExecutionResource;
}
else
{
// An existing resource was referenced above, so no external thread
// allocation was requested and PerformAllocation must not return one.
ASSERT(pNewExecutionResource == NULL);
}
if (pProxy->ShouldReceiveNotifications())
{
SendResourceNotifications(pProxy);
}
// With fewer or more than two schedulers there is nothing to do here: the
// dynamic RM worker only needs to be started on the 1 -> 2 transition.
if (m_numSchedulers != 2)
{
return pExecutionResource;
}
// We've just added the second scheduler. We need to either create or wake up the dynamic RM worker thread.
ASSERT(m_dynamicRMWorkerState == Standby);
m_dynamicRMWorkerState = LoadBalance;
if (m_hDynamicRMThreadHandle == NULL)
{
// Store a temporary value before releasing the lock and proceeding to allocate memory/create the thread.
// This is to prevent a duplicate allocation if the refcount goes from 2->1 and back to 2 after the lock is released.
m_hDynamicRMThreadHandle = (HANDLE)1;
// Initialize the memory used for DRM under the lock, since these variables are touched in the static RM path as well
ASSERT(m_ppProxyData != NULL);
m_ppGivingProxies = new DynamicAllocationData * [m_maxSchedulers];
m_ppReceivingProxies = new DynamicAllocationData * [m_maxSchedulers];
createWorkerThread = true;
}
} // end locked region
// Set the event outside the lock to prevent the high priority thread from having to block immediately upon starting up.
WakeupDynamicRMWorker();
// Create the thread/data or set the dynamic RM event after releasing the lock to prevent the high priority thread
// from having to block immediately upon starting up.
if (createWorkerThread)
{
CreateDynamicRMWorker();
}
return pExecutionResource;
}
/// <summary>
/// This API registers the current thread with the resource manager associating it with this scheduler proxy,
/// and returns an instance of IExecutionResource back to the scheduler, for bookkeeping and maintenance.
/// </summary>
/// <returns>
/// The IExecutionResource instance representing current thread in the runtime.
/// </returns>
ExecutionResource * ResourceManager::SubscribeCurrentThread(SchedulerProxy *pSchedulerProxy)
{
ExecutionResource * pExecutionResource = NULL;
{ // begin locked region
_NonReentrantBlockingLock::_Scoped_lock lock(m_lock);
pExecutionResource = pSchedulerProxy->ReferenceCurrentThreadExecutionResource();
// Create an execution resources if the current thread does not already have one.
if (pExecutionResource == NULL)
{
pExecutionResource = PerformExternalThreadAllocation(pSchedulerProxy);
}
} // end locked region
// NOTE: page 1 of the paste ends here — the remainder of
// ResourceManager::SubscribeCurrentThread (and the rest of the file)
// continues on the following pages.