// rminternal.h
// a scratch field, and the value is stale outside of dynamic RM phases.
unsigned int m_numDRMIdle;
// The number of borrowed cores in this node for the scheduler in question that were found to be idle during the dynamic RM phase.
// This is a scratch field, and the value is stale outside of dynamic RM phases.
unsigned int m_numDRMBorrowedIdle;
// The array of cores in this node.
SchedulerCore * m_pCores;
/// <summary>
/// Returns the number of cores that were found to be idle.
/// </summary>
unsigned int GetNumIdleCores()
{
return m_numDRMIdle;
}
/// <summary>
/// Returns the number of allocated cores in this node that are fixed - cannot be removed by dynamic RM.
/// </summary>
unsigned int GetNumFixedCores()
{
return m_numFixedCores;
}
/// <summary>
/// Returns the number of movable cores within this node.
/// </summary>
unsigned int GetNumMigratableCores()
{
return m_allocatedCores - m_numFixedCores;
}
/// <summary>
/// Returns the number of owned cores - cores that are not borrowed from a different scheduler.
/// </summary>
unsigned int GetNumOwnedCores()
{
return m_allocatedCores - m_numBorrowedCores;
}
/// <summary>
/// Returns the number of non-borrowed, non-fixed cores.
/// </summary>
unsigned int GetNumOwnedMigratableCores()
{
return m_allocatedCores - m_numBorrowedCores - m_numFixedCores;
}
/// <summary>
/// Returns the number of borrowed cores - cores that were temporarily lent to this scheduler since the scheduler(s) they
/// were assigned to, were not using them.
/// </summary>
unsigned int GetNumBorrowedCores()
{
return m_numBorrowedCores;
}
/// <summary>
/// Returns the number of borrowed cores that are idle.
/// </summary>
unsigned int GetNumBorrowedIdleCores()
{
return m_numDRMBorrowedIdle;
}
/// <summary>
/// Returns the number of borrowed cores that are not idle.
/// </summary>
unsigned int GetNumBorrowedInUseCores()
{
ASSERT(m_numBorrowedCores >= m_numDRMBorrowedIdle);
return (m_numBorrowedCores - m_numDRMBorrowedIdle);
}
/// <summary>
/// Deallocates memory allocated by the node.
/// </summary>
void Cleanup(void)
{
delete [] m_pCores;
}
};
/// <summary>
/// Representation of a processor node within the RM's global map of execution resources. Information in this struct
/// represents a systemwide view of the underlying node.
/// </summary>
struct GlobalNode : public ProcessorNode
{
// A scratch field used during dynamic RM allocation, on the RM's global copy of nodes. Idle cores represents the number
// of cores on this node that are idle and can temporarily be assigned to another scheduler that needs cores.
unsigned int m_idleCores;
// The array of cores in this node, allocated by Initialize and released by Cleanup.
GlobalCore * m_pCores;
// An array of core indices (initialized by Initialize to the identity order) used when the cores
// need to be visited in a sorted order.
unsigned int * m_pSortedCoreOrder;
/// <summary>
/// Initializes a processor node: records its id, processor group and affinity mask, allocates the
/// core array and the sorted-order index array, and assigns each core the processor number of the
/// corresponding set bit in the affinity mask.
/// </summary>
void Initialize(USHORT id, USHORT processorGroup, ULONG_PTR affinityMask)
{
m_id = id;
m_processorGroup = processorGroup;
m_nodeAffinity = affinityMask;
// One core per set bit in the node's affinity mask.
m_coreCount = NumberOfBitsSet(affinityMask);
m_allocatedCores = m_availableForAllocation = 0;
m_pCores = new GlobalCore[m_coreCount];
// NOTE(review): zero-filling assumes all-zero bytes are a valid initial state for GlobalCore —
// confirm this still holds if GlobalCore gains non-trivial members.
memset(m_pCores, 0, m_coreCount * sizeof(GlobalCore));
m_pSortedCoreOrder = new unsigned int[m_coreCount];
// Start from the identity ordering.
for (unsigned int i = 0; i < m_coreCount; ++i)
{
m_pSortedCoreOrder[i] = i;
}
// Walk the affinity mask bit by bit: 'i' is the processor number (bit position), 'j' counts the
// cores assigned so far; the loop ends once every set bit has been consumed.
for (unsigned int i = 0, j = 0; j < m_coreCount; ++i)
{
// 'i' may never run past the width of the mask.
ASSERT(i < sizeof(ULONG_PTR) * 8);
// Check if the LSB of the affinity mask is set.
if ((affinityMask & 1) != 0)
{
// Bit 0 of the affinity mask corresponds to processor number 0, bit 1 to processor number 1, etc...
m_pCores[j++].m_processorNumber = (BYTE) i;
}
// Right shift the affinity by 1.
affinityMask >>= 1;
}
}
/// <summary>
/// Creates a scheduler node from a global processor node. Used to create a representation of the node for
/// a scheduler proxy when allocation is complete. Also resets the original node so it is ready for the
/// next allocation attempt.
/// </summary>
void CloneAndReset(SchedulerNode * pNewNode)
{
ASSERT(pNewNode != NULL);
// Copy the base class portion of the node, which is shared.
// NOTE(review): this is a shallow byte copy of the ProcessorNode sub-object only — any pointer
// member introduced into ProcessorNode would be aliased by the clone, not duplicated.
memcpy(pNewNode, this, sizeof(ProcessorNode));
pNewNode->m_pCores = new SchedulerCore[m_coreCount];
memset(pNewNode->m_pCores, 0, m_coreCount * sizeof(SchedulerCore));
// Since we're using a memset to initialize the elements of each SchedulerCore, add an assert
// in case the implementation of List changes such that zeroing it out is not a 'good initial state'.
ASSERT(pNewNode->m_pCores[0].m_resources.Empty());
for (unsigned int i = 0; i < m_coreCount; ++i)
{
// Copy the base class portion of the core.
memcpy(&pNewNode->m_pCores[i], &m_pCores[i], sizeof(ProcessorCore));
// Reset the core state on the cores of the original node structure ('this')
// once a deep copy has been made.
m_pCores[i].m_coreState = ProcessorCore::Available;
m_pCores[i].m_idleSchedulers = 0;
}
m_allocatedCores = 0;
// By the time the clone is taken, nothing should remain marked available-for-allocation.
ASSERT(m_availableForAllocation == 0);
}
/// <summary>
/// Copies a processor node. Used to create a copy of the node from a scheduler proxy when allocation
/// is invoked in the "update" mode. Only the per-core states and the allocated count are copied;
/// 'this' must be in its reset state (every core Available) on entry.
/// </summary>
void Copy(SchedulerNode * pCopyFromNode)
{
for (unsigned int i = 0; i < m_coreCount; ++i)
{
ASSERT(m_pCores[i].m_coreState == ProcessorCore::Available);
m_pCores[i].m_coreState = pCopyFromNode->m_pCores[i].m_coreState;
}
m_allocatedCores = pCopyFromNode->m_allocatedCores;
}
/// <summary>
/// Copies the allocation changes in a processor node to the scheduler's node after an allocation increase, and resets it.
/// In debug builds, verifies that at most one core changed state, and only from Available (target) to Allocated (here).
/// </summary>
void CopyAndReset(SchedulerNode * pCopyToNode)
{
#if defined(_DEBUG)
unsigned int numChanged = 0;
#endif
for (unsigned int i = 0; i < m_coreCount; ++i)
{
#if defined(_DEBUG)
// Debug-only check: an allocation increase may only flip a single core, from Available on the
// target node to Allocated on this one.
if (m_pCores[i].m_coreState != pCopyToNode->m_pCores[i].m_coreState)
{
ASSERT(m_pCores[i].m_coreState == ProcessorCore::Allocated);
ASSERT(pCopyToNode->m_pCores[i].m_coreState == ProcessorCore::Available);
ASSERT(++numChanged <= 1);
}
#endif
pCopyToNode->m_pCores[i].m_coreState = m_pCores[i].m_coreState;
// Reset the global copy so it is ready for the next allocation pass.
m_pCores[i].m_coreState = ProcessorCore::Available;
m_pCores[i].m_idleSchedulers = 0;
}
ASSERT(pCopyToNode->m_allocatedCores <= m_allocatedCores);
pCopyToNode->m_allocatedCores = m_allocatedCores;
m_allocatedCores = 0;
ASSERT(m_availableForAllocation == 0);
}
/// <summary>
/// Deallocates memory allocated by the node (the core array and the sorted-order index array).
/// </summary>
void Cleanup(void)
{
delete [] m_pCores;
delete [] m_pSortedCoreOrder;
}
};
/// <summary>
/// Used to store information during static and dynamic allocation.
/// </summary>
struct AllocationData
{
// Index into an array of schedulers - used for sorting, etc.
unsigned int m_index;
// Additional allocation to give to a scheduler after proportional allocation decisions are made.
unsigned int m_allocation;
// Used to hold a scaled allocation value during proportional allocation (intermediate result of
// the proportional computation, hence floating point).
double m_scaledAllocation;
// The scheduler proxy this allocation data is for. Raw pointer; lifetime is managed elsewhere
// (presumably by the RM — this struct never deletes it).
SchedulerProxy *m_pProxy;
// Number of idle cores in a scheduler proxy during static allocation or dynamic core migration.
unsigned int m_numIdleCores;
// Number of idle cores in a scheduler proxy during static allocation or dynamic core migration that are also borrowed. During core
// migration these cores are the first to go.
unsigned int m_numBorrowedIdleCores;
};
/// <summary>
/// Allocation data specific to static allocation for a scheduler proxy.
/// </summary>
struct StaticAllocationData : public AllocationData
{
// A field used during static allocation to decide on an allocation proportional to each scheduler's desired value.
double m_adjustedDesired;
// Tells if a thread subscription is a part of this static allocation request.
bool m_fNeedsExternalThreadAllocation;
};
/// <summary>
/// Allocation data specific to dynamic core migration for a scheduler proxy. The anonymous union
/// below overlays receiver-side and giver-side bookkeeping; only one set of fields is meaningful
/// for a given proxy at a time.
/// </summary>
struct DynamicAllocationData : public AllocationData
{
// This variable is toggled back and forth during dynamic migration to instruct the RM whether or not
// an exact fit allocation should be attempted - i.e. if a node has 3 available cores, but this scheduler proxy
// needs only 2, keep searching in case a later node is found with 2 available cores.
bool m_fExactFitAllocation;
// Fully loaded is set to true when a scheduler is using all the cores that are allocated to it (no cores are idle)
// AND it has less than its desired number of cores.
bool m_fFullyLoaded;
// Number suggested as an appropriate allocation for the scheduler proxy, by the hill climbing instance.
unsigned int m_suggestedAllocation;
#if defined(CONCRT_TRACING)
// NOTE(review): presumably the pre-adjustment value of m_suggestedAllocation, kept for tracing
// builds only — confirm against the code that populates it.
unsigned int m_originalSuggestedAllocation;
#endif
union
{
// Struct used for a receiving proxy.
struct
{
// Number of nodes in the scheduler proxy that are partially allocated.
unsigned int m_numPartiallyFilledNodes;
// As we go through dynamic allocation, the starting node index moves along the array of sorted nodes,
// in a scheduling proxy that is receiving cores.
unsigned int m_startingNodeIndex;
};
// Struct used for a giving proxy.
struct
{
// Maximum number of borrowed idle cores this scheduler can give up.
unsigned int m_borrowedIdleCoresToMigrate;
// Maximum number of borrowed in-use cores this scheduler can give up.
unsigned int m_borrowedInUseCoresToMigrate;
// Maximum number of owned cores this scheduler can give up.
unsigned int m_ownedCoresToMigrate;
};
};
};
} // namespace details
} // namespace Concurrency