📄 numa.c
static PA
ReverseRemap(PA paddr, int cpunum)
{
    if ((paddr & remapVec->RemapMask[cpunum]) == remapVec->NodeAddr[cpunum]) {
        return paddr - remapVec->NodeAddr[cpunum];
    } else if ((paddr & remapVec->RemapMask[cpunum]) == 0) {
        return paddr + remapVec->NodeAddr[cpunum];
    } else {
        return paddr;
    }
}

#define BACKMAP_PADDR(paddr,cpunum) \
    (((paddr & backmapMask) \
      || !remapVec->RemapEnable[cpunum]) \
     ? paddr : ReverseRemap(paddr, cpunum))
#endif

#ifdef SOLO
#define BACKMAP_PADDR(paddr,cpunum) (paddr)
#endif

/*****************************************************************
 * DirIsASharer
 * Check if the cpunum is a sharer
 *****************************************************************/
static int
DirIsASharer(DirState *dirState, int cpunum)
{
    return (dirState->bitmap[cpunum/8] & (1 << (cpunum % 8)));
}

/*****************************************************************
 * DirSetASharer
 *
 * Add a cpu to the list of processors sharing a cache line
 *****************************************************************/
static void
DirSetASharer(DirState *dirState, int cpunum, int exclusive)
{
    if (!DirIsASharer(dirState, cpunum)) {
        dirState->numsharers++;
        dirState->bitmap[cpunum/8] |= (1 << (cpunum % 8));
    }
    dirState->dirty = exclusive;
    if (exclusive && (dirState->numsharers != 1)) {
        CPUError("Directory protocol screwup\n");
    }
}

/*****************************************************************
 * DirClearASharer
 *
 * Remove a processor from the list of cache line sharers
 *****************************************************************/
static void
DirClearASharer(DirState *dirState, int cpunum)
{
    if (!DirIsASharer(dirState, cpunum))
        return;
    dirState->numsharers--;
    dirState->bitmap[cpunum/8] &= ~(1 << (cpunum % 8));
    if (dirState->numsharers == 0)
        dirState->dirty = 0;
}

/******** NUMA STATE MACHINE ROUTINES **********/

/*
 * Queue up for the memory controller
 */
static void
DoDCDelay(int num, MemRequest *mreq)
{
    MemState *mState = memState[num];

    ASSERT((mreq->dc_delay_time == NUMA_PILOCAL_DC_TIME) ||
           (mreq->dc_delay_time == NUMA_PIREMOTE_DC_TIME) ||
           (mreq->dc_delay_time == NUMA_NILOCAL_DC_TIME) ||
           (mreq->dc_delay_time == NUMA_NIREMOTE_DC_TIME));
    if (List_IsEmpty(&mState->localQueue)) {
        EventDoCallback(num, mreq->hdr.rout, (EventCallbackHdr *) mreq, 0,
                        mreq->dc_delay_time);
    }
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&mState->localQueue));
}

/*
 * Called after the fixed local delay for a request
 * Will queue for the local directory controller
 */
static void
RequestLocal(int cpuNum, EventCallbackHdr *hdr, void *v)
{
    MemRequest *mreq = (MemRequest *)hdr;

    mreq->state = REQ_LOCAL_DC_WAIT;
    mreq->hdr.rout = RequestLocalDC;
    if (mreq->memnum == mreq->localMemNum) {
        mreq->dc_delay_time = NUMA_PILOCAL_DC_TIME;
    } else {
        mreq->dc_delay_time = NUMA_PIREMOTE_DC_TIME;
    }
    DoDCDelay(mreq->localMemNum, mreq);
}

/*
 * Called after the queued local DC delay for a request is done
 * Will check if the request needs to go remote
 * and send to the appropriate directory controller
 */
static void
RequestLocalDC(int num, EventCallbackHdr *hdr, void *v)
{
    MemRequest *mreq = (MemRequest *)hdr;
    MemState *mState = memState[num];

    /*
     * Remove request from list and schedule next callback
     */
    List_Remove(MREQ_TO_LIST(mreq));
    if (!List_IsEmpty(&mState->localQueue)) {
        MemRequest *newReq;
        /*
         * Start the next memory request.
         */
        newReq = LIST_TO_MREQ(List_First(&(mState->localQueue)));
        EventDoCallback(num, newReq->hdr.rout, (EventCallbackHdr *) newReq, 0,
                        newReq->dc_delay_time);
    }

    /* Send to the directory controller, with net delay if remote */
    if (mreq->memnum == mreq->localMemNum) {
        DirContEnter(mreq->memnum, mreq);
    } else {
        mreq->state = REQ_NET_WAIT;
        EventDoCallback(mreq->cpunum, RequestNet, (EventCallbackHdr *) mreq, 0,
                        NUMA_NET_TIME);
    }
}

/*
 * Called after the fixed net delay for a request is done
 * Now queue for the destination (remote) DC
 */
static void
RequestNet(int cpuNum, EventCallbackHdr *hdr, void *v)
{
    MemRequest *mreq = (MemRequest *)hdr;

    mreq->state = REQ_REMOTE_DC_WAIT;
    mreq->hdr.rout = RequestRemoteDC;
    mreq->dc_delay_time = NUMA_NILOCAL_DC_TIME;
    DoDCDelay(mreq->memnum, mreq);
}

/*
 * Called after the queued remote DC delay for a request
 * Will send to the directory controller
 */
static void
RequestRemoteDC(int num, EventCallbackHdr *hdr, void *v)
{
    MemRequest *mreq = (MemRequest *)hdr;
    MemState *mState = memState[num];

    /*
     * Remove request from list and schedule next callback
     */
    List_Remove(MREQ_TO_LIST(mreq));
    if (!List_IsEmpty(&mState->localQueue)) {
        MemRequest *newReq;
        /*
         * Start the next memory request.
         */
        newReq = LIST_TO_MREQ(List_First(&(mState->localQueue)));
        EventDoCallback(num, newReq->hdr.rout, (EventCallbackHdr *) newReq, 0,
                        newReq->dc_delay_time);
    }

    /* Send to the directory controller */
    DirContEnter(mreq->memnum, mreq);
}

/*
 * Make the decision on where to get the data from or NAK
 * If uncached op, next step memory
 * If pending set, this request will be NAKed
 * If !pending and clean and not cached, next step memory
 * If !pending and shared and GET, next step memory
 * If !pending and shared and GETX, next step invalidate
 *    (memory access overlaps, and is not modelled)
 * If !pending and dirty and GET, next step downgrade
 * If !pending and dirty and GETX, next step invalidate
 */
static void
DirContEnter(int num, MemRequest *mreq)
{
    DirState *dirState = mreq->dirState;
    int nextop;
#define NEXTOP_MEM  1
#define NEXTOP_INV  2
#define NEXTOP_NAK  3
#define NEXTOP_NONE 4

    mreq->state = IN_MEM_DC;

    /* Decide the next operation based on request type and state of the
       directory, and change the result status if necessary */
    switch (mreq->cmd & MEMSYS_CMDMASK) {
    case MEMSYS_GET:
        if (dirState->pending) {
            nextop = NEXTOP_NAK;
        } else if (dirState->dirty) {
            StatsInc(COUNT_REMOTEDIRTY, mreq, 1);
            nextop = NEXTOP_INV;
            mreq->result = MEMSYS_RESULT_CACHE|MEMSYS_RESULT_DOWNGRADE;
            ASSERT(!dirState->pending);
            dirState->pending = 1;
        } else {
            nextop = NEXTOP_MEM;
            ASSERT(!dirState->pending);
            dirState->pending = 1;
        }
        break;
    case MEMSYS_GETX:
        if (dirState->pending) {
            nextop = NEXTOP_NAK;
        } else if (dirState->numsharers > 0) {
            nextop = NEXTOP_INV;
            if (dirState->dirty) {
                StatsInc(COUNT_REMOTEDIRTY, mreq, 1);
                mreq->result = MEMSYS_RESULT_INVALIDATE|MEMSYS_RESULT_CACHE;
            } else {
                StatsInc(COUNT_EXCLUSIVEONSHARED, mreq, 1);
                mreq->result = MEMSYS_RESULT_INVALIDATE|MEMSYS_RESULT_MEMORY;
            }
            ASSERT(!dirState->pending);
            dirState->pending = 1;
        } else {
            nextop = NEXTOP_MEM;
            ASSERT(!dirState->pending);
            dirState->pending = 1;
        }
        break;
    case MEMSYS_UPGRADE:
        if (dirState->pending) {
            nextop = NEXTOP_NAK;
        } else if (!DirIsASharer(dirState, mreq->cpunum)) {
            /*
             * This is required because inval acks have no delay,
             * therefore the directory can remove the sharer from the list
             * after the upgrade is issued. NAK will cause retry as GETX.
             */
            nextop = NEXTOP_NAK;
        } else if (dirState->numsharers > 1) {
            StatsInc(COUNT_EXCLUSIVEONSHARED, mreq, 1);
            nextop = NEXTOP_INV;
            mreq->result = MEMSYS_RESULT_INVALIDATE|MEMSYS_RESULT_MEMORY;
            ASSERT(!dirState->pending);
            dirState->pending = 1;
        } else {
            nextop = NEXTOP_NONE;
        }
        break;
    case MEMSYS_UNCWRITE:
    case MEMSYS_UNCWRITE_ACCELERATED:
    case MEMSYS_UNCREAD:
        nextop = NEXTOP_MEM;
        break;
    case MEMSYS_WRITEBACK:
        nextop = NEXTOP_MEM;
        break;
    case MEMSYS_REPLACEMENT_HINT:
        nextop = NEXTOP_NONE;
        break;
    default:
        CPUError("Unknown memsys command (0x%x) in UmaCmd\n", mreq->cmd);
        nextop = NEXTOP_NONE;   /* Quiet compiler warning message */
    }

    /* Forward the request based on the next operation */
    switch (nextop) {
    case NEXTOP_NAK:
        mreq->status = MEMSYS_STATUS_NAK;
        StatsInc(COUNT_NAKS, mreq, 1);
        DirContDone(mreq->memnum, mreq);
        break;
    case NEXTOP_MEM:
        StatsInc(COUNT_MEMORYACCESS, mreq, 1);
        mreq->hdr.rout = MemContDone;
        MemContDelay(mreq->memnum, mreq);
        break;
    case NEXTOP_INV:
        DoInvalidates(mreq);
        break;
    case NEXTOP_NONE:
        DirContDone(mreq->memnum, mreq);
        break;
    }
    return;
}

/* Queue up for the memory controller */
static void
MemContDelay(int num, MemRequest *mreq)
{
    MemState *mState = memState[num];

    mreq->state = MEM_WAIT;
    if (List_IsEmpty(&mState->memoryQueue)) {
        EventDoCallback(num, mreq->hdr.rout, (EventCallbackHdr *) mreq, 0,
                        NUMA_MEM_TIME);
    }
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&mState->memoryQueue));
}

/* Memory controller delay done */
static void
MemContDone(int num, EventCallbackHdr *hdr, void *v)
{
    MemRequest *mreq = (MemRequest *)hdr;
    MemState *mState = memState[num];

    /*
     * Remove request from list and schedule next callback
     */
    List_Remove(MREQ_TO_LIST(mreq));
    if (!List_IsEmpty(&mState->memoryQueue)) {
        MemRequest *newReq;
        /*
         * Start the next memory request.
         */
        newReq = LIST_TO_MREQ(List_First(&(mState->memoryQueue)));
        EventDoCallback(num, newReq->hdr.rout, (EventCallbackHdr *) newReq, 0,
                        NUMA_MEM_TIME);
    }
    DirContDone(mreq->memnum, mreq);
}

/* Sharing writebacks queue up for the memory controller */
static void
SharingWBDelay(int num, InvalRequest *lreq)
{
    MemState *mState = memState[num];

    lreq->state = WB_MEM_WAIT;
    if (List_IsEmpty(&mState->memoryQueue)) {
        EventDoCallback(num, lreq->hdr.rout, (EventCallbackHdr *) lreq, 0,
                        NUMA_MEM_TIME);
    }
    List_Insert(INVAL_TO_LIST(lreq), LIST_ATREAR(&mState->memoryQueue));
}

/* Memory controller delay done for sharing writebacks */
static void
SharingWBDone(int num, EventCallbackHdr *hdr, void *v)
{
    InvalRequest *lreq = (InvalRequest *)hdr;
    MemState *mState = memState[num];

    /*
     * Remove request from list and schedule next callback
     */
    List_Remove(INVAL_TO_LIST(lreq));
    if (!List_IsEmpty(&mState->memoryQueue)) {
        InvalRequest *newReq;
        /*
         * Start the next memory request.
         */
        newReq = LIST_TO_INVAL(List_First(&(mState->memoryQueue)));
        EventDoCallback(num, newReq->hdr.rout, (EventCallbackHdr *) newReq, 0,
                        NUMA_MEM_TIME);
    }
    /* Sharing writeback done. Release the structure at this point */
    lreq->state = ALL_DONE;
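
/*****************************************************************
 * Illustrative sketch (NOT part of the original numa.c):
 * a minimal, self-contained demo of the sharer-bitmap bookkeeping
 * used by DirIsASharer/DirSetASharer/DirClearASharer above.
 * The struct below keeps only the fields those routines touch
 * (bitmap, numsharers, dirty); the DemoDirState name, the
 * DEMO_MAX_CPUS value, and the omission of the simulator's error
 * handling are assumptions made for this standalone example.
 *****************************************************************/
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_CPUS 32                     /* hypothetical CPU count */

typedef struct DemoDirState {
    unsigned char bitmap[DEMO_MAX_CPUS / 8]; /* one presence bit per CPU */
    int numsharers;                          /* population count of bitmap */
    int dirty;                               /* line held exclusively? */
} DemoDirState;

static int
DemoIsASharer(DemoDirState *d, int cpu)
{
    return d->bitmap[cpu / 8] & (1 << (cpu % 8));
}

static void
DemoSetASharer(DemoDirState *d, int cpu, int exclusive)
{
    if (!DemoIsASharer(d, cpu)) {
        d->numsharers++;
        d->bitmap[cpu / 8] |= (1 << (cpu % 8));
    }
    d->dirty = exclusive;                    /* exclusive implies one sharer */
}

static void
DemoClearASharer(DemoDirState *d, int cpu)
{
    if (!DemoIsASharer(d, cpu))
        return;
    d->numsharers--;
    d->bitmap[cpu / 8] &= ~(1 << (cpu % 8));
    if (d->numsharers == 0)
        d->dirty = 0;                        /* last sharer gone -> clean */
}

int
main(void)
{
    DemoDirState d;
    memset(&d, 0, sizeof d);

    DemoSetASharer(&d, 3, 0);                /* CPUs 3 and 9 share the line */
    DemoSetASharer(&d, 9, 0);
    printf("sharers=%d dirty=%d cpu9? %d\n",
           d.numsharers, d.dirty, DemoIsASharer(&d, 9) != 0);

    DemoClearASharer(&d, 3);                 /* drop CPU 3 */
    DemoClearASharer(&d, 9);                 /* drop CPU 9 -> clean again */
    printf("sharers=%d dirty=%d\n", d.numsharers, d.dirty);
    return 0;
}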
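
/*****************************************************************
 * Illustrative sketch (NOT part of the original numa.c):
 * the queueing pattern shared by DoDCDelay/RequestLocalDC and
 * MemContDelay/MemContDone above -- a completion callback is
 * scheduled only when the queue was empty, and each completion
 * pops the head and schedules the next entry.  The toy event
 * loop, Req struct, and SERVICE_TIME constant are assumptions
 * standing in for the simulator's event and list machinery.
 *****************************************************************/
#include <stdio.h>

#define MAX_REQS     8
#define SERVICE_TIME 10                      /* hypothetical controller delay */

typedef struct Req {
    int  id;
    long doneAt;                             /* completion time, once scheduled */
} Req;

static Req  *queue[MAX_REQS];                /* FIFO of waiting requests */
static int   qhead, qtail, qlen;
static long  now;                            /* simulated clock */

/* Schedule completion of the head request SERVICE_TIME from now. */
static void
ScheduleHead(void)
{
    queue[qhead]->doneAt = now + SERVICE_TIME;
}

/* Counterpart of MemContDelay: enqueue, and start service only if idle. */
static void
ControllerDelay(Req *r)
{
    int wasIdle = (qlen == 0);

    queue[qtail] = r;
    qtail = (qtail + 1) % MAX_REQS;
    qlen++;
    if (wasIdle)
        ScheduleHead();
}

/* Counterpart of MemContDone: pop the head, then start the next if any. */
static void
ControllerDone(void)
{
    Req *r = queue[qhead];

    qhead = (qhead + 1) % MAX_REQS;
    qlen--;
    printf("req %d done at t=%ld\n", r->id, now);
    if (qlen > 0)
        ScheduleHead();
}

int
main(void)
{
    Req a = {1, 0}, b = {2, 0}, c = {3, 0};

    ControllerDelay(&a);                     /* all three arrive at t=0 */
    ControllerDelay(&b);
    ControllerDelay(&c);

    /* Toy event loop: advance time to each completion in turn. */
    while (qlen > 0) {
        now = queue[qhead]->doneAt;
        ControllerDone();
    }
    return 0;                                /* completions at t=10, 20, 30 */
}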
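
/*****************************************************************
 * Illustrative sketch (NOT part of the original numa.c):
 * the GET/GETX/UPGRADE decision paths of DirContEnter above,
 * condensed into a pure function over the directory fields it
 * inspects (pending, dirty, numsharers).  The enum names and the
 * isSharer flag standing in for DirIsASharer() are assumptions;
 * the uncached, writeback, and replacement-hint cases are omitted.
 *****************************************************************/
#include <assert.h>

enum NextOp { OP_MEM, OP_INV, OP_NAK, OP_NONE };
enum Cmd    { CMD_GET, CMD_GETX, CMD_UPGRADE };

static enum NextOp
DirDecision(enum Cmd cmd, int pending, int dirty, int numsharers, int isSharer)
{
    if (pending)
        return OP_NAK;                       /* line busy -> NAK, retry later */
    switch (cmd) {
    case CMD_GET:
        return dirty ? OP_INV : OP_MEM;      /* dirty -> downgrade the owner */
    case CMD_GETX:
        return (numsharers > 0) ? OP_INV : OP_MEM;
    case CMD_UPGRADE:
        if (!isSharer)
            return OP_NAK;                   /* lost the line -> retry as GETX */
        return (numsharers > 1) ? OP_INV : OP_NONE;
    }
    return OP_NONE;
}

int
main(void)
{
    /* Clean, uncached line: both GET and GETX go straight to memory. */
    assert(DirDecision(CMD_GET,  0, 0, 0, 0) == OP_MEM);
    assert(DirDecision(CMD_GETX, 0, 0, 0, 0) == OP_MEM);
    /* Dirty in another cache: GET forces a downgrade, GETX an invalidate. */
    assert(DirDecision(CMD_GET,  0, 1, 1, 0) == OP_INV);
    assert(DirDecision(CMD_GETX, 0, 1, 1, 0) == OP_INV);
    /* Sole sharer upgrading needs no further action. */
    assert(DirDecision(CMD_UPGRADE, 0, 0, 1, 1) == OP_NONE);
    /* Anything arriving while the line is pending is NAKed. */
    assert(DirDecision(CMD_GET, 1, 0, 0, 0) == OP_NAK);
    return 0;
}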