/* busuma.c -- bus-based UMA memory-system timing model */
wbBuffers[i].entries[j].cmd = MEMSYS_WRITEBACK;
/* NOTE(review): this chunk begins inside the memory-system init function
 * (its opening lines are above this view).  The code below finishes the
 * per-CPU writeback-buffer entries, then sets up the per-machine bus
 * arbitration queues, the global MemRequest free pool, and one MemState
 * record per simulated memory. */
#ifdef DATA_HANDLING
wbBuffers[i].entries[j].data = (char *)calloc(1,SCACHE_LINE_SIZE);
#else
wbBuffers[i].entries[j].data = NULL;
#endif
wbBuffers[i].entries[j].addr = 0;
wbBuffers[i].entries[j].state = REQ_FREE;
/* every writeback-buffer entry starts out on its CPU's free list */
List_Insert(MREQ_TO_LIST(&wbBuffers[i].entries[j]), LIST_ATREAR(&wbBuffers[i].free));
}
wbBuffers[i].numOverflowUsed = 0;
List_Init(&wbBuffers[i].overflow);
}
/* per-machine low/high priority bus arbitration queues */
for (m = 0; m < NUM_MACHINES; m++) {
    List_Init(&lowPri[m]);
    List_Init(&highPri[m]);
}
/* zero all MemRequests and place them on the global free list */
List_Init(&freeMemReqList);
for (i = 0; i < MAX_MREQ; i++) {
    MemRequest *mreq = MemRequestStorage + i;
    bzero((char *) (mreq), sizeof(MemRequestStorage[0]));
    List_InitElement(MREQ_TO_LIST(mreq));
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&freeMemReqList));
}
/* one MemState per simulated memory; wbHdr is a canned WRITEBACK request
 * header that MemoryArrive reuses after the real WB entry is freed */
for (i = 0; i < TOTAL_MEMORIES; i++) {
    memState[i] = (MemState *) ZMALLOC(sizeof(MemState), "memState");
    ASSERT(memState[i]);
    List_Init(&memState[i]->memoryQueue);
    bzero((char *) &memState[i]->stats, sizeof(BusUmaStats));
    memState[i]->wbHdr.cmd = MEMSYS_WRITEBACK;
    memState[i]->wbHdr.memnum = i;
}
#ifndef SOLO
BusUmaInitRemap();
#endif
}

/*
 * FreeWBentry -- release a writeback-buffer entry.
 *
 * Pulls the entry out of whatever stage of the writeback pipeline it is
 * in (pending CPU-to-bus overhead callback, bus arbitration queue, or bus
 * transfer), returns it to the owning CPU's free list, and -- if the CPU
 * had stalled because the buffer was full -- reissues the oldest
 * overflowed request and unstalls the CPU when that reissue succeeds.
 */
static void
FreeWBentry(MemRequest *mreq)
{
    if (mreq->state == WB_OVERFLOW) {
        /* overflow entries are not part of the fixed buffer; just clear
         * the fields in place and leave the free-list accounting alone */
        mreq->reqAddr = MEMSYS_NOADDR;
        mreq->len = 0;
        mreq->data = NULL;
        return;
    } else if (mreq->state == CPU_TO_BUS_OVERHEAD) {
        /* still waiting out the CPU-to-bus overhead; cancel the event */
        EventCallbackRemove((EventCallbackHdr*)mreq);
        memState[mreq->memnum]->stats.buCounts[BUC_KILLEDWBS]++;
    } else if (mreq->state == BUS_ARB_WAIT) {
        /* queued for bus arbitration; unlink from the arbitration queue */
        List_Remove(MREQ_TO_LIST(mreq));
        memState[mreq->memnum]->stats.buCounts[BUC_KILLEDWBS]++;
    } else if (mreq->state == BUS_TRANSFER_WAIT) {
        /* this can get called from BusUmaDrain */
        EventCallbackRemove((EventCallbackHdr*)mreq);
        memState[mreq->memnum]->stats.buCounts[BUC_KILLEDWBS]++;
    } else {
        /* only remaining legal stage: sitting in the memory queue */
        ASSERT(mreq->state == MEM_BUSY_WAIT);
    }
    mreq->state = REQ_FREE;
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&wbBuffers[mreq->cpunum].free));
    wbBuffers[mreq->cpunum].numUsed--;
    ASSERT(wbBuffers[mreq->cpunum].numUsed >=0);
    if (!List_IsEmpty(&wbBuffers[mreq->cpunum].overflow)) {
        /* cpu was stalled waiting for space in writeback buffer, so
           reissue the request */
        Result result;
        List_Links *itemPtr = List_First(&wbBuffers[mreq->cpunum].overflow);
        MemRequest *ovfReq = LIST_TO_MREQ(itemPtr);
        List_Remove(itemPtr);
        wbBuffers[mreq->cpunum].numOverflowUsed--;
        result = BusUmaCmd(mreq->cpunum, ovfReq->cmd, ovfReq->addr,
                           ovfReq->transId, ovfReq->reqAddr, ovfReq->len,
                           ovfReq->data);
        ovfReq->state = REQ_FREE;
        List_Insert(MREQ_TO_LIST(ovfReq), LIST_ATREAR(&freeMemReqList));
        if (result == SUCCESS) {
            ASSERT(CPUVec.Unstall);
            CPUVec.Unstall(mreq->cpunum);
        }
    }
}

/*
 * IsInWBBuffer -- look for a pending writeback to `addr` from this CPU.
 *
 * Scans the fixed writeback-buffer entries (only those early enough in
 * the pipeline -- CPU_TO_BUS_OVERHEAD or BUS_ARB_WAIT -- to still be
 * caught) and then the overflow list; returns the matching MemRequest or
 * NULL.  NOTE(review): the original header comment described a `read`
 * in/out-data argument that this version of the function does not take --
 * comment was stale.
 */
static MemRequest *
IsInWBBuffer(int cpuNum, PA addr)
{
    List_Links *itemPtr, *nextPtr;
    int i;
    if (wbBuffers[cpuNum].numUsed != 0) {
        for (i = 0; i < WB_BUFFER_SIZE; i++) {
            if ((wbBuffers[cpuNum].entries[i].addr == addr) &&
                ((wbBuffers[cpuNum].entries[i].state == CPU_TO_BUS_OVERHEAD) ||
                 (wbBuffers[cpuNum].entries[i].state == BUS_ARB_WAIT))) {
                return &wbBuffers[cpuNum].entries[i];
            }
        }
    }
    /* overflow entries are matched on reqAddr rather than addr */
    for (itemPtr = List_First(&wbBuffers[cpuNum].overflow), nextPtr = NULL;
         !List_IsAtEnd(&wbBuffers[cpuNum].overflow, itemPtr);
         itemPtr = nextPtr) {
        nextPtr = List_Next(itemPtr);
        if (LIST_TO_MREQ(itemPtr)->reqAddr == addr)
            return LIST_TO_MREQ(itemPtr);
    }
    return NULL;
}

/*
 * NakRequest -- complete a request back to the cache with NAK status and
 * return the MemRequest to the global free list.
 */
static void
NakRequest(MemRequest *mreq)
{
    CacheCmdDone(mreq->cpunum, mreq->transId, mreq->mode,
                 MEMSYS_STATUS_NAK, mreq->result, mreq->data);
    mreq->state = REQ_FREE;
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&freeMemReqList));
    memState[mreq->memnum]->stats.counts[COUNT_NAKS]++;
}

/*
 * ErrorRequest -- complete a request back to the cache with ERROR status
 * and return the MemRequest to the global free list.
 * NOTE(review): errors are tallied under COUNT_NAKS, the same counter
 * NakRequest uses -- confirm no separate error counter was intended.
 */
static void
ErrorRequest(MemRequest *mreq)
{
    CacheCmdDone(mreq->cpunum, mreq->transId, mreq->mode,
                 MEMSYS_STATUS_ERROR, mreq->result, mreq->data);
    mreq->state = REQ_FREE;
    List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&freeMemReqList));
    memState[mreq->memnum]->stats.counts[COUNT_NAKS]++;
}

/*
 * RTMerge -- park `mreq` on request-table entry `index`'s merge queue;
 * it will be completed together with the initiator's reply (CPUArrive).
 */
static void
RTMerge(MemRequest *mreq, int index)
{
    mreq->state = MEM_MERGE_WAIT;
    mreq->rtIndex = index;
    List_Insert(MREQ_TO_LIST(mreq),
                LIST_ATREAR(&requestTable[mreq->machnum][index].merge));
    if (debugMem)
        CPUPrint("DBGBSU %lld %d merge addr (0x%x)\n",
                 CPUVec.CycleCount(0), mreq->cpunum, mreq->addr);
}

/*
 * RTDelay -- park `mreq` on request-table entry `index`'s delay queue;
 * it conflicts with the in-flight initiator and is reissued when the
 * initiator's reply comes back (CPUArrive).
 */
static void
RTDelay(MemRequest *mreq, int index)
{
    mreq->state = MEM_CONFLICT_WAIT;
    mreq->rtIndex = index;
    List_Insert(MREQ_TO_LIST(mreq),
                LIST_ATREAR(&requestTable[mreq->machnum][index].delay));
    if (debugMem)
        CPUPrint("DBGBSU %lld %d delay addr (0x%x)\n",
                 CPUVec.CycleCount(0), mreq->cpunum, mreq->addr);
}

/*
 * CheckForMergeDelay -- walk `list` moving requests that collide with the
 * newly inserted initiator `mreq` onto its merge or delay queue.  Also,
 * when `mreq` just consumed the last request-table slot (numFree == 1),
 * push any un-tabled GET/GETX on `list` onto the table-full queue.
 */
static void
CheckForMergeDelay(List_Links *list, MemRequest *mreq, int numFree)
{
    List_Links *itemPtr, *nextPtr;
    for (itemPtr = List_First(list), nextPtr = NULL;
         !List_IsAtEnd(list, itemPtr);
         itemPtr = nextPtr) {
        MemRequest *req2 = LIST_TO_MREQ(itemPtr);
        nextPtr = List_Next(itemPtr);
        if (req2->addr == mreq->addr) {
            switch (req2->cmd & MEMSYS_CMDMASK) {
            case MEMSYS_GET:
                /* if mode shared, merge, otherwise fall through to
                   GETX/UPGRADE */
                if (mreq->mode == MEMSYS_SHARED) {
                    List_Remove(itemPtr);
                    RTMerge(req2, mreq->rtIndex);
                    break;
                }
                /* FALLTHROUGH */
            case MEMSYS_GETX:
            case MEMSYS_UPGRADE:
                List_Remove(itemPtr);
                RTDelay(req2, mreq->rtIndex);
                break;
            case MEMSYS_WRITEBACK:
                /* this is OK because the writeback will be killed in
                   cachesnoop */
                break;
            default:
                CPUError("Unknown memsys command (0x%x) in BusUma::RTInsert\n", req2->cmd);
            }
        } else {
            /* if we took the last entry and a get/getx is arbitrating
               then add it to RTfull queue */
            if ((numFree == 1) && (req2->rtIndex == -1) &&
                (req2->state != RT_OVERFLOW) &&
                (((req2->cmd & MEMSYS_CMDMASK) == MEMSYS_GET) ||
                 ((req2->cmd & MEMSYS_CMDMASK) == MEMSYS_GETX))) {
                req2->state = RT_OVERFLOW;
                List_Remove(itemPtr);
                List_Insert(itemPtr, LIST_ATREAR(&requestTableFullQ[mreq->machnum]));
            }
        }
    }
}

/*
 * RTInsert -- claim the first free request-table slot for `mreq`, record
 * it as the slot's initiator, then sweep the pending queues for
 * same-address requests to merge or delay behind it.
 */
static void
RTInsert(MemRequest *mreq)
{
    int index, numFree = 0;
    for (index = 0; index < REQUEST_TABLE_SIZE; index++) {
        if (!requestTable[mreq->machnum][index].used) {
            if (numFree == 0) {
                /* first free slot found: take it */
                requestTable[mreq->machnum][index].used = 1;
                requestTable[mreq->machnum][index].addr = mreq->addr;
                requestTable[mreq->machnum][index].initiator = mreq;
                mreq->rtIndex = index;
                if (debugMem)
                    CPUPrint("DBGBSU %lld %d addr (0x%x) assigned RTindex %d\n",
                             CPUVec.CycleCount(0), mreq->cpunum, mreq->addr, index);
            }
            numFree++;
        }
    }
    ASSERT(numFree != 0);
    /* add all pending requests to same addr merge/delay queues
       also add all get/getx to request delay queue if this request
       took the last spot */
    CheckForMergeDelay(&highPri[mreq->machnum], mreq, numFree);
    CheckForMergeDelay(&requestTableFullQ[mreq->machnum], mreq, numFree);
}

/*
 * MemoryDone -- event callback fired when a memory access finishes.
 * GET/GETX replies go arbitrate for the bus; sharing writebacks just free
 * their request; plain writebacks need nothing (their WB entry was freed
 * on arrival).  Finally start the next queued access for this memory or
 * mark the memory idle.
 */
static void
MemoryDone(int cpuNum, EventCallbackHdr *hdr, void *arg)
{
    MemRequest *mreq = (MemRequest *)hdr;
    ASSERT(mreq->state == MEM_ACTIVE_WAIT);
    ASSERT(M_FROM_CPU(cpuNum) == mreq->machnum);
    switch (mreq->cmd & MEMSYS_CMDMASK) {
    case MEMSYS_GET:
    case MEMSYS_GETX:
        /* data is ready; contend for the bus to deliver the reply */
        mreq->state = REPLY_BUS_ARB_WAIT;
        List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&highPri[mreq->machnum]));
        Arbitrate(mreq->machnum);
        break;
    case MEMSYS_WRITEBACK:
        break;
    case MEMSYS_SHARING_WRITEBACK:
        /* free the mreq */
        mreq->state = REQ_FREE;
        List_Insert(MREQ_TO_LIST(mreq), LIST_ATREAR(&freeMemReqList));
        break;
    default:
        CPUError("Bad mreq->cmd (0x%x)\n", mreq->cmd);
    }
    /* now check for more requests */
    if (!List_IsEmpty(&memState[mreq->memnum]->memoryQueue))
        MemoryArrive(mreq->memnum);
    else
        memState[mreq->memnum]->busy = 0;
}

/* a request just arrived at this memory
 * copy data from memory and stall
 */
static void
MemoryArrive(int memnum)
{
    MemRequest *mreq;
    ASSERT(!List_IsEmpty(&memState[memnum]->memoryQueue));
    mreq = LIST_TO_MREQ(List_First(&memState[memnum]->memoryQueue));
    List_Remove(MREQ_TO_LIST(mreq));
    ASSERT(mreq->state == MEM_BUSY_WAIT);
    memState[memnum]->busy = 1;
    memState[mreq->memnum]->stats.counts[COUNT_MEMORYACCESS]++;
    /* charge the cycles this request spent queued behind the memory */
    memState[memnum]->stats.buCounts[BUC_MEMQUEUE] +=
        CPUVec.CycleCount(mreq->cpunum) - mreq->memQEnter;
    switch (mreq->cmd & MEMSYS_CMDMASK) {
    case MEMSYS_GET:
        if (mreq->cmd & MEMSYS_DMAFLAVOR) {
            /* DMA reads copy the data into the caller's buffer */
            bcopy(DATA_ADDR(mreq->machnum, mreq->reqAddr), mreq->data, mreq->len);
        } else {
            /* cache fills just point at the simulated memory image */
            mreq->data = (byte*)DATA_ADDR(mreq->machnum, mreq->addr);
        }
        if (debugMem)
            CPUPrint("DBGBSU %lld get arrived at memory (0x%x)\n",
                     CPUVec.CycleCount(0), mreq->addr);
        break;
    case MEMSYS_GETX:
        if (mreq->cmd & MEMSYS_DMAFLAVOR) {
            /* DMA writes copy from the caller's buffer into memory */
            bcopy(mreq->data, DATA_ADDR(mreq->machnum, mreq->reqAddr), mreq->len);
        } else {
            mreq->data = (byte*)DATA_ADDR(mreq->machnum, mreq->addr);
        }
        if (debugMem)
            CPUPrint("DBGBSU %lld getx arrived at memory (0x%x)\n",
                     CPUVec.CycleCount(0), mreq->addr);
        break;
    case MEMSYS_WRITEBACK:
        if (debugMem)
            CPUPrint("DBGBSU %lld wb arrived at memory (0x%x)\n",
                     CPUVec.CycleCount(0), mreq->addr);
#ifdef DATA_HANDLING
        bcopy(mreq->data, DATA_ADDR(mreq->machnum, mreq->addr), SCACHE_LINE_SIZE);
#endif
        /* the WB buffer entry can be recycled immediately; model the
           remaining memory busy time with this memory's canned wbHdr */
        FreeWBentry(mreq);
        mreq = &memState[memnum]->wbHdr;
        break;
    case MEMSYS_SHARING_WRITEBACK:
        if (debugMem)
            CPUPrint("DBGBSU %lld sharingWB arrived at memory (0x%x)\n",
                     CPUVec.CycleCount(0), mreq->addr);
        /* don't really need to do anything except make memory busy */
        break;
    default:
        CPUError("Bad mreq->cmd (0x%x)\n", mreq->cmd);
    }
    mreq->state = MEM_ACTIVE_WAIT;
    EventDoCallback(FIRST_CPU(mreq->machnum), MemoryDone,
                    (EventCallbackHdr *)mreq, NULL, memoryCycles);
}

/* request came back from memory
 * free the request table entry if the memory request used it
 */
static void
CPUArrive(MemRequest *mreq)
{
    List_Links *itemPtr, *nextPtr, *mergeHdr, *delayHdr;
    int rtIndex = mreq->rtIndex;
    ASSERT((mreq->state == REPLY_BUS_TRANSFER_WAIT) ||
           ((mreq->state == BUS_TRANSFER_WAIT) &&
            ((mreq->cmd & MEMSYS_CMDMASK) == MEMSYS_UPGRADE)));
    /* deliver the reply to the requesting cache */
    CacheCmdDone(mreq->cpunum, mreq->transId, mreq->mode, mreq->status,
                 mreq->result, mreq->data);
    if (rtIndex != -1) {
        ASSERT(requestTable[mreq->machnum][rtIndex].initiator == mreq);
        mergeHdr = &requestTable[mreq->machnum][rtIndex].merge;
        delayHdr = &requestTable[mreq->machnum][rtIndex].delay;
        /* if initiator is a GET, handle all the merges */
        if ((mreq->cmd & MEMSYS_CMDMASK) == MEMSYS_GET) {
            for (itemPtr = List_First(mergeHdr), nextPtr = NULL;
                 !List_IsAtEnd(mergeHdr, itemPtr);
                 itemPtr = nextPtr) {
                MemRequest *mergeReq = LIST_TO_MREQ(itemPtr);
                nextPtr = List_Next(itemPtr);
                memState[mreq->memnum]->stats.buCounts[BUC_MERGES]++;
                /* merged requests complete with the initiator's reply */
                CacheCmdDone(mergeReq->cpunum, mergeReq->transId, mergeReq->mode,
                             mreq->status, mreq->result, mreq->data);
                if (debugMem)
                    CPUPrint("DBGBSU %lld %d merged addr (0x%x)\n",
                             CPUVec.CycleCount(0), mergeReq->cpunum, mergeReq->addr);
                /* free the mreq */
                List_Remove(itemPtr);
                List_Insert(itemPtr, LIST_ATREAR(&freeMemReqList));
                LIST_TO_MREQ(itemPtr)->state = REQ_FREE;
            }
        }
        /* if something on delay queue, make the first entry be the new
           initiator */
        if (!List_IsEmpty(delayHdr)) {
            MemRequest *delayReq = LIST_TO_MREQ(List_First(delayHdr));
            List_Remove(MREQ_TO_LIST(delayReq));
            requestTable[mreq->machnum][rtIndex].initiator = delayReq;
            /* if the new initiator is a GET, then move all the other GETs
               in the delay queue to the merge queue */
            if ((delayReq->cmd & MEMSYS_CMDMASK) == MEMSYS_GET) {
                for (itemPtr = List_First(delayHdr), nextPtr = NULL;
                     !List_IsAtEnd(delayHdr, itemPtr);
                     itemPtr = nextPtr) {
                    nextPtr = List_Next(itemPtr);
                    if ((LIST_TO_MREQ(itemPtr)->cmd & MEMSYS_CMDMASK) == MEMSYS_GET) {
                        List_Remove(itemPtr);
                        List_Insert(itemPtr, LIST_ATREAR(mergeHdr));
                        LIST_TO_MREQ(itemPtr)->state = MEM_MERGE_WAIT;
                    }
                }
            }
            memState[mreq->memnum]->stats.buCounts[BUC_CONFLICTS]++;
            if (debugMem)
                CPUPrint("DBGBSU %lld %d issue delayed addr (0x%x)\n",
                         CPUVec.CycleCount(0), delayReq->cpunum, delayReq->addr);
            /* Now let the new initiator arbitrate the bus */
            delayReq->state = BUS_ARB_WAIT;
            List_Insert(MREQ_TO_LIST(delayReq), LIST_ATREAR(&highPri[mreq->machnum]));
            Arbitrate(mreq->machnum);
        } else {
            /* free the request table entry, and release the first request
               in RTfullQ */
            requestTable[mreq->machnum][rtIndex].used = 0;
            if (debugMem)
                CPUPrint("DBGBSU %lld %d freeing RT index %d addr(0x%x)\n",
                         CPUVec.CycleCount(mreq->cpunum), mreq->cpunum, rtIndex, mreq->addr);