📄 disksim_cachemem.c
         cache->stat.writeinducedfillatoms += cache_issue_fillreq(cache, fillblkno, (fillblkno + fillbcount - 1), writedesc, cache->writefill_prefetch_type);
         return(1);
      }
      i += linesize - ((lbn + i) % linesize);
   }
   cache->stat.writes++;
   cache->stat.writeatoms += writedesc->req->bcount;
   cache->stat.getblockwritedones++;
   if (writedesc->allocstop & 4) {
      cache->stat.writehitsdirty++;
   } else if (writedesc->allocstop) {
      cache->stat.writemisses++;
   } else {
      cache->stat.writehitsclean++;
   }
   if (writedesc->flags & CACHE_FLAG_WASBLOCKED) {
      /* callback */
      (*writedesc->donefunc)(writedesc->doneparam, writedesc->req);
      addtoextraq((event *) writedesc);
   }
   return(0);
}


/* Gets the appropriate block, locked and ready to be accessed read or write */

static int cachemem_get_block (struct cache_if *c, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
   struct cache_mem *cache = (struct cache_mem *)c;
   struct cache_mem_event *rwdesc = (struct cache_mem_event *) getfromextraq();
   int ret;

   // fprintf (outputfile, "totalreqs = %d\n", disksim->totalreqs);
   // fprintf (outputfile, "%.5f: Entered cache_get_block: rw %d, devno %d, blkno %d, size %d\n", simtime, (req->flags & READ), req->devno, req->blkno, req->bcount);

   rwdesc->type = (req->flags & READ) ? CACHE_EVENT_READ : CACHE_EVENT_WRITE;
   rwdesc->donefunc = donefunc;
   rwdesc->doneparam = doneparam;
   rwdesc->req = req;
   req->next = NULL;
   req->prev = NULL;
   rwdesc->validpoint = -1;
   rwdesc->lockstop = - (req->blkno % cache->atomsperbit);
   rwdesc->allocstop = 0;   /* overload -- use for determining hit type */
   rwdesc->flags = 0;

   if (req->flags & READ) {
      cache->stat.getblockreadstarts++;
      ret = cache_read_continue(cache, rwdesc);
   } else {
      cache->stat.getblockwritestarts++;
      ret = cache_write_continue(cache, rwdesc);
   }
   // fprintf (outputfile, "rwdesc %p, ret %x, validpoint %d\n", rwdesc, ret, rwdesc->validpoint);
   if (ret == 0) {
      (*donefunc)(doneparam, req);
      addtoextraq((event *) rwdesc);
   } else {
      rwdesc->flags |= CACHE_FLAG_WASBLOCKED;
   }
   return(ret);
}


/* frees the block after access complete, block is clean so remove locks */
/* and update lru */

static void cachemem_free_block_clean (struct cache_if *c, ioreq_event *req)
{
   struct cache_mem *cache = (struct cache_mem *)c;
   cache_atom *line = NULL;
   int lockgran = 0;
   int i;

   // fprintf (outputfile, "%.5f: Entered cache_free_block_clean: blkno %d, bcount %d, devno %d\n", simtime, req->blkno, req->bcount, req->devno);

   cache->stat.freeblockcleans++;
   if (cache->size == 0) {
      return;
   }
   for (i=0; i<req->bcount; i++) {
      if (line == NULL) {
         line = cache_find_atom(cache, req->devno, (req->blkno + i));
         /* Can't free unallocated space */
         ASSERT(line != NULL);
         if (req->type) {
            cache_access(cache, line);
         }
      }
      if (((line->lbn % cache->lockgran) == (cache->lockgran-1)) || (i == (req->bcount-1))) {
         lockgran += cache_free_read_lock(cache, line, req);
      }
      line = line->line_next;
   }
   /* Must have unlocked the entire request's worth of data */
   ASSERT2((lockgran >= req->bcount), "lockgran", lockgran, "reqbcount", req->bcount);
}
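For orientation, the two entry points above follow the completion convention used throughout this file: a return of 0 from cachemem_get_block means the block was acquired immediately and donefunc has already been invoked, while a nonzero return means the request blocked (CACHE_FLAG_WASBLOCKED is set) and the callback fires later, once the blocked descriptor is resumed. A minimal self-contained sketch of that convention, using hypothetical names (fake_req, get_block_sketch, my_done) rather than DiskSim's types:

#include <stdio.h>

/* Hypothetical request type and callback signature, mirroring the shape of
 * cachemem_get_block's donefunc/doneparam pair. */
struct fake_req { int blkno; };

typedef void (*done_fn)(void *param, struct fake_req *req);

/* Returns 0 and runs done() inline when the "block" is available; returns 1
 * and defers the callback when it is locked, as cachemem_get_block does. */
static int get_block_sketch(struct fake_req *req, int locked,
                            done_fn done, void *param)
{
    if (!locked) {
        done(param, req);   /* immediate completion: callback runs now */
        return 0;
    }
    return 1;               /* blocked: caller must wait for the callback */
}

static void my_done(void *param, struct fake_req *req)
{
    printf("request for block %d completed (%s)\n", req->blkno, (char *)param);
}

int main(void)
{
    struct fake_req r = { 42 };
    if (get_block_sketch(&r, 0, my_done, "inline") == 0) {
        /* nothing to do: my_done already ran */
    }
    if (get_block_sketch(&r, 1, my_done, "deferred") != 0) {
        my_done("deferred", &r);  /* in the real code this fires later, from event processing */
    }
    return 0;
}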
static void cache_write_line_by_line (struct cache_mem *cache, ioreq_event *flushreq, struct cache_mem_event *writedesc, int reqdone);


/* a delayed write - set dirty bits, remove locks and update lru. */
/* If cache doesn't allow delayed writes, forward this to async */

static int cachemem_free_block_dirty (struct cache_if *c, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
   struct cache_mem *cache = (struct cache_mem *)c;
   cache_atom *line = NULL;
   ioreq_event *flushreq = 0;
   struct cache_mem_event *writedesc = 0;
   int lockgran = 0;
   int flushblkno = req->blkno;
   int flushbcount = req->bcount;
   int linebyline = cache->linebylinetmp;
   int i;
   int writethru = (cache->size == 0) || (cache->writescheme != CACHE_WRITE_BACK);

   // fprintf (outputfile, "%.5f, Entered cache_free_block_dirty: blkno %d, size %d, writethru %d\n", simtime, req->blkno, req->bcount, writethru);

   cache->linebylinetmp = 0;
   cache->stat.freeblockdirtys++;
   if (writethru) {
      writedesc = (struct cache_mem_event *) getfromextraq();
      writedesc->type = CACHE_EVENT_SYNC;
      writedesc->donefunc = donefunc;
      writedesc->doneparam = doneparam;
      writedesc->req = req;
      req->type = IO_REQUEST_ARRIVE;
      req->next = NULL;
      req->prev = NULL;
      flushreq = ioreq_copy(req);
      flushreq->type = IO_ACCESS_ARRIVE;
      flushreq->buf = cache;
   }
   if (cache->size == 0) {
      cache->stat.destagewrites++;
      cache->stat.destagewriteatoms += flushreq->bcount;
      cache_waitfor_IO(cache, 1, writedesc, flushreq);
      (*cache->issuefunc)(cache->issueparam, flushreq);
      return(1);
   }

   // fprintf (outputfile, "flushblkno %d, reqblkno %d, atomsperbit %d\n", flushblkno, req->blkno, cache->atomsperbit);

   flushblkno -= (req->blkno % cache->atomsperbit);
   flushbcount += (req->blkno % cache->atomsperbit);
   i = flushblkno + flushbcount;
   flushbcount += rounduptomult(i, cache->atomsperbit) - i;

   // fprintf (outputfile, "in free_block_dirty: flushblkno %d, flushsize %d\n", flushblkno, flushbcount);

   for (i=0; i<flushbcount; i++) {
      if (line == NULL) {
         if ((lockgran) && (writethru) && ((cache->write_line_by_line) || (!cache_concatok(cache, flushblkno, 1, (flushblkno+1), i)))) {
            flushbcount = i;
            linebyline = 1;
            break;
         }
         line = cache_find_atom(cache, req->devno, (flushblkno + i));
         /* dirtied space must be allocated */
         ASSERT(line != NULL);
         cache_access(cache, line);
      }
      if (!writethru) {
         line->busno = req->busno;
         line->slotno = req->slotno;
      }
      line->state |= (writethru) ? CACHE_VALID : (CACHE_VALID|CACHE_DIRTY);
      if (((line->lbn % cache->lockgran) != (cache->lockgran-1)) && (i != (flushbcount-1))) {
      } else if (writethru) {
         lockgran += cache_get_read_lock(cache, line, writedesc);
      } else {
         lockgran += cache_free_write_lock(cache, line, req);
      }
      line = line->line_next;
   }
   /* locks must be held over entire space */
   ASSERT2((lockgran >= flushbcount), "lockgran", lockgran, "flushbcount", flushbcount);

   if (writethru) {
      cache->stat.destagewrites++;
      cache->stat.destagewriteatoms += flushbcount;
      flushreq->blkno = flushblkno;
      flushreq->bcount = flushbcount;
      if (linebyline) {
         cache_write_line_by_line(cache, flushreq, writedesc, 0);
      }
      cache_waitfor_IO(cache, 1, writedesc, flushreq);
      // fprintf (outputfile, "Issuing dirty block flush: writedesc %p, req %p, blkno %d, bcount %d, devno %d\n", writedesc, writedesc->req, flushreq->blkno, flushreq->bcount, flushreq->devno);
      (*cache->issuefunc)(cache->issueparam, flushreq);
      if (cache->writescheme == CACHE_WRITE_SYNCONLY) {
         return(1);
      } else {
         /* Assuming that it is safe to touch it after call to cache_waitfor_IO */
         req->type = -1;
         writedesc->donefunc = &disksim->donefunc_cachemem_empty;
         req = ioreq_copy(req);
      }
   } else if (cache->flush_idledelay >= 0.0) {
      ioqueue_reset_idledetecter((*cache->queuefind)(cache->queuefindparam, req->devno), 0);
   }
   (*donefunc)(doneparam, req);
   return(0);
}
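The flushblkno/flushbcount arithmetic in cachemem_free_block_dirty widens the flush range outward so that both endpoints land on atomsperbit boundaries, since dirty bits track groups of atoms rather than single blocks. Below is a self-contained sketch of that rounding; the rounduptomult definition is an assumption (its real definition is not part of this listing), as are the helper names:

#include <assert.h>

/* Assumed behavior of DiskSim's rounduptomult(): round val up to the next
 * multiple of mult. */
static int rounduptomult(int val, int mult)
{
    return ((val + mult - 1) / mult) * mult;
}

/* Widen [blkno, blkno+bcount) to [*alignedblkno, *alignedblkno+*alignedbcount)
 * so both endpoints are multiples of atomsperbit, mirroring the
 * flushblkno/flushbcount computation in cachemem_free_block_dirty. */
static void align_flush_range(int blkno, int bcount, int atomsperbit,
                              int *alignedblkno, int *alignedbcount)
{
    int end;
    *alignedblkno = blkno - (blkno % atomsperbit);       /* round start down */
    *alignedbcount = bcount + (blkno % atomsperbit);
    end = *alignedblkno + *alignedbcount;
    *alignedbcount += rounduptomult(end, atomsperbit) - end;   /* round end up */
}

int main(void)
{
    int ablkno, abcount;
    align_flush_range(13, 5, 4, &ablkno, &abcount);   /* covers blocks 13..17 */
    assert(ablkno == 12 && abcount == 8);             /* widened to 12..19 */
    return 0;
}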
static void cache_write_line_by_line (struct cache_mem *cache, ioreq_event *flushreq, struct cache_mem_event *writedesc, int reqdone)
{
   struct cache_mem_event *tmp = cache->partwrites;

   while ((tmp) && (tmp->req != writedesc->req)) {
      tmp = tmp->next;
   }
   if (tmp == NULL) {
      /* partial write sync must have been initiated if it is done */
      ASSERT(!reqdone);
      tmp = (struct cache_mem_event *) getfromextraq();
      tmp->req = writedesc->req;
      tmp->locktype = writedesc->req->blkno;
      tmp->lockstop = writedesc->req->bcount;
      tmp->next = cache->partwrites;
      tmp->prev = NULL;
      if (tmp->next) {
         tmp->next->prev = tmp;
      }
      cache->partwrites = tmp;
   }
   if (reqdone) {
      tmp->req->bcount = tmp->accblkno - flushreq->blkno;
      tmp->req->blkno = flushreq->blkno;
      tmp->req->type = 0;
      cachemem_free_block_clean((struct cache_if *)cache, tmp->req);
      if (tmp->accblkno >= (tmp->locktype + tmp->lockstop)) {
         if (tmp->prev) {
            tmp->prev->next = tmp->next;
         } else {
            cache->partwrites = tmp->next;
         }
         if (tmp->next) {
            tmp->next->prev = tmp->prev;
         }
         tmp->req->blkno = tmp->locktype;
         tmp->req->bcount = tmp->lockstop;
         (*writedesc->donefunc)(writedesc->doneparam, tmp->req);
         addtoextraq((event *) tmp);
      } else {
         tmp->req->bcount = tmp->locktype + tmp->lockstop - tmp->accblkno;
         tmp->req->blkno = tmp->accblkno;
         cache->linebylinetmp = 1;
         cachemem_free_block_dirty((struct cache_if *)cache, tmp->req, writedesc->donefunc, writedesc->doneparam);
      }
      addtoextraq((event *) writedesc);
   } else {
      writedesc->type = CACHE_EVENT_SYNCPART;
      tmp->accblkno = flushreq->blkno + flushreq->bcount;
   }
}


static int cachemem_sync (struct cache_if *c)
{
   return(0);
}
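cache_write_line_by_line keeps partial-write descriptors on an intrusive doubly linked list headed by the bare pointer cache->partwrites, and cachemem_disk_access_complete below does the same with cache->IOwaiters; unlinking therefore has to special-case the head node (a NULL prev). A standalone sketch of that push/unlink pattern, with a hypothetical node type standing in for struct cache_mem_event:

#include <assert.h>
#include <stddef.h>

/* Hypothetical node mirroring the next/prev links of struct cache_mem_event. */
struct node {
    struct node *next;
    struct node *prev;
};

/* Push at the head, as cache_write_line_by_line does for cache->partwrites. */
static void push_head(struct node **head, struct node *n)
{
    n->next = *head;
    n->prev = NULL;
    if (n->next) {
        n->next->prev = n;
    }
    *head = n;
}

/* Unlink anywhere in the list; a NULL prev means n is the head, so the head
 * pointer itself must be redirected -- the same special case the cache code
 * performs on cache->IOwaiters and cache->partwrites. */
static void unlink_node(struct node **head, struct node *n)
{
    if (n->prev) {
        n->prev->next = n->next;
    } else {
        *head = n->next;
    }
    if (n->next) {
        n->next->prev = n->prev;
    }
}

int main(void)
{
    struct node a, b, *head = NULL;
    push_head(&head, &a);
    push_head(&head, &b);      /* list is now b -> a */
    unlink_node(&head, &b);    /* remove the head node */
    assert(head == &a && a.prev == NULL && a.next == NULL);
    return 0;
}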
// fprintf (outputfile, "Matched: tmp %p, req %p, blkno %d, accblkno %d, reqblkno %d\n", tmp, req, curr->blkno, tmp->accblkno, req->blkno); goto completed_access; } req = req->next; } tmp = tmp->next; }completed_access: if (tmp == NULL) { fprintf(stderr, "Not yet supporting non-waited for disk accesses in cache\n"); exit(1); } if (tmp->prev) { tmp->prev->next = tmp->next; } else { cache->IOwaiters = tmp->next; } if (tmp->next) { tmp->next->prev = tmp->prev; }/*fprintf (outputfile, "IOwaiters: %x, tmp->prev %x, tmp->next %x, 3 %d\n", cache->IOwaiters, tmp->prev, tmp->next, 3);*/ if ((cache->size == 0) || (tmp->type == CACHE_EVENT_SYNC) || (tmp->type == CACHE_EVENT_IDLESYNC)) { int type = req->type; if (req->next) { req->next->prev = req->prev; } if (req->prev) { req->prev->next = req->next; } else { tmp->req = req->next; } req->type = 0; cachemem_free_block_clean((struct cache_if *)cache, req); req->type = type; if (type != -1) { (*tmp->donefunc)(tmp->doneparam, req); } if (tmp->req) { cache_waitfor_IO(cache, 1, tmp, tmp->req); tmp->accblkno = (tmp->req->next) ? -1 : tmp->req->blkno; tmp = (type == -1) ? (struct cache_mem_event *) event_copy((event *)tmp) : NULL; } else { if (tmp->type == CACHE_EVENT_IDLESYNC) { cache_idletime_detected(cache, curr->devno); } if (type != -1) { addtoextraq((event *) tmp); tmp = NULL; } } if (type == -1) { tmp->req = req; } } else if (tmp->type == CACHE_EVENT_READ) { } else if (tmp->type == CACHE_EVENT_WRITE) { } else if (tmp->type == CACHE_EVENT_SYNCPART) { curr->next = tmp->req; tmp->req= curr; curr = NULL; } else if (tmp->type == CACHE_EVENT_ALLOCATE) { /* Must be a replacement-induced write-back */ req->next->prev = req->prev; if (req->prev) { req->prev->next = req->next; } else { tmp->req = req->next; } req->type = 0; cachemem_free_block_clean((struct cache_if *)cache, req); addtoextraq((event *) req); if (tmp->req != tmp->waitees->req) { cache_waitfor_IO(cache, 1, tmp, tmp->req); tmp->accblkno = -1; tmp = NULL; } } else if (tmp->type == CACHE_EVENT_READEXTRA) { tmp->type = CACHE_EVENT_READ; cache_unlock_attached_prefetch(cache, tmp); } else if (tmp->type == CACHE_EVENT_WRITEFILLEXTRA) { tmp->type = CACHE_EVENT_WRITE; cache_unlock_attached_prefetch(cache, tmp); } else { fprintf(stderr, "Unknown type at cache_disk_access_complete: %d\n", tmp->type); exit(1); } addtoextraq((event *) curr); return(tmp);}static void cachemem_wakeup_complete (struct cache_if *c, void *d){ struct cache_mem *cache = (struct cache_mem *)c; struct cache_mem_event *desc = (struct cache_mem_event *)d; switch(desc->type) { case CACHE_EVENT_READ: cache_read_c