📄 disksim_cachemem.c
         return(2);
      }
      j++;
      tmp = tmp->line_next;
      while ((tmp) && (tmp->lbn % lockgran)) {
         j++;
         tmp = tmp->line_next;
      }
      }
   }
   return(0);
}


static int cache_issue_fillreq (struct cache_mem *cache, int start, int end, struct cache_mem_event *rwdesc, int prefetchtype)
{
   ioreq_event *fillreq;
   int linesize = max(cache->linesize, 1);

// fprintf (outputfile, "Entered cache_issue_fillreq: start %d, end %d, prefetchtype %d\n", start, end, prefetchtype);

   if (prefetchtype & CACHE_PREFETCH_FRONTOFLINE) {
      cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, start);
      int validstart = -1;
      int lockgran = cache->lockgran;
      while (start % linesize) {
         line = line->line_prev;
         if (line->state & CACHE_VALID) {
/* fprintf (outputfile, "already valid backwards: lbn %d\n", line->lbn); */
            break;
/*
            if (line->state & CACHE_DIRTY) {
               break;
            }
            if (validstart == -1) {
               validstart = line->lbn;
            }
*/
         } else {
            validstart = -1;
         }
         if ((line->lbn % lockgran) == (lockgran-1)) {
            if ((!cache->prefetch_waitfor_locks) && (cache_atom_islocked(cache, line))) {
               break;
            }
            if ((lockgran = cache_get_write_lock(cache, line, rwdesc)) == 0) {
               return(0);
            }
         }
         start--;
         line->state |= CACHE_VALID;
      }
/* Need to free some locks if do this...
      if (validstart != -1) {
         start = validstart;
      }
*/
   }

   if (prefetchtype & CACHE_PREFETCH_RESTOFLINE) {
      cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, end);
      int validend = -1;
      int lockgran = cache->lockgran;
      while ((end+1) % linesize) {
         line = line->line_next;
         if (line->state & CACHE_VALID) {
/* fprintf (outputfile, "already valid forwards: lbn %d\n", line->lbn); */
            break;
/*
            if (line->state & CACHE_DIRTY) {
               break;
            }
            if (validend == -1) {
               validend = line->lbn;
            }
*/
         } else {
            validend = -1;
         }
         if ((line->lbn % lockgran) == 0) {
            if ((!cache->prefetch_waitfor_locks) && (cache_atom_islocked(cache, line))) {
               break;
            }
            if ((lockgran = cache_get_write_lock(cache, line, rwdesc)) == 0) {
               return(0);
            }
         }
         end++;
         line->state |= CACHE_VALID;
      }
/* Need to free some locks if do this...
      if (validend != -1) {
         end = validend;
      }
*/
   }

   fillreq = ioreq_copy(rwdesc->req);
   fillreq->blkno = start;
   fillreq->bcount = end - start + 1;
   fillreq->type = IO_ACCESS_ARRIVE;
   fillreq->flags |= READ;
   rwdesc->req->tempint1 = start;
   rwdesc->req->tempint2 = end;
   rwdesc->type = (rwdesc->type == CACHE_EVENT_READ) ? CACHE_EVENT_READEXTRA : CACHE_EVENT_WRITEFILLEXTRA;
   cache_waitfor_IO(cache, 1, rwdesc, fillreq);

// fprintf (outputfile, "%f: Issueing line fill request: blkno %d, bcount %d\n", simtime, fillreq->blkno, fillreq->bcount);

   (*cache->issuefunc)(cache->issueparam, fillreq);
   return(end - start + 1);
}


static void cache_unlock_attached_prefetch (struct cache_mem *cache, struct cache_mem_event *rwdesc)
{
   int fillstart = rwdesc->req->tempint1;
   int fillend = rwdesc->req->tempint2 + 1;      /* one beyond, actually */
   int reqstart = rwdesc->req->blkno;
   int reqend = reqstart + rwdesc->req->bcount;  /* one beyond, actually */

// fprintf (outputfile, "Entered cache_unlock_attached_prefetch: fillstart %d, fillend %d, reqstart %d, reqend %d\n", fillstart, fillend, reqstart, reqend);

   if (fillstart < reqstart) {
      int lockgran = cache->lockgran;
      while (reqstart % cache->lockgran) {
         reqstart--;
      }
      reqstart--;
      if (fillstart <= reqstart) {
         cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, reqstart);
         do {
            if ((line->lbn % lockgran) == (lockgran-1)) {
               lockgran = cache_free_write_lock(cache, line, rwdesc->req);
               /* Can't free lock if not held */
               ASSERT(lockgran != 0);
            }
            line = line->line_prev;
            reqstart--;
         } while (fillstart <= reqstart);
      }
   }
   if (fillend > reqend) {
      int lockgran = cache->lockgran;
      while (reqend % cache->lockgran) {
         reqend++;
      }
/*
      if ((fillend / cache->lockgran) == (reqend / cache->lockgran)) {
      } else {
         reqend++;
      }
*/
      if (fillend > reqend) {
         cache_atom *line = cache_find_atom(cache, rwdesc->req->devno, reqend);
         do {
            if ((line->lbn % lockgran) == 0) {
               lockgran = cache_free_write_lock(cache, line, rwdesc->req);
               /* Can't free lock if not held */
               ASSERT(lockgran != 0);
            }
            line = line->line_next;
            reqend++;
         } while (fillend > reqend);
      }
   }
}


static int cache_read_continue (struct cache_mem *cache, struct cache_mem_event *readdesc)
{
   cache_atom *line = NULL;
   cache_atom *tmp;
   int i, j;
   struct cache_mem_event *waitee;
   int stop = 0;
   int curlock;
   int lockgran;
   int ret;
   int linesize = max(1, cache->linesize);
   int devno = readdesc->req->devno;
   int lbn = readdesc->req->blkno;
   int size = readdesc->req->bcount;
   int validpoint = readdesc->validpoint;

   if (cache->size == 0) {
      cache_waitfor_IO(cache, 1, readdesc, readdesc->req);
      cache->stat.readmisses++;
      cache->stat.fillreads++;
      cache->stat.fillreadatoms += readdesc->req->bcount;
      readdesc->req->type = IO_ACCESS_ARRIVE;
      (*cache->issuefunc)(cache->issueparam, ioreq_copy(readdesc->req));
      return(1);
   }
   i = readdesc->lockstop;

// fprintf (outputfile, "Entered cache_read_continue: lbn %d, size %d, i %d\n", lbn, size, i);

read_cont_loop:
   while (i < size) {
      line = cache_find_atom(cache, devno, (lbn + i));
      waitee = NULL;
      if (line == NULL) {
         if ((waitee = cache_allocate_space(cache, (lbn + i), 1, readdesc))) {
            readdesc->lockstop = i;
            return(1);
         } else {
            continue;
         }
      }
      stop = min(rounduptomult((size - i), cache->atomsperbit), (linesize - ((lbn + i) % linesize)));
// fprintf (outputfile, "stop %d, lbn %d, atomsperbit %d, i %d, size %d, linesize %d\n", stop, lbn, cache->atomsperbit, i, size, linesize);
// fprintf (outputfile, "validpoint %d, i %d\n", validpoint, i);
      j = 0;
      tmp = line;
      curlock = 2;
      lockgran = 0;
      while (j < stop) {
         int locktype = (tmp->state & CACHE_VALID) ? 1 : 2;
// fprintf (outputfile, "j %d, valid %d, validpoint %d, curlock %d, lockgran %d\n", j, (tmp->state & CACHE_VALID), validpoint, curlock, lockgran);
         if (locktype > curlock) {
            curlock = locktype;
            lockgran = 0;
            locktype = 3;
         } else {
            curlock = locktype;
         }
         if ((lockgran) && ((lbn+i+j) % lockgran)) {
         } else if ((ret = cache_get_rw_lock(cache, locktype, readdesc, tmp, (i+j), 1))) {
// fprintf (outputfile, "Non-zero return from cache_get_rw_lock: %d\n", ret);
            if (ret == 1) {
               readdesc->lockstop = i + j;
               return(1);
            } else {   /* ret == 2, indicating that identity changed */
               goto read_cont_loop;
            }
         }
         lockgran = cache->lockgran;
         if ((tmp->state & CACHE_VALID) == 0) {
            tmp->state |= CACHE_VALID;
            if (validpoint == -1) {
               validpoint = tmp->lbn;
               readdesc->validpoint = validpoint;
            }
            /* Possibly begin filling (one at a time) ?? */
         } else {
            if (validpoint != -1) {
               /* start fill of partial line */
               readdesc->allocstop |= 2;
               cache->stat.fillreads++;
               readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
// fprintf (outputfile, "Going to issue_fillreq on partial line\n");
               cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (tmp->lbn - 1), readdesc, cache->read_prefetch_type);
               readdesc->validpoint = -1;
               return(1);
            }
         }
         tmp = tmp->line_next;
         j++;
      }
      if ((validpoint != -1) && ((cache->read_line_by_line) || (!cache_concatok(cache, validpoint, 1, (validpoint+1), (line->lbn + stop - validpoint))))) {
         /* Start fill of the line */
         readdesc->allocstop |= 1;
         cache->stat.fillreads++;
// fprintf (outputfile, "Going to issue_fillreq on full line\n");
         cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
         readdesc->validpoint = -1;
         return(1);
      }
      i += linesize - ((lbn + i) % linesize);
// fprintf (outputfile, "validpoint %d, i %d\n", validpoint, i);
   }
   if (validpoint != -1) {
      /* Do the fill if necessary */
      readdesc->allocstop |= 1;
      cache->stat.fillreads++;
      /* reset to what was in beginning */
      readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
      readdesc->validpoint = -1;
// fprintf (outputfile, "Going to issue_fillreq on full request\n");
      cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
      return(1);
   }
   cache->stat.getblockreaddones++;
   cache->stat.reads++;
   cache->stat.readatoms += readdesc->req->bcount;
   if (readdesc->allocstop) {
      cache->stat.readmisses++;
   } else {
      cache->stat.readhitsfull++;
   }
   if (readdesc->flags & CACHE_FLAG_WASBLOCKED) {
      /* callback to say done */
      (*readdesc->donefunc)(readdesc->doneparam, readdesc->req);
      addtoextraq((event *) readdesc);
   }
   return(0);
}


static int cache_write_continue (struct cache_mem *cache, struct cache_mem_event *writedesc)
{
   int stop;
   struct cache_mem_event *waitee;
   cache_atom *line;
   cache_atom *tmp;
   int lockgran;
   int i, j;
   int startfillstart;
   int startfillstop = 0;
   int endfillstart;
   int endfillstop = 0;
   int ret;
   int devno = writedesc->req->devno;
   int lbn = writedesc->req->blkno;
   int size = writedesc->req->bcount;
   int linesize = (cache->linesize > 1) ? cache->linesize : 1;

   if (cache->size == 0) {
      return(0);
   }
   i = writedesc->lockstop;

// fprintf (outputfile, "Entered cache_write_continue: lbn %d, size %d, i %d\n", lbn, size, i);

write_cont_loop:
   while (i < size) {
      line = cache_find_atom(cache, devno, (lbn + i));
      waitee = NULL;
      if (line == NULL) {
         if (cache->no_write_allocate) {
            /* track non-resident part and continue */
            fprintf(stderr, "Not yet handling write-no-allocate\n");
            exit(1);
         } else {
            if ((waitee = cache_allocate_space(cache, (lbn + i), 1, writedesc))) {
               writedesc->lockstop = i;
               return(1);
            } else {
               continue;
            }
         }
      }
      stop = min(rounduptomult((size - i), cache->atomsperbit), (linesize - ((lbn + i) % linesize)));
      j = 0;
      tmp = line;
      lockgran = 0;
      startfillstart = -1;
      endfillstart = -1;
      while (j < stop) {
         if ((lockgran) && ((lbn+i+j) % lockgran)) {
         } else if ((ret = cache_get_rw_lock(cache, 2, writedesc, tmp, (i+j), 1))) {
            if (ret == 1) {
               writedesc->lockstop = i;
               return(1);
            } else {   /* ret == 2, indicates that line changed identity */
               goto write_cont_loop;
            }
         }
         lockgran = cache->lockgran;
         if ((tmp->lbn < lbn) && ((tmp->state & CACHE_VALID) == 0)) {
            writedesc->allocstop |= 2;
            tmp->state |= CACHE_VALID;
            if (startfillstart == -1) {
               startfillstart = tmp->lbn;
            }
            startfillstop = tmp->lbn;
         } else if ((tmp->state & CACHE_VALID) == 0) {
            int tmpval = tmp->lbn - (lbn + size - 1);
            writedesc->allocstop |= 2;
            if ((tmpval > 0) && (tmpval < (cache->atomsperbit - ((lbn + size - 1) % cache->atomsperbit)))) {
               tmp->state |= CACHE_VALID;
               if (endfillstart == -1) {
                  endfillstart = tmp->lbn;
               }
               endfillstop = tmp->lbn;
            }
         } else if (tmp->state & CACHE_DIRTY) {
            writedesc->allocstop |= 4;
         }
         tmp = tmp->line_next;
         j++;
      }
      /* if writing only part of space covered by valid/dirty bit, read */
      /* (fill) first -- flag undo of allocation to bypass (no bypass for now */
      if ((startfillstart != -1) || (endfillstart != -1)) {
         int fillblkno = (startfillstart != -1) ? startfillstart : endfillstart;
         int fillbcount = 1 - fillblkno;
         fillbcount += ((startfillstart != -1) && (endfillstart == -1)) ? startfillstop : endfillstop;
         cache->stat.writeinducedfills++;
// fprintf (outputfile, "Write induced fill: blkno %d, bcount %d\n", fillblkno, fillbcount);
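
For readers less familiar with DiskSim's cache locking, the short, self-contained sketch below illustrates the lock-granularity pattern that recurs in the functions above: atoms are locked in groups of cache->lockgran, so the backward walk in the CACHE_PREFETCH_FRONTOFLINE branch only calls cache_get_write_lock() when it crosses into a new group, i.e. at the atom whose lbn % lockgran equals lockgran-1. The names walk_front_of_line and take_lock are hypothetical stand-ins and not part of disksim_cachemem.c, and the LINESIZE/LOCKGRAN values are assumptions chosen for the example; the group containing the original start block is assumed to be locked already by the main request path.

/* Illustrative sketch only -- NOT part of disksim_cachemem.c. */
#include <stdio.h>

#define LINESIZE 8   /* atoms per cache line (assumed value) */
#define LOCKGRAN 4   /* atoms per lock unit (assumed value)  */

/* Hypothetical stand-in for cache_get_write_lock() on the lock group
 * whose highest lbn is 'lbn'. */
static void take_lock (int lbn)
{
   printf("lock group ending at lbn %d\n", lbn);
}

/* Walk backwards from 'start' to the front of its cache line, the way
 * the CACHE_PREFETCH_FRONTOFLINE branch does, taking one lock each
 * time the walk crosses into a new lock-granularity group.  The group
 * containing the original 'start' is assumed to be locked already. */
static void walk_front_of_line (int start)
{
   while (start % LINESIZE) {
      int lbn = start - 1;                      /* atom about to be covered */
      if ((lbn % LOCKGRAN) == (LOCKGRAN - 1)) {
         take_lock(lbn);                        /* crossed into a new group */
      }
      start--;
   }
}

int main (void)
{
   walk_front_of_line(14);   /* extends a fill back from lbn 14 down to lbn 8 */
   return(0);
}

cache_unlock_attached_prefetch() undoes exactly these extra locks once the fill completes, which is why its backward and forward loops test the same lbn % lockgran boundaries before calling cache_free_write_lock().
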