📄 disksim_cachemem.c
字号:
// fprintf (outputfile, "j %d, valid %d, validpoint %d, curlock %d, lockgran %d\n", j, (tmp->state & CACHE_VALID), validpoint, curlock, lockgran);
if (locktype > curlock) {
curlock = locktype;
lockgran = 0;
locktype = 3;
} else {
curlock = locktype;
}
if ((lockgran) && ((lbn+i+j) % lockgran)) {
} else if ((ret = cache_get_rw_lock(cache, locktype, readdesc, tmp, (i+j), 1))) {
// fprintf (outputfile, "Non-zero return from cache_get_rw_lock: %d\n", ret);
if (ret == 1) {
readdesc->lockstop = i + j;
return(1);
} else { /* ret == 2, indicating that identity changed */
goto read_cont_loop;
}
}
lockgran = cache->lockgran;
if ((tmp->state & CACHE_VALID) == 0) {
tmp->state |= CACHE_VALID;
if (validpoint == -1) {
validpoint = tmp->lbn;
readdesc->validpoint = validpoint;
}
/* Possibly begin filling (one at a time) ?? */
} else {
if (validpoint != -1) {
/* start fill of partial line */
readdesc->allocstop |= 2;
cache->stat.fillreads++;
readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
// fprintf (outputfile, "Going to issue_fillreq on partial line\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (tmp->lbn - 1), readdesc, cache->read_prefetch_type);
readdesc->validpoint = -1;
return(1);
}
}
tmp = tmp->line_next;
j++;
}
if ((validpoint != -1) && ((cache->read_line_by_line) || (!cache_concatok(cache, validpoint, 1, (validpoint+1), (line->lbn + stop - validpoint))))) {
/* Start fill of the line */
readdesc->allocstop |= 1;
cache->stat.fillreads++;
// fprintf (outputfile, "Going to issue_fillreq on full line\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
readdesc->validpoint = -1;
return(1);
}
i += linesize - ((lbn + i) % linesize);
//fprintf (outputfile, "validpoint %d, i %d\n", validpoint, i);
}
if (validpoint != -1) {
/* Do the fill if necessary */
readdesc->allocstop |= 1;
cache->stat.fillreads++;
/* reset to what was in beginning */
readdesc->lockstop = - (readdesc->req->blkno % cache->atomsperbit);
readdesc->validpoint = -1;
// fprintf (outputfile, "Going to issue_fillreq on full request\n");
cache->stat.fillreadatoms += cache_issue_fillreq(cache, validpoint, (line->lbn + stop - 1), readdesc, cache->read_prefetch_type);
return(1);
}
cache->stat.getblockreaddones++;
cache->stat.reads++;
cache->stat.readatoms += readdesc->req->bcount;
if (readdesc->allocstop) {
cache->stat.readmisses++;
} else {
cache->stat.readhitsfull++;
}
if (readdesc->flags & CACHE_FLAG_WASBLOCKED) {
/* callback to say done */
(*readdesc->donefunc)(readdesc->doneparam, readdesc->req);
addtoextraq((event *) readdesc);
}
return(0);
}
/* Continues (or starts) servicing a write request against the cache.
 * Walks the request one cache line at a time: allocates non-resident
 * space, acquires write locks at the cache's lock granularity, and marks
 * the touched atoms valid.  When the write covers only part of the region
 * governed by one valid/dirty bit (at the front or tail of the request),
 * a fill read is issued first so the whole unit becomes valid.
 * Returns 0 when the request has been fully processed, or 1 when it
 * blocked (waiting for space, a lock, or an issued fill); in that case
 * writedesc->lockstop records where to resume.
 */
static int cache_write_continue (cache_def *cache, cache_event *writedesc)
{
    int stop;
    cache_event *waitee;
    cache_atom *line;
    cache_atom *tmp;
    int lockgran;
    int i, j;
    int startfillstart;         /* first/last lbn of an invalid region in  */
    int startfillstop = 0;      /* front of the request (alignment pad)    */
    int endfillstart;           /* first/last lbn of an invalid region     */
    int endfillstop = 0;        /* just past the end of the request        */
    int ret;
    int devno = writedesc->req->devno;
    int lbn = writedesc->req->blkno;
    int size = writedesc->req->bcount;
    int linesize = (cache->linesize > 1) ? cache->linesize : 1;

    if (cache->size == 0) {
        /* no cache configured -- nothing to track */
        return(0);
    }
    /* resume point from a previous blocked attempt; starts negative so the
       scan begins on an atomsperbit-aligned boundary (see cachemem_get_block) */
    i = writedesc->lockstop;
    // fprintf (outputfile, "Entered cache_write_continue: lbn %d, size %d, i %d\n", lbn, size, i);

write_cont_loop:   /* restarted when a locked line changes identity under us */
    while (i < size) {
        line = cache_find_atom(cache, devno, (lbn + i));
        waitee = NULL;
        if (line == NULL) {
            /* target atoms are not resident in the cache */
            if (cache->no_write_allocate) {
                /* track non-resident part and continue */
                fprintf(stderr, "Not yet handling write-no-allocate\n");
                exit(1);
            } else {
                if ((waitee = cache_allocate_space(cache, (lbn + i), 1, writedesc))) {
                    /* must block until space can be freed */
                    writedesc->lockstop = i;
                    return(1);
                } else {
                    /* space allocated -- redo the lookup for this lbn */
                    continue;
                }
            }
        }
        /* atoms to handle in this line: request remainder rounded up to the
           valid/dirty-bit granularity, capped at the cache-line boundary */
        stop = min(rounduptomult((size - i), cache->atomsperbit), (linesize - ((lbn + i) % linesize)));
        j = 0;
        tmp = line;
        lockgran = 0;
        startfillstart = -1;
        endfillstart = -1;
        while (j < stop) {
            /* only the first atom of each lock granule needs the lock call */
            if ((lockgran) && ((lbn+i+j) % lockgran)) {
            } else if ((ret = cache_get_rw_lock(cache, 2, writedesc, tmp, (i+j), 1))) {
                if (ret == 1) {
                    /* blocked on the lock -- resume at the start of this line */
                    writedesc->lockstop = i;
                    return(1);
                } else { /* ret == 2, indicates that line changed identity */
                    goto write_cont_loop;
                }
            }
            lockgran = cache->lockgran;
            if ((tmp->lbn < lbn) && ((tmp->state & CACHE_VALID) == 0)) {
                /* invalid atom ahead of the requested range: mark it valid
                   and remember it so a front fill can be issued below */
                writedesc->allocstop |= 2;
                tmp->state |= CACHE_VALID;
                if (startfillstart == -1) {
                    startfillstart = tmp->lbn;
                }
                startfillstop = tmp->lbn;
            } else if ((tmp->state & CACHE_VALID) == 0) {
                int tmpval = tmp->lbn - (lbn + size - 1);
                writedesc->allocstop |= 2;
                /* invalid atom past the end of the request but inside the
                   final valid-bit granule: remember it for a tail fill */
                if ((tmpval > 0) && (tmpval < (cache->atomsperbit - ((lbn + size - 1) % cache->atomsperbit)))) {
                    tmp->state |= CACHE_VALID;
                    if (endfillstart == -1) {
                        endfillstart = tmp->lbn;
                    }
                    endfillstop = tmp->lbn;
                }
            } else if (tmp->state & CACHE_DIRTY) {
                /* note that already-dirty data was hit (for stats below) */
                writedesc->allocstop |= 4;
            }
            tmp = tmp->line_next;
            j++;
        }
        /* if writing only part of space covered by valid/dirty bit, read */
        /* (fill) first -- flag undo of allocation to bypass (no bypass for now */
        if ((startfillstart != -1) || (endfillstart != -1)) {
            int fillblkno = (startfillstart != -1) ? startfillstart : endfillstart;
            /* fill spans from the first needed atom through the last one,
               so fillbcount = (laststop - fillblkno + 1) */
            int fillbcount = 1 - fillblkno;
            fillbcount += ((startfillstart != -1) && (endfillstart == -1)) ? startfillstop : endfillstop;
            cache->stat.writeinducedfills++;
            // fprintf (outputfile, "Write induced fill: blkno %d, bcount %d\n", fillblkno, fillbcount);
            cache->stat.writeinducedfillatoms += cache_issue_fillreq(cache, fillblkno, (fillblkno + fillbcount - 1), writedesc, cache->writefill_prefetch_type);
            /* blocked until the fill completes */
            return(1);
        }
        /* advance to the first atom of the next cache line */
        i += linesize - ((lbn + i) % linesize);
    }
    cache->stat.writes++;
    cache->stat.writeatoms += writedesc->req->bcount;
    cache->stat.getblockwritedones++;
    /* classify the access from the flags accumulated in allocstop */
    if (writedesc->allocstop & 4) {
        cache->stat.writehitsdirty++;
    } else if (writedesc->allocstop) {
        cache->stat.writemisses++;
    } else {
        cache->stat.writehitsclean++;
    }
    if (writedesc->flags & CACHE_FLAG_WASBLOCKED) {
        /* caller already returned "blocked", so deliver the callback here */
        (*writedesc->donefunc)(writedesc->doneparam, writedesc->req);
        addtoextraq((event *) writedesc);
    }
    return(0);
}
/* Gets the appropriate block, locked and ready to be accessed read or write.
 * Returns 0 if the request was satisfied immediately (the completion
 * callback has already been invoked), or 1 if it blocked and the callback
 * will be delivered later by the continuation path.
 */
int cachemem_get_block (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
    int isread = (req->flags & READ);
    cache_event *desc = (cache_event *) getfromextraq();
    int blocked;

    // fprintf (outputfile, "totalreqs = %d\n", disksim->totalreqs);
    // fprintf (outputfile, "%.5f: Entered cache_get_block: rw %d, devno %d, blkno %d, size %d\n", simtime, (req->flags & READ), req->devno, req->blkno, req->bcount);
    desc->type = isread ? CACHE_EVENT_READ : CACHE_EVENT_WRITE;
    desc->req = req;
    desc->donefunc = donefunc;
    desc->doneparam = doneparam;
    desc->flags = 0;
    desc->validpoint = -1;
    desc->allocstop = 0;       /* overload -- use for determining hit type */
    /* start negative so the scan begins on an atomsperbit-aligned boundary */
    desc->lockstop = - (req->blkno % cache->atomsperbit);
    req->prev = NULL;
    req->next = NULL;

    if (isread) {
        cache->stat.getblockreadstarts++;
        blocked = cache_read_continue(cache, desc);
    } else {
        cache->stat.getblockwritestarts++;
        blocked = cache_write_continue(cache, desc);
    }
    // fprintf (outputfile, "rwdesc %p, ret %x, validpoint %d\n", desc, blocked, desc->validpoint);

    if (blocked) {
        /* continuation will deliver the callback when unblocked */
        desc->flags |= CACHE_FLAG_WASBLOCKED;
    } else {
        /* done right away: report completion and recycle the descriptor */
        (*donefunc)(doneparam, req);
        addtoextraq((event *) desc);
    }
    return(blocked);
}
/* frees the block after access complete, block is clean so remove locks */
/* and update lru.  Locks are released once per lock-granularity unit:   */
/* at the last atom of each granule, or at the final atom of the request */
void cachemem_free_block_clean (cache_def *cache, ioreq_event *req)
{
    cache_atom *atom = NULL;
    int lockgran = 0;   /* running count of atoms actually unlocked */
    int i = 0;

    // fprintf (outputfile, "%.5f: Entered cache_free_block_clean: blkno %d, bcount %d, devno %d\n", simtime, req->blkno, req->bcount, req->devno);
    cache->stat.freeblockcleans++;
    if (cache->size == 0) {
        return;
    }
    while (i < req->bcount) {
        if (atom == NULL) {
            /* (re)locate the atom chain for this lbn */
            atom = cache_find_atom(cache, req->devno, (req->blkno + i));
            /* Can't free unallocated space */
            ASSERT(atom != NULL);
            if (req->type) {
                /* non-zero type means touch LRU state for this line */
                cache_access(cache, atom);
            }
        }
        if ((i == (req->bcount-1)) || ((atom->lbn % cache->lockgran) == (cache->lockgran-1))) {
            lockgran += cache_free_read_lock(cache, atom, req);
        }
        atom = atom->line_next;
        i++;
    }
    /* Must have unlocked entire requests worth of data */
    ASSERT2((lockgran >= req->bcount), "lockgran", lockgran, "reqbcount", req->bcount);
}
/* Bookkeeping for a write request whose flush proceeds line by line.
 * Each call corresponds to one flushed piece (flushreq) of the original
 * request (writedesc->req).  A tracker node on the cache->partwrites list
 * overloads its cache_event fields to remember the request's geometry:
 * locktype holds the original blkno, lockstop the original bcount, and
 * accblkno the block number the flush has reached so far.
 */
static void cache_write_line_by_line (cache_def *cache, ioreq_event *flushreq, cache_event *writedesc, int reqdone)
{
    /* find the existing tracker for this request, if any */
    cache_event *tmp = cache->partwrites;
    while ((tmp) && (tmp->req != writedesc->req)) {
        tmp = tmp->next;
    }
    if (tmp == NULL) {
        /* partial write sync must have been initiated if it is done */
        ASSERT(!reqdone);
        /* first piece: create a tracker and push it onto partwrites */
        tmp = (cache_event *) getfromextraq();
        tmp->req = writedesc->req;
        tmp->locktype = writedesc->req->blkno;    /* overload: original blkno */
        tmp->lockstop = writedesc->req->bcount;   /* overload: original bcount */
        tmp->next = cache->partwrites;
        tmp->prev = NULL;
        if (tmp->next) {
            tmp->next->prev = tmp;
        }
        cache->partwrites = tmp;
    }
    if (reqdone) {
        /* the piece ending at accblkno finished flushing: temporarily
           shrink req to that span and release its (now clean) locks;
           type 0 skips the LRU touch in cachemem_free_block_clean */
        tmp->req->bcount = tmp->accblkno - flushreq->blkno;
        tmp->req->blkno = flushreq->blkno;
        tmp->req->type = 0;
        cachemem_free_block_clean(cache, tmp->req);
        if (tmp->accblkno >= (tmp->locktype + tmp->lockstop)) {
            /* entire original request flushed: unlink the tracker, restore
               the request's geometry, and deliver the completion callback */
            if (tmp->prev) {
                tmp->prev->next = tmp->next;
            } else {
                cache->partwrites = tmp->next;
            }
            if (tmp->next) {
                tmp->next->prev = tmp->prev;
            }
            tmp->req->blkno = tmp->locktype;
            tmp->req->bcount = tmp->lockstop;
            (*writedesc->donefunc)(writedesc->doneparam, tmp->req);
            addtoextraq((event *) tmp);
        } else {
            /* more remains: hand the unflushed tail of the request back to
               cachemem_free_block_dirty to continue, flagging line-by-line */
            tmp->req->bcount = tmp->locktype + tmp->lockstop - tmp->accblkno;
            tmp->req->blkno = tmp->accblkno;
            cache->linebylinetmp = 1;
            cachemem_free_block_dirty(cache, tmp->req, writedesc->donefunc, writedesc->doneparam);
        }
        addtoextraq((event *) writedesc);
    } else {
        /* flush just issued: record how far it will reach when complete */
        writedesc->type = CACHE_EVENT_SYNCPART;
        tmp->accblkno = flushreq->blkno + flushreq->bcount;
    }
}
/* a delayed write - set dirty bits, remove locks and update lru. */
/* If cache doesn't allow delayed writes, forward this to async */
int cachemem_free_block_dirty (cache_def *cache, ioreq_event *req, void (**donefunc)(void *, ioreq_event *), void *doneparam)
{
cache_atom *line = NULL;
ioreq_event *flushreq = 0;
cache_event *writedesc = 0;
int lockgran = 0;
int flushblkno = req->blkno;
int flushbcount = req->bcount;
int linebyline = cache->linebylinetmp;
int i;
int writethru = (cache->size == 0) || (cache->writescheme != CACHE_WRITE_BACK);
// fprintf (outputfile, "%.5f, Entered cache_free_block_dirty: blkno %d, size %d, writethru %d\n", simtime, req->blkno, req->bcount, writethru);
cache->linebylinetmp = 0;
cache->stat.freeblockdirtys++;
if (writethru) {
writedesc = (cache_event *) getfromextraq();
writedesc->type = CACHE_EVENT_SYNC;
writedesc->donefunc = donefunc;
writedesc->doneparam = doneparam;
writedesc->req = req;
req->type = IO_REQUEST_ARRIVE;
req->next = NULL;
req->prev = NULL;
flushreq = ioreq_copy(req);
flushreq->type = IO_ACCESS_ARRIVE;
flushreq->buf = cache;
}
if (cache->size == 0) {
cache->stat.destagewrites++;
cache->stat.destagewriteatoms += flushreq->bcount;
cache_waitfor_IO(cache, 1, writedesc, flushreq);
(*cache->issuefunc)(cache->issueparam, flushreq);
return(1);
}
// fprintf (outputfile, "flushblkno %d, reqblkno %d, atomsperbit %d\n", flushblkno, req->blkno, cache->atomsperbit);
flushblkno -= (req->blkno % cache->atomsperbit);
flushbcount += (req->blkno % cache->atomsperbit);
i = flushblkno + flushbcount;
flushbcount += rounduptomult(i, cache->atomsperbit) - i;
// fprintf (outputfile, "in free_block_dirty: flushblkno %d, flushsize %d\n", flushblkno, flushbcount);
for (i=0; i<flushbcount; i++) {
if (line == NULL) {
if ((lockgran) && (writethru) && ((cache->write_line_by_line) || (!cache_concatok(cache, flushblkno, 1, (flushblkno+1), i)))) {
flushbcount = i;
linebyline = 1;
break;
}
line = cache_find_atom(cache, req->devno, (flushblkno + i));
/* dirtied space must be allocated */
ASSERT(line != NULL);
cache_access(cache, line);
}
if (!writethru) {
line->busno = req->busno;
line->slotno = req->slotno;
}
line->state |= (writethru) ? CACHE_VALID : (CACHE_VALID|CACHE_DIRTY);
if (
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -