/* disksim_cachemem.c -- memory-cache module (scraped copy; website banner removed) */
/* NOTE(review): this chunk begins mid-function -- the enclosing dirty-block */
/* flush routine's header lies above the visible region.  The code below    */
/* finishes clustering a run of dirty blocks and issues the flush           */
/* request(s), counting started flushes in flushcnt.                        */
            dirtystart = cache_flush_cluster(cache, dirtyatom->devno, dirtystart, 1, -1);
         }
         /* Zero return: flush request could not be issued -- stop here. */
         if (cache_issue_flushreq(cache, dirtystart, dirtyend, dirtyatom, allocdesc) == 0) {
            return(flushcnt);
         }
         dirtystart = -1;
         flushcnt++;
      }
      tmp = tmp->line_next;
   }
   if (dirtystart != -1) {
      /* linesize 0 (unstructured cache) is treated as one-block lines. */
      int linesize = max(cache->linesize, 1);
      int linecnt;
      /* Try to extend the flush cluster backward from the start of the line. */
      if ((cache->flush_maxlinecluster > 1) && (dirtystart == dirtyline->lbn)) {
         dirtystart = cache_flush_cluster(cache, dirtyatom->devno, dirtystart, 1, -1);
      }
      linecnt = 1 + ((dirtyline->lbn - dirtystart) / linesize);
      /* ...and forward from the end of the line, bounded by flush_maxlinecluster. */
      if ((linecnt < cache->flush_maxlinecluster) && (dirtyend == (dirtyline->lbn + linesize -1))) {
         dirtyend = cache_flush_cluster(cache, dirtyatom->devno, dirtyend, linecnt, 1);
      }
      flushcnt += cache_issue_flushreq(cache, dirtystart, dirtyend, dirtyatom, allocdesc);
   }
   // fprintf (outputfile, "flushcnt %d\n", flushcnt);
   return(flushcnt);
}

/* Allocate and initialize a descriptor used to track a flush.  The */
/* descriptor comes from the simulator's extraq free pool.          */
static struct cache_mem_event *cache_get_flushdesc()
{
   struct cache_mem_event *flushdesc = (struct cache_mem_event *) getfromextraq();

   flushdesc->type = CACHE_EVENT_SYNC;
   flushdesc->donefunc = &disksim->donefunc_cachemem_empty;
   flushdesc->req = NULL;
   return(flushdesc);
}

/* Release a flush descriptor.  If no flush requests were attached (req is */
/* NULL) it goes straight back to the extraq pool; otherwise, when exactly */
/* one request remains, its block number is remembered in accblkno         */
/* (presumably the descriptor itself is reclaimed when the flush           */
/* completes -- not visible here).                                         */
static void cache_cleanup_flushdesc (struct cache_mem_event *flushdesc)
{
   if (flushdesc->req) {
      if (flushdesc->req->next == NULL) {
         flushdesc->accblkno = flushdesc->req->blkno;
      }
   } else {
      addtoextraq((event *) flushdesc);
   }
}

/* Not currently dealing with case of two-handed flushing. Easiest way to */
/* do this will be to allocate the cache as one big chunk of memory. Then,*/
/* use the addresses of cache_atoms rather than the pointers to traverse. */

/* Timer callback: walk every hash-map set and LRU segment, start a flush */
/* for each dirty atom found, then re-arm the timer for the next period.  */
static void cache_periodic_flush (timer_event *timereq)
{
   struct cache_mem *cache = (struct cache_mem *) timereq->ptr;
   /* SLRU keeps numsegs LRU segments; every other policy uses one. */
   int segcnt = (cache->replacepolicy == CACHE_REPLACE_SLRU) ?
cache->numsegs : 1;   /* continues the ternary begun on the previous line */
   int i, j;
   cache_atom *line;
   cache_atom *stop;
   cache_atom *tmp;
   struct cache_mem_event *flushdesc = cache_get_flushdesc();
   int flushcnt = 0;
   int startit;

   for (i=0; i<=cache->mapmask; i++) {
      for (j=0; j<segcnt; j++) {
         line = cache->map[i].lru[j];
         stop = line;
         startit = 1;
         /* Circular LRU list: startit forces at least one pass from "stop". */
         while ((startit) || (line != stop)) {
            startit = 0;
            tmp = line;
            /* Scan every atom chained to this line for dirty blocks. */
            while (tmp) {
               if (tmp->state & CACHE_DIRTY) {
                  flushcnt += cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
               }
               tmp = tmp->line_next;
            }
            line = line->lru_next;
         };
      }
   }
   cache_cleanup_flushdesc(flushdesc);
   /* Re-arm the periodic flush timer. */
   timereq->time += cache->flush_period;
   addtointq((event *)timereq);
   // fprintf (outputfile, "%f: cache_periodic_flush, %d flushes started\n", simtime, flushcnt);
}

/* Idle-time callback: when a device has nothing queued, opportunistically */
/* start flushing dirty blocks that belong to it.  Stops as soon as one    */
/* flush request actually gets attached to the descriptor.                 */
static void cache_idletime_detected (void *idleworkparam, int idledevno)
{
   struct cache_mem *cache = idleworkparam;
   cache_atom *line = cache_get_replace_startpoint(cache, 0);
   cache_atom *stop = line;
   cache_atom *tmp;
   int segcnt = (cache->replacepolicy == CACHE_REPLACE_SLRU) ? cache->numsegs : 1;
   int i;
   struct cache_mem_event *flushdesc;
   int startit;

   /* Device is busy again -- nothing to do. */
   if (ioqueue_get_number_in_queue((*cache->queuefind)(cache->queuefindparam, idledevno))) {
      return;
   }
   flushdesc = cache_get_flushdesc();
   flushdesc->type = CACHE_EVENT_IDLESYNC;
   for (i=0; i<segcnt; i++) {
      if (i) {
         /* NOTE(review): only map[0] is rescanned for segments beyond the  */
         /* first (unlike cache_periodic_flush, which walks every set) --   */
         /* presumably tied to cache_get_replace_startpoint; confirm.       */
         line = cache->map[0].lru[i];
         stop = line;
      }
      startit = 1;
      while ((startit) || (line != stop)) {
         startit = 0;
         if (line->devno == idledevno) {
            tmp = line;
            while (tmp) {
               if (tmp->state & CACHE_DIRTY) {
                  (void)cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
                  /* One issued flush is enough for this idle period. */
                  if (flushdesc->req) {
                     goto cache_idletime_detected_idleused;
                  }
               }
               tmp = tmp->line_next;
            }
         }
         line = line->lru_next;
      }
   }
cache_idletime_detected_idleused:
   cache_cleanup_flushdesc(flushdesc);
}

/* Remove a line from its LRU list and the hash table and put it on the   */
/* CACHE_SEGNUM list (the free segment, judging by cache_get_free_atom).  */
/* For unstructured caches (linesize 0) each chained atom is freed        */
/* individually; otherwise the chain is kept and freed as a unit.         */
static void cache_unmap_line (struct cache_mem *cache, cache_atom *line, int set)
{
   cache_atom *tmp;

   if (line->lru_next) {
      cache_remove_from_lrulist(&cache->map[set], line, (line->state & CACHE_SEGNUM));
   }
   if (cache->linesize == 0) {
      /* Unstructured: detach each atom from the chain before freeing it. */
      while ((tmp = line)) {
         line =
line->line_next;   /* completes "line = " begun on the previous line */
         tmp->line_next = NULL;
         tmp->line_prev = NULL;
         cache_remove_entry_from_hash(cache, tmp);
         cache_add_to_lrulist(&cache->map[set], tmp, CACHE_SEGNUM);
      }
   } else {
      /* Fixed-size line: chain stays intact; free the line as one unit */
      /* and drop every atom from the hash table.                       */
      cache_add_to_lrulist(&cache->map[set], line, CACHE_SEGNUM);
      while (line) {
         cache_remove_entry_from_hash(cache, line);
         line = line->line_next;
      }
   }
}

/* Pick a victim line in "set" and free it so an allocation can proceed.   */
/* Returns 0 when a line was freed (or the free list already had space),   */
/* -1 when the caller must wait for a line (allocdesc gets queued), or the */
/* number of dirty-block writeouts the caller must wait for.               */
static int cache_replace (struct cache_mem *cache, int set, struct cache_mem_event *allocdesc)
{
   int numwrites;
   cache_atom *line;
   cache_atom *tmp;
   cache_atom *stop;
   int dirty = FALSE;
   int locked = FALSE;
   /* Under ALLOCATE_NONDIRTY dirty victims are flushed via a separate     */
   /* descriptor (allocated lazily below); otherwise the flush is attached */
   /* to the allocation itself.                                            */
   struct cache_mem_event *flushdesc = (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY) ? NULL : allocdesc;

   if (cache->map[set].freelist) {
      return(0);
   }
   if ((line = cache_get_replace_startpoint(cache, set)) == NULL) {
      /* All lines between ownership */
      cache_replace_waitforline(cache, allocdesc);
      return(-1);
   }
   stop = line;

cache_replace_loop_continue:
   /* Advance past a candidate found locked or dirty on the previous pass; */
   /* LIFO walks the list backwards.                                       */
   if (locked | dirty) {
      line = (cache->replacepolicy == CACHE_REPLACE_LIFO) ? line->lru_prev : line->lru_next;
   }
   if (line == stop) {
      /* Wrapped all the way around: if everything was locked, give up and wait. */
      if (locked) {
         if ((flushdesc) && (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY)) {
            cache_cleanup_flushdesc(flushdesc);
         }
         cache_replace_waitforline(cache, allocdesc);
         return(-1);
      }
   }
   /* Reject the line if any of its atoms is read- or write-locked. */
   locked = FALSE;
   tmp = line;
   while (tmp) {
      if ((locked = (tmp->readlocks || tmp->writelock))) {
         goto cache_replace_loop_continue;
      }
      tmp = tmp->line_next;
   }
   /* Dirty atoms must be written out before the line can be reclaimed. */
   dirty = FALSE;
   tmp = line;
   while (tmp) {
      if ((dirty = tmp->state & CACHE_DIRTY)) {
         if (flushdesc == NULL) {
            flushdesc = cache_get_flushdesc();
         }
         numwrites = cache_initiate_dirty_block_flush(cache, tmp, flushdesc);
         if (cache->allocatepolicy & CACHE_ALLOCATE_NONDIRTY) {
            goto cache_replace_loop_continue;
         } else {
            return(numwrites);
         }
      }
      tmp = tmp->line_next;
   }
   /* Line is clean and unlocked -- reclaim it. */
   cache_unmap_line(cache, line, set);
   return(0);
}

/* Return number of writeouts (dirty block flushes) to be waited for. */
/* Also fill pointer to block allocated. Null indicates that blocks must */
/* be written out but no specific one has yet been allocated.
*/
/* Take an atom from set's free list into *ret, running replacement first  */
/* if the free list is empty.  Returns the number of writeouts that must   */
/* complete before the allocation can proceed (0 when *ret is usable now;  */
/* *ret may be NULL when replacement could not free a line immediately).   */
static int cache_get_free_atom (struct cache_mem *cache, int lbn, cache_atom **ret, struct cache_mem_event *allocdesc)
{
   int writeouts = 0;
   /* NOTE(review): elsewhere mapmask is used as an inclusive bound         */
   /* (for i <= cache->mapmask), so "lbn % mapmask" can never select the    */
   /* last set; "lbn & mapmask" may have been intended -- confirm against   */
   /* the map initialization code.                                          */
   int set = (cache->mapmask) ? (lbn % cache->mapmask) : 0;

   // fprintf (outputfile, "Entered cache_get_free_atom: lbn %d, set %d, freelist %p\n", lbn, set, cache->map[set].freelist);
   if (cache->map[set].freelist == NULL) {
      writeouts = cache_replace(cache, set, allocdesc);
   }
   if ((*ret = cache->map[set].freelist)) {
      cache_remove_from_lrulist(&cache->map[set], *ret, CACHE_SEGNUM);
   }
   return(writeouts);
}

/* Still need to add check for outstanding allocations by other people, to
   avoid allocation replication */

/* Continue (or start) allocating cache atoms for the block range recorded */
/* in allocdesc [lockstop, allocstop).  Returns NULL when the allocation   */
/* finished, or allocdesc itself when it must wait for writeouts.          */
struct cache_mem_event *cache_allocate_space_continue (struct cache_mem *cache, struct cache_mem_event *allocdesc)
{
   int numwrites = 0;
   cache_atom *new;
/*
   cache_atom *toclean = NULL;
   cache_atom *tocleanlast;
   int flushstart = -1;
*/
   int devno = allocdesc->req->devno;
   int lbn = allocdesc->lockstop;
   int stop = allocdesc->allocstop;
   cache_atom *cleaned = allocdesc->cleaned;
   cache_atom *lineprev = allocdesc->lineprev;
   int linesize = (cache->linesize) ? cache->linesize : 1;

   // fprintf (outputfile, "Entered allocate_space_continue: lbn %d, stop %d\n", lbn, stop);
   /* A waiting read/write descriptor means the allocation already          */
   /* completed; just resume the blocked request and retire allocdesc.      */
   if (allocdesc->waitees) {
      struct cache_mem_event *rwdesc = allocdesc->waitees;
      if (rwdesc->type == CACHE_EVENT_READ) {
         cache_read_continue(cache, rwdesc);
      } else {
         cache_write_continue(cache, rwdesc);
      }
      addtoextraq((event *) allocdesc);
      return(NULL);
   }
   while (lbn < stop) {
      if ((new = cleaned) == NULL) {
         numwrites += cache_get_free_atom(cache, lbn, &new, allocdesc);
      }
      if (numwrites == 0) {
         ASSERT(new != NULL);
         /* Claim a full line's worth of atoms for this range. */
         do {
            new->devno = devno;
            new->lbn = lbn;
            /* Re-allocated cache atom must not still be locked */
            ASSERT((!new->writelock) && (!new->readlocks));
/*          new->writelock = allocdesc->prev->req; */
            new->state = CACHE_LOCKDOWN;
            cache_insert_new_into_hash(cache, new);
            lbn++;
            new = (lbn % linesize) ?
new->line_next : new;   /* completes the ternary begun on the previous line */
         } while (lbn % linesize);
         if (cache->linesize == 0) {
            /* Unstructured cache: link the newly claimed atom onto the */
            /* chain being built up across loop iterations.             */
            new->line_next = NULL;
            new->line_prev = lineprev;
            if (lineprev) {
               lineprev->line_next = new;
            }
            lineprev = ((cache->linesize == -1) || (lbn % linesize)) ? new : NULL;
         }
/*
      } else if (cache->startallflushes) {
         if (flushstart == -1) {
            flushstart = i;
         }
         if (new) {
            if (toclean) {
               tocleanlast->line_next = new;
               tocleanlast = new;
            } else {
               toclean = new;
               tocleanlast = new;
            }
            while (tocleanlast->line_next) {
               tocleanlast = tocleanlast->line_next;
            }
         }
*/
      } else {
         /* Replacement started writeouts: record progress so the */
         /* allocation can be resumed from this point later.      */
         allocdesc->lockstop = lbn;
         allocdesc->cleaned = new;
         allocdesc->lineprev = lineprev;
         /* This needs fixing! */
/*       cache_waitfor_IO(cache, numwrites, allocdesc, NULL); */
         return(allocdesc);
      }
   }
/*
   if (numwrites) {
      allocdesc->lockstop = flushstart;
      allocdesc->cleaned = toclean;
      allocdesc->lineprev = lineprev;
      cache_wait(cache, numwrites, allocdesc);
      return(allocdesc);
   }
*/
   addtoextraq((event *) allocdesc);
   return(NULL);
}

/* Allocate cache atoms covering [lbn, lbn+size), rounded out to whole    */
/* cache lines.  Returns NULL if space was allocated immediately, or the  */
/* allocation descriptor when rwdesc must wait for it to complete.        */
static struct cache_mem_event * cache_allocate_space (struct cache_mem *cache, int lbn, int size, struct cache_mem_event *rwdesc)
{
   struct cache_mem_event *allocdesc = (struct cache_mem_event *) getfromextraq();
   int linesize = max(1, cache->linesize);

   // fprintf (outputfile, "Entered cache_allocate_space: lbn %d, size %d, linesize %d\n", lbn, size, cache->linesize);
   allocdesc->type = CACHE_EVENT_ALLOCATE;
   allocdesc->req = rwdesc->req;
   allocdesc->flags = rwdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE;
   /* Round the range down (lockstop) / up (allocstop) to line boundaries. */
   allocdesc->lockstop = lbn - (lbn % linesize);
   allocdesc->allocstop = lbn + size + (linesize - 1 - ((lbn + size - 1) % linesize));
   allocdesc->cleaned = NULL;
   allocdesc->lineprev = NULL;
   allocdesc->prev = rwdesc;
   allocdesc->waitees = NULL;
   if ((allocdesc = cache_allocate_space_continue(cache, allocdesc))) {
      /* Allocation is blocked -- remember whom to wake when it finishes. */
      allocdesc->waitees = rwdesc;
      rwdesc->flags |= allocdesc->flags & CACHE_FLAG_LINELOCKED_ALLOCATE;
   }
   return(allocdesc);
}

/* Acquire read (locktype 1) or write locks on atoms starting at "line";  */
/* locktype 3 releases an existing read lock before taking the write lock */
/* (an upgrade).  Definition continues on the following chunk.            */
static int cache_get_rw_lock (struct cache_mem *cache, int locktype, struct cache_mem_event *rwdesc, cache_atom *line,
int i, int stop)   /* completes the parameter list begun on the previous line */
{
   int lockgran;
   cache_atom *tmp = line;
   int j = 0;
   int lbn = rwdesc->req->blkno;
   int devno = rwdesc->req->devno;

   // fprintf (outputfile, "Entered cache_get_rw_lock: lbn %d, i %d, stop %d, locktype %d\n", line->lbn, i, stop, locktype);
   while (j < stop) {
      if (locktype == 1) {
         lockgran = cache_get_read_lock(cache, tmp, rwdesc);
      } else {
         /* locktype 3: lock upgrade -- drop the read lock first. */
         if (locktype == 3) {
            cache_free_read_lock(cache, tmp, rwdesc->req);
         }
         lockgran = cache_get_write_lock(cache, tmp, rwdesc);
      }
      // fprintf (outputfile, "got lock: lockgran %d, lbn %d\n", lockgran, tmp->lbn);
      /* lockgran 0: lock not granted (presumably the requester blocks). */
      if (lockgran == 0) {
         return(1);
      } else {
         /* If the line no longer holds the expected block, back the lock out. */
         if ((line->lbn != (lbn + i)) || (line->devno != devno)) {
            /* NOTE: this precaution only covers us when FIRST atom of line */
            /* changes identity. Otherwise, must have other support. */
            if (locktype == 1) {
               cache_free_read_lock(cache, tmp, rwdesc->req);
            } else {
               cache_free_write_lock(cache, tmp, rwdesc->req);
            }
   /* NOTE(review): the function body continues past the end of this chunk. */