📄 disksim_diskcache.c
字号:
/* NOTE(review): this chunk begins mid-function.  The code immediately below
 * is the tail of disk_buffer_block_available (named in its own error string);
 * its opening lines are not visible here, so only formatting and comments
 * were touched -- all code tokens are preserved verbatim. */
               min(currdisk->effectivehda->ioreqlist->bcount, 10)) {
               return(FALSE);
            }
         } else {
            /* require a minimum amount of data already in the segment */
            if ((seg->endblkno - seg->startblkno) < 15) {
               return(FALSE);
            }
         }
      }
   } else {
      fprintf(stderr, "Not actively using disk at disk_buffer_block_available\n");
      exit(1);
   }
   return(TRUE);
}


/* This function may be out of date.  Goal is/was to assist in understanding
 * how sequentiality/locality is affected by disk array data organizations
 * and disk striping in particular. */

void disk_interferestats (disk *currdisk, ioreq_event *curr)
{
   /* Nothing to record unless interference stats were requested. */
   if (!device_printinterferestats) {
      return;
   }

   /* A sequential or local request whose generator ("cause") differs from
    * the previous request's generator is counted as interference between
    * streams: slot 0 for sequential, slot 1 for local. */
   if ((curr->cause != currdisk->lastgen) && (curr->flags & (SEQ|LOCAL))) {
      if (curr->flags & SEQ) {
         currdisk->stat.interfere[0]++;
      } else {
         currdisk->stat.interfere[1]++;
      }
   }

   /* remember which generator issued the last request seen by this disk */
   currdisk->lastgen = curr->cause;
}


/* Accumulate per-disk buffer-cache hit/miss statistics for one request.
 *
 * currdisk  disk whose stat counters are updated
 * curr      the request (blkno/bcount/READ flag are read)
 * seg       the cache segment matched against the request (may be examined
 *           only for BUFFER_PARTIAL and the error path)
 * hittype   one of the BUFFER_* hit classifications
 *
 * No-op unless device_printbufferstats is set.  Exits the simulator on an
 * unknown hittype or on a "tail hit" (request head before segment start),
 * which is not currently supported. */

static void disk_buffer_stats (disk *currdisk, ioreq_event *curr, segment *seg, int hittype)
{
   double size;

   if (!device_printbufferstats) {
      return;
   }

   /* Add stats for read hits on write data? */
   switch (hittype) {

      case BUFFER_NOMATCH:
         if (curr->flags & READ) {
            currdisk->stat.readmisses++;
         } else {
            currdisk->stat.writemisses++;
         }
         break;

      case BUFFER_WHOLE:
         currdisk->stat.fullreadhits++;
         break;

      case BUFFER_APPEND:
         currdisk->stat.appendhits++;
         break;

      case BUFFER_PREPEND:
         currdisk->stat.prependhits++;
         break;

      case BUFFER_PARTIAL:
         /* "size" = number of requested blocks already present in the
          * segment; the remainder (bcount - size) still must be fetched. */
         if (seg->startblkno <= curr->blkno) {
            size = (double) (seg->endblkno - curr->blkno);
         } else {
/*          size = (double) (curr->blkno + curr->bcount - seg->startblkno);*/
            fprintf(stderr, "Tail hits not currently allowed.\n");
            exit(1);
         }
         if (seg->state == BUFFER_READING) {
            /* partial hit on a segment with an ongoing prefetch/read */
/*          fprintf (outputfile, "Ongoing hit: blkno %d bcount %d segstart %d segstop %d read %d\n", curr->blkno, curr->bcount, seg->startblkno, seg->endblkno, seg->access->type);*/
            currdisk->stat.readinghits++;
            currdisk->stat.runreadingsize += size;
            currdisk->stat.remreadingsize += curr->bcount - size;
         } else {
/*          fprintf (outputfile, "Partial hit: blkno %d bcount %d segstart %d segstop %d read %d\n", curr->blkno, curr->bcount, seg->startblkno, seg->endblkno, ((curr->flags & READ) && (seg->state == BUFFER_CLEAN)));*/
            currdisk->stat.parthits++;
            currdisk->stat.runpartsize += size;
            currdisk->stat.rempartsize += curr->bcount - size;
         }
         break;

      default:
         fprintf(stderr, "Invalid hittype in disk_buffer_stats - blkno %d, bcount %d, state %d\n", curr->blkno, curr->bcount, seg->state);
         exit(1);
   }
   return;
}


/* segment selection priority for read requests:
 *
 *   11  BUFFER_WHOLE,   BUFFER_DIRTY   (write data is freshest, less chance
 *                                       of data scrolling off the segment
 *                                       quickly)
 *   10                  BUFFER_WRITING (write data is freshest)
 *    9                  BUFFER_READING (possible prefetch-stream hit)
 *    8                  BUFFER_CLEAN
 *    7  BUFFER_PARTIAL, BUFFER_DIRTY   (write data is freshest)
 *    6                  BUFFER_WRITING (write data is freshest)
 *    5                  BUFFER_READING (possible prefetch-stream hit;
 *                                       includes case of current prefetch
 *                                       block being the first block of the
 *                                       new request)
 *    4                  BUFFER_CLEAN
 *    3  BUFFER_NOMATCH, BUFFER_EMPTY   (doesn't trash other data)
 *    2                  BUFFER_CLEAN
 *    1                  BUFFER_READING (must be last place for preempt check)
 *
 *   not usable:
 *      BUFFER_NOMATCH, BUFFER_DIRTY
 *                      BUFFER_WRITING
 *                      BUFFER_CLEAN   (if pending requests)
 *                      BUFFER_READING (if not pre-emptable or pending
 *                                      requests)
 *      recycled segs
 *      dedicatedwriteseg
 *
 *   not usable if !enablecache:
 *      BUFFER_DIRTY
 *      BUFFER_WRITING
 *      BUFFER_READING (if non-stoppable)
 *
 *   not usable if no read hits on write data:
 *      BUFFER_WHOLE,   BUFFER_DIRTY
 *                      BUFFER_WRITING
 *                      BUFFER_CLEAN (if previous request was a write)
 *      BUFFER_PARTIAL, BUFFER_DIRTY
 *                      BUFFER_WRITING
 *                      BUFFER_CLEAN (if previous request was a write)
 *
 *   NO seg usable if overlapping dirty data exists which cannot be "hit" --
 *   return BUFFER_COLLISION hittype if seg in question is not "reusable".
 *   NO seg usable if two segments are found with overlapping dirty data --
 *   return BUFFER_COLLISION hittype.
 */

/* this routine sets currdiskreq->seg and currdiskreq->hittype */
/* As per the above table, it finds the best segment in the cache matching
 * this request.
   If it doesn't find one, it sets seg to NULL and hittype to
 * BUFFER_NOMATCH */

static segment * disk_buffer_select_read_segment(disk *currdisk, diskreq *currdiskreq)
{
   segment *seg;
   ioreq_event *first_ioreq = currdiskreq->ioreqlist;
   ioreq_event *tmpioreq;
   int best_value = 0;
   int curr_value;
   int curr_hittype = 0;

   if (disk_printhack && (simtime >= disk_printhacktime)) {
      fprintf (outputfile, "%12.6f %8p Entering disk_buffer_select_read_segment\n",simtime,currdiskreq);
      fflush(outputfile);
   }

   /* Start from "no match"; the scan below may upgrade this, or flag a
    * collision (which disqualifies every segment). */
   currdiskreq->seg = NULL;
   currdiskreq->hittype = BUFFER_NOMATCH;

   /* Score every cache segment per the priority table above, keeping the
    * highest-scoring usable one in currdiskreq->seg/hittype. */
   seg = currdisk->seglist;
   while (seg) {
      curr_value = -1;
      if (currdiskreq->hittype == BUFFER_COLLISION) {
         /* collision already detected -- no segment can be used */
      } else if (seg->recyclereq) {
         /* segment is being recycled -- not usable */
      } else if ((currdisk->dedicatedwriteseg) && (seg == currdisk->dedicatedwriteseg)) {
         /* dedicated write segment: never selected for reads, but must be
          * checked for collision with dirty data */
         if ((seg->state == BUFFER_DIRTY) || (seg->state == BUFFER_WRITING)) {
            tmpioreq = currdiskreq->ioreqlist;
            while (tmpioreq) {
               if (disk_buffer_overlap(seg,tmpioreq)) {
                  if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                     currdiskreq->hittype = BUFFER_COLLISION;
                     currdiskreq->seg = NULL;
                  }
                  break;
               }
               tmpioreq = tmpioreq->next;
            }
         }
      } else if (seg->state == BUFFER_EMPTY) {
         curr_value = 3;
         curr_hittype = BUFFER_NOMATCH;
      } else if ((first_ioreq->blkno < seg->startblkno) || (first_ioreq->blkno >= seg->endblkno)) {
         /* the request's first block lies outside this segment's range */
         curr_hittype = BUFFER_NOMATCH;
         if ((seg->state == BUFFER_CLEAN) && !(seg->diskreqlist)) {
            curr_value = 2;
         } else if (seg->state == BUFFER_READING) {
            if ((seg->endblkno == first_ioreq->blkno) && currdisk->almostreadhits) {
               /* special case: block currently being prefetched is the
                * first block of this request.  We count this as a
                * partial read hit. */
               curr_hittype = BUFFER_PARTIAL;
               curr_value = 5;
            } else {
               if (!seg->diskreqlist->ioreqlist) {
                  curr_value = 1;
               }
            }
         } else if ((first_ioreq->blkno < seg->startblkno) && ((seg->state == BUFFER_DIRTY) || (seg->state == BUFFER_WRITING))) {
            /* check for collision with dirty data */
            tmpioreq = currdiskreq->ioreqlist;
            while (tmpioreq) {
               if (disk_buffer_overlap(seg,tmpioreq)) {
                  if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                     currdiskreq->hittype = BUFFER_COLLISION;
                     currdiskreq->seg = NULL;
                  }
                  break;
               }
               tmpioreq = tmpioreq->next;
            }
         }
      } else {
         /* the request's first block falls inside this segment -- score
          * according to the segment's state */
         switch (seg->state) {

            case BUFFER_DIRTY:
               if (currdisk->enablecache) {
                  if ((currdisk->readhitsonwritedata) && (best_value < 10)) {
                     curr_value = 11;
                  } else {
                     if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                        currdiskreq->hittype = BUFFER_COLLISION;
                        currdiskreq->seg = NULL;
                     }
                  }
               }
               break;

            case BUFFER_WRITING:
               if (currdisk->enablecache) {
                  if ((currdisk->readhitsonwritedata) && (best_value < 10)) {
                     curr_value = 10;
                  } else {
                     if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                        currdiskreq->hittype = BUFFER_COLLISION;
                        currdiskreq->seg = NULL;
                     }
                  }
               }
               break;

            case BUFFER_READING:
               if (currdisk->enablecache) {
                  curr_value = 9;
               }
               break;

            case BUFFER_CLEAN:
               if (currdisk->readhitsonwritedata || (seg->access && (seg->access->flags & READ))) {
                  curr_value = 8;
               }
               break;

            default:
               ddbg_assert(0);
         }

         /* whole hit if the segment covers the request's full range;
          * otherwise it is a partial hit, worth 4 less per the table */
         if (seg->endblkno >= (first_ioreq->blkno + first_ioreq->bcount)) {
            curr_hittype = BUFFER_WHOLE;
         } else {
            curr_hittype = BUFFER_PARTIAL;
            curr_value -= 4;
         }
      }

      /* keep the best-scoring segment seen so far */
      if (curr_value > best_value) {
         currdiskreq->seg = seg;
         currdiskreq->hittype = curr_hittype;
         best_value = curr_value;
      }
      seg = seg->next;
   }

   /* If BUFFER_NOMATCH && BUFFER_READING, perform preemption check.
    * Note that currdiskreq->seg should be set before the call. */
   if ((best_value == 1) && !disk_buffer_stopable_access(currdisk,currdiskreq)) {
      currdiskreq->seg = NULL;
   }

   if (disk_printhack && (simtime >= disk_printhacktime)) {
      fprintf (outputfile, "          segment = %8p, hittype = %d\n",currdiskreq->seg,currdiskreq->hittype);
      fflush(outputfile);
   }
   return currdiskreq->seg;
}


/* segment selection priority for write requests:
 *
 *   7  BUFFER_APPEND,  BUFFER_WRITING (keep the stream going)
 *   6                  BUFFER_DIRTY
 *   5  BUFFER_PREPEND, BUFFER_DIRTY
 *   4  BUFFER_NOMATCH, BUFFER_CLEAN, overlapping (will be emptied anyways)
 *   3                  BUFFER_EMPTY (don't trash other data)
 *   2                  BUFFER_CLEAN
 *   1  BUFFER_NOMATCH, BUFFER_READING (must be last place for preempt check)
 *
 *   not usable:
 *      BUFFER_APPEND  (if no combining writes)
 *      BUFFER_PREPEND (if no combining writes)
 *      BUFFER_NOMATCH, BUFFER_DIRTY
 *                      BUFFER_WRITING
 *                      BUFFER_READING (if not pre-emptable or pending
 *                                      requests)
 *                      BUFFER_CLEAN (if pending requests)
 *      recycled segs
 *
 *   NO seg usable if overlapping data exists which cannot be "cleaned" --
 *   return BUFFER_COLLISION hittype if seg in question isn't "reusable".
 */

/* this routine sets currdiskreq->seg and currdiskreq->hittype */

/* NOTE(review): this copy of the file is truncated below -- the body of
 * disk_buffer_select_write_segment continues past the end of this chunk.
 * Tokens preserved verbatim; formatting and comments only. */
static segment* disk_buffer_select_write_segment(disk *currdisk, diskreq *currdiskreq)
{
   segment *seg;
   diskreq *tmp_diskreq;
   diskreq *holddiskreq;
   ioreq_event *first_ioreq;
   ioreq_event *last_ioreq;
   ioreq_event *tmpioreq;
   int best_value = 0;
   int curr_value;
   int curr_hittype;
/* int reusable_dirty_segment = FALSE;*/

   if (disk_printhack && (simtime >= disk_printhacktime)) {
      fprintf (outputfile, "%12.6f %8p Entering disk_buffer_select_write_segment\n",simtime,currdiskreq);
      fprintf (outputfile, "        numdirty = %d\n", currdisk->numdirty);
      fflush(outputfile);
   }

   /* Start from "no match"; the scan below may upgrade this, or flag a
    * collision (which disqualifies every segment). */
   currdiskreq->seg = NULL;
   currdiskreq->hittype = BUFFER_NOMATCH;

   seg = currdisk->seglist;
   while (seg) {
      curr_value = -1;
      curr_hittype = BUFFER_NOMATCH;
      if (currdiskreq->hittype == BUFFER_COLLISION) {
         /* collision already detected -- no segment can be used */
      } else if (seg->recyclereq) {
         /* segment is being recycled -- not usable */
      } else if ((currdisk->dedicatedwriteseg) && (seg != currdisk->dedicatedwriteseg)) {
         /* non-write segment when a dedicated write segment exists:
          * check for collision with uncleanable data */
         if (((seg->state == BUFFER_CLEAN) && (seg->diskreqlist)) ||
             ((seg->state == BUFFER_READING) &&
              ((seg->diskreqlist->ioreqlist) ||
               (seg->diskreqlist->seg_next) ||
               !disk_buffer_stopable_access(currdisk,currdiskreq)))) {
            tmpioreq = currdiskreq->ioreqlist;
            while (tmpioreq) {
               if (disk_buffer_overlap(seg,tmpioreq)) {
                  if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                     currdiskreq->hittype = BUFFER_COLLISION;
                     currdiskreq->seg = NULL;
                  }
                  break;
               }
               tmpioreq = tmpioreq->next;
            }
         }
      } else if (seg->state == BUFFER_EMPTY) {
         if (currdisk->numdirty < currdisk->numwritesegs) {
/*           || reusable_dirty_segment) {*/
            curr_value = 3;
         }
      } else if (seg->state == BUFFER_READING) {
         if (!seg->diskreqlist->ioreqlist && !seg->diskreqlist->seg_next) {
            if (currdisk->numdirty < currdisk->numwritesegs) {
/*              || reusable_dirty_segment) {*/
               curr_value = 1;
            }
         } else {
            tmpioreq = currdiskreq->ioreqlist;
            while (tmpioreq) {
               if (disk_buffer_overlap(seg,tmpioreq)) {
                  if (!disk_buffer_reusable_segment_check(currdisk, seg)) {
                     currdiskreq->hittype = BUFFER_COLLISION;
                     currdiskreq->seg = NULL;
                  }
                  break;
               }
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -