📄 cache.c
字号:
/* register cache stats */voidcache_reg_stats(struct cache_t *cp, /* cache instance */ struct stat_sdb_t *sdb) /* stats database */{ char buf[512], buf1[512], *name; /* get a name for this cache */ if (!cp->name || !cp->name[0]) name = "<unknown>"; else name = cp->name; sprintf(buf, "%s.accesses", name); sprintf(buf1, "%s.hits + %s.misses", name, name); stat_reg_formula(sdb, buf, "total number of accesses", buf1, "%12.0f"); sprintf(buf, "%s.hits", name); stat_reg_counter(sdb, buf, "total number of hits", &cp->hits, 0, NULL); sprintf(buf, "%s.misses", name); stat_reg_counter(sdb, buf, "total number of misses", &cp->misses, 0, NULL); sprintf(buf, "%s.replacements", name); stat_reg_counter(sdb, buf, "total number of replacements", &cp->replacements, 0, NULL); sprintf(buf, "%s.writebacks", name); stat_reg_counter(sdb, buf, "total number of writebacks", &cp->writebacks, 0, NULL); sprintf(buf, "%s.invalidations", name); stat_reg_counter(sdb, buf, "total number of invalidations", &cp->invalidations, 0, NULL); sprintf(buf, "%s.miss_rate", name); sprintf(buf1, "%s.misses / %s.accesses", name, name); stat_reg_formula(sdb, buf, "miss rate (i.e., misses/ref)", buf1, NULL); sprintf(buf, "%s.repl_rate", name); sprintf(buf1, "%s.replacements / %s.accesses", name, name); stat_reg_formula(sdb, buf, "replacement rate (i.e., repls/ref)", buf1, NULL); sprintf(buf, "%s.wb_rate", name); sprintf(buf1, "%s.writebacks / %s.accesses", name, name); stat_reg_formula(sdb, buf, "writeback rate (i.e., wrbks/ref)", buf1, NULL); sprintf(buf, "%s.inv_rate", name); sprintf(buf1, "%s.invalidations / %s.accesses", name, name); stat_reg_formula(sdb, buf, "invalidation rate (i.e., invs/ref)", buf1, NULL);}/* print cache stats */voidcache_stats(struct cache_t *cp, /* cache instance */ FILE *stream) /* output stream */{ double sum = (double)(cp->hits + cp->misses); fprintf(stream, "cache: %s: %.0f hits %.0f misses %.0f repls %.0f invalidations\n", cp->name, (double)cp->hits, (double)cp->misses, (double)cp->replacements, (double)cp->invalidations); fprintf(stream, "cache: %s: miss rate=%f repl rate=%f invalidation rate=%f\n", cp->name, (double)cp->misses/sum, (double)(double)cp->replacements/sum, (double)cp->invalidations/sum);}/* access a cache, perform a CMD operation on cache CP at address ADDR, places NBYTES of data at *P, returns latency of operation if initiated at NOW, places pointer to block user data in *UDATA, *P is untouched if cache blocks are not allocated (!CP->BALLOC), UDATA should be NULL if no user data is attached to blocks */unsigned int /* latency of access in cycles */cache_access(struct cache_t *cp, /* cache to access */ enum mem_cmd cmd, /* access type, Read or Write */ md_addr_t addr, /* address of access */ void *vp, /* ptr to buffer for input/output */ int nbytes, /* number of bytes to access */ tick_t now, /* time of access */ byte_t **udata, /* for return of user data ptr */ md_addr_t *repl_addr) /* for address of replaced block */{ byte_t *p = vp; md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); md_addr_t bofs = CACHE_BLK(cp, addr); struct cache_blk_t *blk, *repl; int lat = 0; /* default replacement address */ if (repl_addr) *repl_addr = 0; /* check alignments */ if ((nbytes & (nbytes-1)) != 0 || (addr & (nbytes-1)) != 0) fatal("cache: access error: bad size or alignment, addr 0x%08x", addr); /* access must fit in cache block */ /* FIXME: ((addr + (nbytes - 1)) > ((addr & ~cp->blk_mask) + (cp->bsize - 1))) */ if ((addr + nbytes) > ((addr & ~cp->blk_mask) + cp->bsize)) fatal("cache: access error: access spans block, addr 0x%08x", addr); /* permissions are checked on cache misses */ /* check for a fast hit: access to same block */ if (CACHE_TAGSET(cp, addr) == cp->last_tagset) { /* hit in the same block */ blk = cp->last_blk; goto cache_fast_hit; } if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) goto cache_hit; } } /* cache block not found */ /* **MISS** */ cp->misses++; /* select the appropriate block to replace, and re-link this entry to the appropriate place in the way list */ switch (cp->policy) { case LRU: case FIFO: repl = cp->sets[set].way_tail; update_way_list(&cp->sets[set], repl, Head); break; case Random: { int bindex = myrand() & (cp->assoc - 1); repl = CACHE_BINDEX(cp, cp->sets[set].blks, bindex); } break; default: panic("bogus replacement policy"); } /* remove this block from the hash bucket chain, if hash exists */ if (cp->hsize) unlink_htab_ent(cp, &cp->sets[set], repl); /* blow away the last block to hit */ cp->last_tagset = 0; cp->last_blk = NULL; /* write back replaced block data */ if (repl->status & CACHE_BLK_VALID) { cp->replacements++; if (repl_addr) *repl_addr = CACHE_MK_BADDR(cp, repl->tag, set); /* don't replace the block until outstanding misses are satisfied */ lat += BOUND_POS(repl->ready - now); /* stall until the bus to next level of memory is available */ lat += BOUND_POS(cp->bus_free - (now + lat)); /* track bus resource usage */ cp->bus_free = MAX(cp->bus_free, (now + lat)) + 1; if (repl->status & CACHE_BLK_DIRTY) { /* write back the cache block */ cp->writebacks++; lat += cp->blk_access_fn(Write, CACHE_MK_BADDR(cp, repl->tag, set), cp->bsize, repl, now+lat); } } /* update block tags */ repl->tag = tag; repl->status = CACHE_BLK_VALID; /* dirty bit set on update */ /* read data block */ lat += cp->blk_access_fn(Read, CACHE_BADDR(cp, addr), cp->bsize, repl, now+lat); /* copy data out of cache block */ if (cp->balloc) { CACHE_BCOPY(cmd, repl, bofs, p, nbytes); } /* update dirty status */ if (cmd == Write) repl->status |= CACHE_BLK_DIRTY; /* get user block data, if requested and it exists */ if (udata) *udata = repl->user_data; /* update block status */ repl->ready = now+lat; /* link this entry back into the hash table */ if (cp->hsize) link_htab_ent(cp, &cp->sets[set], repl); /* return latency of the operation */ return lat; cache_hit: /* slow hit handler */ /* **HIT** */ cp->hits++; /* copy data out of cache block, if block exists */ if (cp->balloc) { CACHE_BCOPY(cmd, blk, bofs, p, nbytes); } /* update dirty status */ if (cmd == Write) blk->status |= CACHE_BLK_DIRTY; /* if LRU replacement and this is not the first element of list, reorder */ if (blk->way_prev && cp->policy == LRU) { /* move this block to head of the way (MRU) list */ update_way_list(&cp->sets[set], blk, Head); } /* tag is unchanged, so hash links (if they exist) are still valid */ /* record the last block to hit */ cp->last_tagset = CACHE_TAGSET(cp, addr); cp->last_blk = blk; /* get user block data, if requested and it exists */ if (udata) *udata = blk->user_data; /* return first cycle data is available to access */ return (int) MAX(cp->hit_latency, (blk->ready - now)); cache_fast_hit: /* fast hit handler */ /* **FAST HIT** */ cp->hits++; /* copy data out of cache block, if block exists */ if (cp->balloc) { CACHE_BCOPY(cmd, blk, bofs, p, nbytes); } /* update dirty status */ if (cmd == Write) blk->status |= CACHE_BLK_DIRTY; /* this block hit last, no change in the way list */ /* tag is unchanged, so hash links (if they exist) are still valid */ /* get user block data, if requested and it exists */ if (udata) *udata = blk->user_data; /* record the last block to hit */ cp->last_tagset = CACHE_TAGSET(cp, addr); cp->last_blk = blk; /* return first cycle data is available to access */ return (int) MAX(cp->hit_latency, (blk->ready - now));}/* return non-zero if block containing address ADDR is contained in cache CP, this interface is used primarily for debugging and asserting cache invariants */int /* non-zero if access would hit */cache_probe(struct cache_t *cp, /* cache instance to probe */ md_addr_t addr) /* address of block to probe */{ md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); struct cache_blk_t *blk; /* permissions are checked on cache misses */ if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return TRUE; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) return TRUE; } } /* cache block not found */ return FALSE;}/* flush the entire cache, returns latency of the operation */unsigned int /* latency of the flush operation */cache_flush(struct cache_t *cp, /* cache instance to flush */ tick_t now) /* time of cache flush */{ int i, lat = cp->hit_latency; /* min latency to probe cache */ struct cache_blk_t *blk; /* blow away the last block to hit */ cp->last_tagset = 0; cp->last_blk = NULL; /* no way list updates required because all blocks are being invalidated */ for (i=0; i<cp->nsets; i++) { for (blk=cp->sets[i].way_head; blk; blk=blk->way_next) { if (blk->status & CACHE_BLK_VALID) { cp->invalidations++; blk->status &= ~CACHE_BLK_VALID; if (blk->status & CACHE_BLK_DIRTY) { /* write back the invalidated block */ cp->writebacks++; lat += cp->blk_access_fn(Write, CACHE_MK_BADDR(cp, blk->tag, i), cp->bsize, blk, now+lat); } } } } /* return latency of the flush operation */ return lat;}/* flush the block containing ADDR from the cache CP, returns the latency of the block flush operation */unsigned int /* latency of flush operation */cache_flush_addr(struct cache_t *cp, /* cache instance to flush */ md_addr_t addr, /* address of block to flush */ tick_t now) /* time of cache flush */{ md_addr_t tag = CACHE_TAG(cp, addr); md_addr_t set = CACHE_SET(cp, addr); struct cache_blk_t *blk; int lat = cp->hit_latency; /* min latency to probe cache */ if (cp->hsize) { /* higly-associativity cache, access through the per-set hash tables */ int hindex = CACHE_HASH(cp, tag); for (blk=cp->sets[set].hash[hindex]; blk; blk=blk->hash_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) break; } } else { /* low-associativity cache, linear search the way list */ for (blk=cp->sets[set].way_head; blk; blk=blk->way_next) { if (blk->tag == tag && (blk->status & CACHE_BLK_VALID)) break; } } if (blk) { cp->invalidations++; blk->status &= ~CACHE_BLK_VALID; /* blow away the last block to hit */ cp->last_tagset = 0; cp->last_blk = NULL; if (blk->status & CACHE_BLK_DIRTY) { /* write back the invalidated block */ cp->writebacks++; lat += cp->blk_access_fn(Write, CACHE_MK_BADDR(cp, blk->tag, set), cp->bsize, blk, now+lat); } /* move this block to tail of the way (LRU) list */ update_way_list(&cp->sets[set], blk, Tail); } /* return latency of the operation */ return lat;}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -