/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 * of Leland Stanford Junior University.
 *
 * This file is part of the SimOS distribution.
 * See LICENSE file for terms of the license.
 *
 */

/****************************************************************
 * tc.c - Translation cache
 *
 * Author: $Author: bosch $
 * Date: $Date: 1998/02/10 00:28:07 $
 *****************************************************************/

#define _BSD_SIGNALS
#include <stdio.h>
#include <stdlib.h>
#ifdef sgi
#include <sys/cachectl.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <strings.h>
#include "simtypes.h"
#include "sim_error.h"
#include "simutil.h"
#include "tc.h"
#include "userflush.h"
#include "sim_error.h"
#include "cpu_interface.h"
#include "arch_specifics.h"

#define HOST_PAGE_SIZE (getpagesize())
#define NEXT_HOST_PAGE(_x) ( ((size_t)(_x)+HOST_PAGE_SIZE-1) & ~(HOST_PAGE_SIZE-1))

extern int continue_run();

static int logPC = 0;
int tcGenNumber = 0;
static int numCaches;
static Inst breakOpcode;

static struct TC {
    char *fixedStart;
    char *fixedEnd;
    TCA curPos;
    char *curEnd;
    int annotationBuffer;
    int pctcSize;
    int mask;
    Reg genId;
    struct PCTCEntry {
        VA vAddr;
        MA mAddr;
        TCA tca;
        Reg genId;
    } *pctc;
} real_tc[TC_MAX_CACHES];

/* #define DEBUG_TCFLUSH */

/* User, Kernel and Interface code Translation Caches */
#define TC_USER 0
#define TC_KERN 1
#define TC_GLUE 2

#ifdef DEBUG_TCFLUSH
static char *flushType[4];
#endif

#define TC_START 0x0c000000

#define PCTC_ASSOC 4
#define PCTC_HASH(_code,_ma) ((PTR_TO_UINT(_ma)>>2 & real_tc[_code].mask)*PCTC_ASSOC)

#ifdef sgi
struct {
    int count;
    VA pc;
    SimTime cc;
} lastSigIll;

static void
TC_SIGILLHandler(int sig, int code, struct sigcontext *scp)
{
    int done = 1;
    intPtrSize pc = (intPtrSize)scp->sc_pc;
    SimTime cc = CPUVec.CycleCount(0);

    /* This should really take into account how many TC's we have,
     * which could vary. On the SGI's (this ifdef) we have 3.
     */
    if (TC_InTC(TC_USER,(TCA)pc) || TC_InTC(TC_KERN,(TCA)pc) ||
        TC_InTC(TC_GLUE,(TCA)pc)) {
        if (pc != lastSigIll.pc || cc != lastSigIll.cc) {
            done = 0;
        }
    }
    lastSigIll.cc = cc;
    lastSigIll.pc = pc;
    lastSigIll.count++;
    if (done) {
        CPUError("%lld TC_SIGILL: sig=%d code=%d scp=0x%x pc=0x%x --> abort\n",
                 CPUVec.CycleCount(0),sig,code,scp,pc);
    } else {
        CPUWarning("%lld TC_SIGILL: sig=%d code=%d scp=0x%x pc=0x%x (continue) \n",
                   CPUVec.CycleCount(0),sig,code,scp,pc);
        slowcacheflush((char*)pc, 32*1024, BCACHE);
    }
}
#endif

#ifdef __alpha
#define BCACHE 12 /* bogus */
static void
TC_SIGILLHandler(int sig)
{
    CPUError("TC_SIGILLHandler signal=%d\n",sig);
    slowcacheflush(0,16*1024,BCACHE);
}
#endif

void
TC_init(int numC, int cacheSizes[], int pctcSize[],
        int annotationBuffer[], Inst breakOp)
{
    static int initialized=0;
    int malloc_size;
    uint *ptr;
    int zfd,i;

    if (initialized) {
        TC_flush( TCFLUSH_ALL );
        return;
    }
    initialized = 1;
    numCaches = numC;
    breakOpcode = breakOp;
    ASSERT(numCaches >= 0 && numCaches < TC_MAX_CACHES);

    malloc_size = 3*HOST_PAGE_SIZE;
    for (i=0;i<numCaches;i++) {
        real_tc[i].annotationBuffer = annotationBuffer[i];
        real_tc[i].pctcSize = pctcSize[i];
        real_tc[i].pctc = (struct PCTCEntry*)malloc(pctcSize[i]*sizeof(struct PCTCEntry));
        bzero((char*)real_tc[i].pctc,pctcSize[i]*sizeof(struct PCTCEntry));
        real_tc[i].mask = (pctcSize[i] / PCTC_ASSOC)-1;
        ASSERT((1<<GetLog2(pctcSize[i]))==pctcSize[i]);
        ASSERT (cacheSizes[i] >= 2*annotationBuffer[i]);
        malloc_size += cacheSizes[i];
    }
#if defined(__alpha)
    ptr = (uint*)malloc( malloc_size);
    bzero((char*)ptr,malloc_size);
    ASSERT(ptr);
#elif defined(_ABIN32)
    ptr = (uint *) memalign(HOST_PAGE_SIZE, malloc_size);
    if (ptr == NULL) {
        perror("malloc of TC");
        CPUError("Can't allocate TC\n");
    }
    /* Make sure TC is in jump inst range of code segment */
    ASSERT((((uint)ptr ^ (uint)continue_run) >> 28) == 0);
#else /* O32 */
    zfd = open("/dev/zero", O_RDWR, 0);
    if (zfd < 0) {
        perror( "Opening /dev/zero in TC_init");
        CPUError("TC_init can't start");
    }
    ptr = (uint *)mmap((uint*)TC_START, malloc_size,
                       PROT_READ|PROT_WRITE|PROT_EXEC,
                       MAP_SHARED, zfd, 0);
    if (ptr != (uint *) TC_START) {
        perror("mapping tc_init");
        CPUError("Can't map tc_inits");
    }
#endif
    for (i=0;i<numCaches;i++) {
        real_tc[i].fixedStart = (char *)NEXT_HOST_PAGE(ptr);
        real_tc[i].fixedEnd = real_tc[i].fixedStart + cacheSizes[i];
        real_tc[i].curPos = (TCA)real_tc[i].fixedEnd; /* force flush */
        ptr = (uint *)real_tc[i].fixedEnd +1;
    }

    if (signal(SIGILL, TC_SIGILLHandler) == SIG_ERR)
        CPUError("signal(SIGILL)");

    /* This sets next and kern_next */
    TC_flush( TCFLUSH_ALL );
    initialized = 1;
}
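#if 0
/*
 * Illustrative sketch only, not part of the original file: one plausible way
 * a CPU model might call TC_init() for the three caches named above
 * (TC_USER, TC_KERN, TC_GLUE). The sizes, PC->TC table sizes, annotation
 * buffer sizes, the helper name and the break opcode are made-up example
 * values, not the ones SimOS actually uses; the constraints they satisfy
 * (power-of-two pctcSize, cache at least twice the annotation buffer) come
 * from the ASSERTs in TC_init() itself.
 */
static void
ExampleTCSetup(Inst hypotheticalBreakOp)
{
    int sizes[3]     = { 4*1024*1024, 4*1024*1024, 256*1024 }; /* user, kernel, glue */
    int pctcSizes[3] = { 16*1024, 16*1024, 1024 };             /* powers of two */
    int annotBufs[3] = { 64*1024, 64*1024, 4*1024 };           /* <= half of each cache */

    TC_init(3, sizes, pctcSizes, annotBufs, hypotheticalBreakOp);
}
#endif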
static void
TC_flush1(int code)
{
    TCA tca;
    uint size;

    /* This is not true for Embra - we have a third cache which
     * contains the glue code */
    /* ASSERT (code == TC_USER || code == TC_KERN); */
#if 0
    CPUPrint("Translation Cache flush from 0x%llx to 0x%llx for %4d KB\n",
             (uint64)real_tc[code].fixedStart, (uint64)real_tc[code].curPos,
             ((uint64)real_tc[code].curPos-(uint64)real_tc[code].fixedStart)/1024);
#endif
    for (tca = (TCA)real_tc[code].fixedStart;
         tca != (TCA)real_tc[code].curPos;
         tca++) {
        *tca = breakOpcode;
    }
    real_tc[code].curPos = (TCA)real_tc[code].fixedStart;
    real_tc[code].curEnd = real_tc[code].fixedEnd - real_tc[code].annotationBuffer;
    size = PTR_TO_UINT(tca) - PTR_TO_UINT(real_tc[code].fixedStart);
    if (size > 0 &&
        slowcacheflush(real_tc[code].fixedStart, size, BCACHE) == -1) {
        perror("TC_flush1");
    }
    real_tc[code].genId++;
}

void
TC_flush(int code)
{
#ifdef DEBUG_TCFLUSH
    CPUWarning("EMBRA: %10lld TC_flush %s \n",
               (uint64)CPUVec.CycleCount(0),
               flushType[code]);
#endif
    if (code==TCFLUSH_ALL) {
        int i;
        for (i=0;i<numCaches;i++) {
            TC_flush1(i);
        }
    } else {
        ASSERT (code>=0 && code < numCaches);
        TC_flush1(code);
    }
    tcGenNumber++;
}

TCA
TC_GetTCPtr( int cache )
{
    ASSERT( cache>=0 && cache < numCaches);
    return real_tc[cache].curPos;
}

void
TC_SetTCNext(int cache, TCA start, TCA finish)
{
    if( usercacheflush((void *) start, (char*)finish - (char*)start, BCACHE) <0) {
        CPUWarning("Cacheflush failure\n");
    }
    ASSERT( start == real_tc[cache].curPos);
    ASSERT( PTR_TO_UINT(finish) < PTR_TO_UINT(real_tc[cache].curEnd));
    ASSERT( PTR_TO_UINT(finish) < PTR_TO_UINT(real_tc[cache].fixedEnd));
    real_tc[cache].curPos = finish;
}

int
TC_InTC( int cache, TCA addr)
{
    return( PTR_TO_UINT(addr) >= PTR_TO_UINT(real_tc[cache].fixedStart) &&
            PTR_TO_UINT(addr) < PTR_TO_UINT(real_tc[cache].curEnd) );
}

int
TC_Is_Room( int lenInInstr, int cache )
{
    return( TC_InTC(cache,real_tc[cache].curPos+lenInInstr ) );
}

void
TC_IncrementSize(int cache, int size)
{
    if (real_tc[cache].curEnd==real_tc[cache].fixedEnd) {
        /*
         * We've already been here and the warning has been written out;
         * ignore.
         */
        return;
    }
    if (real_tc[cache].curEnd+size >= real_tc[cache].fixedEnd) {
        /*
         * overflow
         */
        CPUWarning("EMBRA: TC annotation overflow buffer full. Repeatability is broken. Increase tc.h:TC_ANNOTATION_SIZE \n");
        real_tc[cache].curEnd = real_tc[cache].fixedEnd;
        return;
    }
    real_tc[cache].curEnd += size;
}
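#if 0
/*
 * Illustrative sketch only, not part of the original file: the emit-side
 * protocol implied by the functions above. A translator would reserve space
 * with TC_Is_Room(), copy its generated instructions starting at
 * TC_GetTCPtr(), commit the region with TC_SetTCNext() (which also does the
 * usercacheflush), and register the entry point with TC_PCInsert(). The
 * helper name ExampleInstallTranslation, the genBuf scratch-buffer model,
 * and treating TCA as a pointer to Inst (as TC_flush1 does) are assumptions
 * for illustration.
 */
static TCA
ExampleInstallTranslation(int cache, VA vAddr, MA mAddr,
                          Inst *genBuf, int lenInInstr)
{
    TCA start, cur;
    int i;

    if (!TC_Is_Room(lenInInstr, cache)) {
        TC_flush(cache);   /* make room by discarding this cache's translations */
    }
    start = TC_GetTCPtr(cache);
    for (cur = start, i = 0; i < lenInInstr; i++) {
        *cur++ = genBuf[i];                  /* copy the translated instructions */
    }
    TC_SetTCNext(cache, start, cur);         /* commit the region + flush I-cache */
    TC_PCInsert(cache, start, vAddr, mAddr); /* make it findable by (VA,MA) */
    return start;
}
#endif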
void
TC_PCInsert(int cache, TCA tca, VA vAddr, MA mAddr)
{
    int idx = PCTC_HASH(cache,mAddr);
    int set = (PTR_TO_UINT(tca)>>2)%PCTC_ASSOC; /* random */
    int i;
    struct PCTCEntry *e = &real_tc[cache].pctc[idx];

    ASSERT (cache<numCaches);
    ASSERT (TC_InTC(cache,tca));
    for (i=0;i<PCTC_ASSOC;i++) {
        if (e[i].genId!=real_tc[cache].genId) {
            set = i;
            break;
        }
    }
    e[set].vAddr = vAddr;
    e[set].mAddr = mAddr;
    e[set].tca = tca;
    e[set].genId = real_tc[cache].genId;
}

TCA
TC_PCLookup(int cache, VA vAddr, MA mAddr)
{
    int idx = PCTC_HASH(cache,mAddr);
    int i;
    int genId = real_tc[cache].genId;
    struct PCTCEntry *e = &real_tc[cache].pctc[idx];

    if (logPC) {
        CPUPrint("pclog: VA=%llx\n", vAddr);
    }
    for(i=0;i<PCTC_ASSOC;i++,e++) {
        if (e->genId!=genId) {
            return 0;
        }
        if (e->mAddr==mAddr && e->vAddr==vAddr) {
            if (logPC) {
                CPUPrint("pclog TC=%llx\n", e->tca);
            }
            return e->tca;
        }
    }
    return 0;
}
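#if 0
/*
 * Illustrative sketch only, not part of the original file: the lookup-side
 * use of TC_PCLookup(). A dispatch path would probe the PC->TC hash with the
 * virtual and physical addresses of the next block; a return of 0 means the
 * block has no valid translation since the last flush (the genId check
 * above) and must be (re)generated. Translate() is a hypothetical
 * placeholder for the Embra translator, not a function defined in this file.
 */
static TCA
ExampleDispatch(int cache, VA vAddr, MA mAddr)
{
    TCA entry = TC_PCLookup(cache, vAddr, mAddr);
    if (entry == 0) {
        /* Miss: retranslate; the translator is expected to call
         * TC_PCInsert() so the next lookup hits. */
        entry = Translate(cache, vAddr, mAddr);   /* hypothetical translator */
    }
    return entry;
}
#endif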