/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 * of Leland Stanford Junior University.
 *
 * This file is part of the SimOS distribution.
 * See LICENSE file for terms of the license.
 *
 */

/****************************************************************
 * r4k_cp0.c
 *
 * This file contains the R4000 implementation of the TLB. For
 * further elucidation see r4k_cp0.c in Mipsy.
 *
 * Author: $Author: bosch $
 * Date:   $Date: 1998/02/10 00:30:54 $
 *****************************************************************/

#include <bstring.h>

#include "list.h"
#include "simmisc.h"
#include "embra.h"
#include "cp0.h"
#include "main_run.h"
#include "mem_control.h"
#include "qc.h"
#include "clock.h"
#include "decoder.h"
#include "driver.h"
#include "eventcallback.h"
#include "simmagic.h"
#include "stats.h"
#include "annotations.h"
#include "firewall.h"
#include "hw_events.h"
#include "registry.h"
#include "c_port.h"
#include "machine_defs.h"
#include "tc.h"

int embraprintsr = 0;
int exceptionDuringBackdoor = FALSE;

#define REFILL_FLAG  0x8000000
#define XREFILL_FLAG 0x4000000

/*
 * MIPS r4k manual is WRONG on this one.
 * Always latch badvaddr.
 */
#define LATCH_BADVADDR(_sr) (1)

#if defined(SIM_MIPS32)
#define ANY_HIGH32_BITS(_vAddr)    (0)
#define HAS_BAD_VADDR_BITS(_vAddr) (0)
#define REGION_COMPARE(_a,_b)      (1)
#define IN_32BIT_MODE(_p)          (1)
#define ONLY_4K_PAGE_SIZE          /* 32-bit only needs 4K pages */
#define IS_R10000(P)               0
#else
#define ANY_HIGH32_BITS(_vAddr)    ((_vAddr) >> 32)
#define _BAD_VADDR_MASK            (~(((0x1LL << VA_VALID_BITS)-1)|(0x3LL<<62)))
#define HAS_BAD_VADDR_BITS(_vAddr) ((_vAddr) & _BAD_VADDR_MASK)
#define REGION_COMPARE(_a,_b)      ((_a) == (_b))
#define IN_32BIT_MODE(_p)          ((_p)->is32bitMode)
/*
 * Is this the R10000 TLB?
 */
#define IS_R10000(P) ((P)->numTlbEntries == 64)
#endif

#ifdef ONLY_4K_PAGE_SIZE
#define ComputeTlbEntrySize(_pgmask) (0)
#else
static unsigned char ComputeTlbEntrySize(uint pgMsk);
#endif

struct PgMaskStruct PgSz[TLBPGMASK_NUMSIZES] = {
    { ~(TLBPGMASK_4K>>13),   (1 << 12), 4*1024-1},
    { ~(TLBPGMASK_16K>>13),  (1 << 14), 16*1024-1},
    { ~(TLBPGMASK_64K>>13),  (1 << 16), 64*1024-1},
    { ~(TLBPGMASK_256K>>13), (1 << 18), 256*1024-1},
    { ~(TLBPGMASK_1M>>13),   (1 << 20), 1024*1024-1},
    { ~(TLBPGMASK_4M>>13),   (1 << 22), 4*1024*1024-1},
    { ~(TLBPGMASK_16M>>13),  (1 << 24), 16*1024*1024-1},
};

#ifndef ONLY_4K_PAGE_SIZE
#define SZ2MASK(_s)     PgSz[(_s)].mask
#define SIZES_TO_CHECK  TLBPGMASK_NUMSIZES
#else
#define SZ2MASK(_s)     -1
#define SIZES_TO_CHECK  1
#endif

#ifndef ONLY_4K_PAGE_SIZE
static unsigned char
ComputeTlbEntrySize(uint pgMsk)
{
    unsigned char s;
    uint match = ~(pgMsk >> 13);
    ASSERT(SIZES_TO_CHECK < (1 << (sizeof(unsigned char)*8)));
    for (s = 0; s < SIZES_TO_CHECK; s++) {
        if ((uint)SZ2MASK(s) == match) {
            return s;
        }
    }
    CPUError("Bad PgMsk 0x%x in ComputeTlbEntrySize\n", pgMsk);
    return (unsigned char) 0;
}
#endif

/* extern int curr_cpu; */
extern EmbraState* curEmp;

/* Local Data */

/* Local CP0 functions */
static void REFILL_EXCEPTION(int cpuNum, int, int, VA, VA, int);
static uint Do_TLB_Write(int cpuNum, int);

/* This is called to allow insertion into the hash table */
static void Insert_TLB_HT( int cpuNum, int i );

/* for debugging */
void Em_Dump_Tlb(int cpuNum);

static void UpdateCPUMode(CPUState *P);

static EventCallbackHdr timerCallbackHdr[SIM_MAXCPUS];
static void EmbraSetTimerCallback(int cpuNum);

void Em_Dump_Tlb(int cpuNum)
{
    int i;
    CPUPrint("TLB DUMP %d\n", cpuNum);
    for( i = 0; i < EMP[cpuNum].numTlbEntries; i++) {
        CPUPrint("%d PgMsk:0x%x Hi:0x%llx Lo_0:0x%llx Lo_1:0x%llx\n",
                 i,
                 EMP[cpuNum].tlbEntry[i].PgMsk,
                 (Reg64)EMP[cpuNum].tlbEntry[i].Hi,
                 (Reg64)EMP[cpuNum].tlbEntry[i].Lo0,
                 (Reg64)EMP[cpuNum].tlbEntry[i].Lo1);
    }
}
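/*
 * Editorial aside (not in the original source): a worked example of the
 * PgMsk encoding that ComputeTlbEntrySize() above searches for.  It
 * assumes TLBPGMASK_16K carries the architectural R4000 PageMask value
 * 0x6000 (mask bits 14:13 set for a 16KB page); the function name is
 * ours.  Guarded by "#if 0" so it cannot affect the build.
 */
#if 0
static void PgMskWorkedExample(void)
{
    uint pgMsk = 0x6000;           /* R4000 PageMask for a 16KB page    */
    uint match = ~(pgMsk >> 13);   /* == ~0x3, the form stored in PgSz  */

    /* PgSz[1].mask was initialized to ~(TLBPGMASK_16K >> 13) == ~0x3,
     * so the linear search in ComputeTlbEntrySize() stops at s == 1,
     * the 16KB row of the table. */
    ASSERT((uint)SZ2MASK(1) == match);
    ASSERT(ComputeTlbEntrySize(pgMsk) == 1);
}
#endif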
/* This is called to set up the List_Links hashing chains */
/* This sets up the hash bucket list headers, and the Index list that */
/* they point to */
void Em_Tlb_Init_Lists( int cpuNum )
{
    int i;
    /* If we have been called before, this would have been set up */
    /* Yeah, it's a little hacky */
    if( EMP[cpuNum].indexList[10].index == 10 )
        return;
    for( i = 0; i < TLB_HASH_SIZE; i++ )
        List_Init( &EMP[cpuNum].tlbIndexHeaders[i] );
    for( i = 0; i < EMP[cpuNum].numTlbEntries; i++ ) {
        List_InitElement( &EMP[cpuNum].indexList[i].links );
        EMP[cpuNum].indexList[i].index = i;
        EMP[cpuNum].indexList[i].onList = 0;
    }
}

void Em_Tlb_Do_Init( int cpuNum, int swtch )
{
    int i;

    Em_Tlb_Init_Lists(cpuNum);
    quick_ASID[cpuNum] = GET_ASID( EMP[cpuNum].CP0[C0_TLBHI] );
    /* This clears qc and sets up K0 */
    qc_renew(cpuNum);
    for (i=0; i < EMP[cpuNum].numTlbEntries; i++) {
        /* Add the new entry (it rejects K0 entries) */
        /* Don't add to index list if we're switching CPUs */
        if (!swtch)
            Insert_TLB_HT( cpuNum, i );
        /* XXX Initialize entry size!!! Should this be done here?? -BL */
        EMP[cpuNum].tlbEntrySize[i] =
            ComputeTlbEntrySize( EMP[cpuNum].tlbEntry[i].PgMsk);
        /* Update the QC only with current ASID */
        if( (IS_GLOBAL_HI( EMP[cpuNum].tlbEntry[i].Hi ) ||
             quick_ASID[cpuNum] == GET_ASID( EMP[cpuNum].tlbEntry[i].Hi ) ) ) {
            qc_map_page( cpuNum, i);
        }
    }
    UpdateCPUMode(&EMP[cpuNum]);
}

/*********************************************************************
 * This can only be called AFTER Tlb_Init is called.
 * This is really just a qc_mmu_context_switch, but our assumptions
 * are different. Namely, 1. the current quick_ASID can be 0, and
 * 2. we need to map global entries in the QC.
 *********************************************************************/
void Em_Tlb_Init( int cpuNum, int swtch )
{
    int i;
    static int mmu_initialized = 0;

    /* Note: we arrive here when either:
     * - SimOS starts in Embra mode
     * - we switch from Mipsy to Embra for the first time
     * - we switch from Mipsy to Embra after already having done so before.
     * In the first two cases, the mmu has not been allocated/initialized
     * yet, so we don't want to do the Tlb_Clear.
     */
    if (mmu_initialized++)
        Em_Tlb_Clear( cpuNum );

    if( embra.MPinUP ) {
        int cpu;
        for( cpu = 0; cpu < TOTAL_CPUS; cpu++) {
            Em_Tlb_Do_Init( cpu, swtch );
        }
    } else {
        Em_Tlb_Do_Init( cpuNum, swtch );
    }
}

static void Em_Tlb_Remove(int cpuNum, int idx)
{
    if( embra.emode == EMBRA_CACHE ) {
        qc_cache_inval_page( cpuNum, idx);
    }
    qc_tlb_replace_page( cpuNum, idx);
}

/* We call this on exit from mshade */
void Em_Tlb_Clear( int cpuNum )
{
    int i;
    /* If we have been called before, this would have been set up */
    /* Yeah, it's a little hacky */
    if( EMP[cpuNum].indexList[10].index != 10 )
        return;
    if( embra.MPinUP ) {
        int cpu;
        ASSERT( cpuNum==0);
        for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
            for( i = 0; i < EMP[cpuNum].numTlbEntries; i++ ) {
                if( EMP[cpu].indexList[i].onList ) {
                    List_Remove( &EMP[cpu].indexList[i].links );
                    EMP[cpu].indexList[i].onList = 0;
                    Em_Tlb_Remove(cpu, i);
                }
            }
        }
    } else {
        for( i = 0; i < EMP[cpuNum].numTlbEntries; i++ ) {
            if( EMP[cpuNum].indexList[i].onList ) {
                List_Remove( &EMP[cpuNum].indexList[i].links );
                EMP[cpuNum].indexList[i].onList = 0;
                Em_Tlb_Remove(cpuNum, i);
            }
        }
    }
}
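/*
 * Editorial aside (not in the original source): Insert_TLB_HT() is only
 * declared in this file, but the two-probe scheme in Tlb_Lookup() below
 * pins down the invariant it must maintain: global entries are filed
 * under ASID 0, everything else under its own ASID, and the real routine
 * also rejects K0 entries (see the comment in Em_Tlb_Do_Init above).
 * The sketch below is a hypothetical illustration of that invariant;
 * List_Insert/LIST_ATREAR from the Sprite list package are assumed.
 */
#if 0
static void Insert_TLB_HT_Sketch(int cpuNum, int i)
{
    EmbraState *P = EMP + cpuNum;
    /* Global entries hash under ASID 0 so one extra probe finds them */
    ASID asid = IS_GLOBAL_HI(P->tlbEntry[i].Hi) ?
                0 : GET_ASID(P->tlbEntry[i].Hi);
    int hashNum = TLBHash(GET_VPN2(P->tlbEntry[i].Hi),
                          GET_REGION(P->tlbEntry[i].Hi), asid);

    List_Insert(&P->indexList[i].links,
                LIST_ATREAR(&P->tlbIndexHeaders[hashNum]));
    P->indexList[i].onList = 1;
}
#endif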
#if defined(SIM_MIPS32)
#define TLBHash(_vpn2,_region,_asid) TLBHash32BitOnly(_vpn2,_asid)
/*****************************************************************
 * Hash function for converting a virtual page number and ASID
 * into an entry in the hash table. Hash function of R3000 ==
 * hash function of R4000. Hash table stores chained TLB entry
 * numbers at each bucket, thus decreasing translation time.
 * R4000 COMMENTS >> In all calls to this function, the vpn is
 * actually vpn2. (you want both vpn's to map to the same index!)
 *****************************************************************/
static int TLBHash32BitOnly(register VPN vpn2, register ASID asid)
{
    return ((vpn2 ^
             (vpn2 >> (NUM_VPN_BITS - LOG_TLB_HASH_SIZE)) ^
             (asid << (LOG_TLB_HASH_SIZE - NUM_ASID_BITS)))
            % TLB_HASH_SIZE);
}
#else
#define TLBHash(_vpn2,_region,_asid) TLBHash64bit(_vpn2,_region,_asid)
/*****************************************************************
 * Hash function for converting a virtual page number and ASID
 * into an entry in the hash table.
 * Hash table stores chained TLB entry
 * numbers at each bucket, thus decreasing translation time.
 *****************************************************************/
static int TLBHash64bit(register VPN vpn2, register uint region,
                        register ASID asid)
{
    return ((vpn2 ^
             (vpn2 >> (NUM_VPN2_BITS - LOG_TLB_HASH_SIZE)) ^
             (vpn2 >> (32 - (NUM_OFFSET_BITS+1) - LOG_TLB_HASH_SIZE)) ^
             region ^
             (asid << (LOG_TLB_HASH_SIZE - NUM_ASID_BITS)))
            % TLB_HASH_SIZE);
}
#endif

/*************************************************************************
 * This function returns the index+1 of the TLB entry with the given
 * vpn2 (!! takes in vpn2, not vpn) and given asid. It returns 0 if
 * the index is not found.
 * The algorithm is: probe the hash table under the given asid, then
 * probe under asid 0 (to catch global entries).
 *************************************************************************/
IDX Tlb_Lookup( int cpuNum, uint region, VPN vpn2, ASID asid )
{
    int hashNum;
    List_Links* indexPtr;
    register unsigned idx;
    register int s;
    register EmbraState *P = EMP + cpuNum;

    for (s = 0; s < SIZES_TO_CHECK; s++) {
        VPN vpn;
        VPN vpnLookup = vpn2 & SZ2MASK(s);

        /* Check for pages under this process's true asid */
        hashNum = TLBHash(vpnLookup, region, asid);
        LIST_FORALL( &(P->tlbIndexHeaders[hashNum]), indexPtr) {
            idx = ((IndexListLink*)indexPtr)->index;
            vpn = vpn2 & SZ2MASK(P->tlbEntrySize[idx]);
            /* Don't check global bit because if the global bit is set, the
             * entry is stored under ASID 0 */
            if ((vpn == GET_VPN2(P->tlbEntry[idx].Hi)) &&
                (asid == GET_ASID(P->tlbEntry[idx].Hi)) &&
                REGION_COMPARE(region, GET_REGION(P->tlbEntry[idx].Hi))) {
                return idx+1;
            }
        }

        /* Now check for global pages, all of which are stored under asid 0 */
        hashNum = TLBHash(vpnLookup, region, 0);
        LIST_FORALL( &(P->tlbIndexHeaders[hashNum]), indexPtr) {
            idx = ((IndexListLink*)indexPtr)->index;
            vpn = vpn2 & SZ2MASK(P->tlbEntrySize[idx]);
            if ((vpn == GET_VPN2(P->tlbEntry[idx].Hi)) &&
                (IS_GLOBAL_HI(P->tlbEntry[idx].Hi)) &&
                REGION_COMPARE(region, GET_REGION(P->tlbEntry[idx].Hi))) {
                return idx+1;
            }
        }
    }
    return 0;
}

/*****************************************************************
 * Address translation on R4000.
 *
 * input: virtual address
 *        pointer to phys addr
 * Fully associative search (using hash table)
 * 1.) User Mode or Supervisor Mode
 *     if MSB == 1 in user or MSB = 0 | 110
 * 2.) VPN2 match?
 *     a.) if VPN2 does not match and 32-bit mode, -> TLB Refill.
 *     b.) VPN2 match, Not Global, No ASID match, -> TLB Refill.
 *     c.) VPN2 match, not Global and ASID match, OR Global, then if
 *         not valid -> TLB Invalid.
 *     d.) valid, not dirty, but writing -> TLB Mod.
 *     e.) valid, dirty, or not dirty and not writing, Non_Cacheable == T,
 *         then -> Access Main Mem, else Access Cache. Phys addr output.
 *
 * Returns FAILURE if an exception occurred telling the CPU to try
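/*
 * Editorial aside (not in the original source): a minimal sketch of how
 * a caller consumes the "index+1, or 0 on a miss" convention documented
 * above Tlb_Lookup().  The caller-side variables (cpuNum, region, vpn2,
 * asid) stand for whatever the caller already has in hand.
 */
#if 0
{
    IDX idx = Tlb_Lookup(cpuNum, region, vpn2, asid);

    if (idx == 0) {
        /* no matching entry: take the TLB refill path */
    } else {
        /* idx is biased by one so that 0 can mean "not found" */
        int slot = idx - 1;
        /* ...check valid/dirty bits in EMP[cpuNum].tlbEntry[slot].Lo0
         * or .Lo1, depending on the low VPN bit... */
    }
}
#endif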