/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 * of Leland Stanford Junior University.
 *
 * This file is part of the SimOS distribution.
 * See LICENSE file for terms of the license.
 */

/****************************************************************
 * qc64.c
 *
 * This file handles the quickcheck and Physarray structures.
 * pa/phys_mem_ref gets called in case of a callout from the
 * Translation Cache.
 *
 * $Author: bosch $
 * $Date: 1998/02/10 00:30:50 $
 *****************************************************************/

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <bstring.h>
#include <string.h>
#include <assert.h>
#include "simmisc.h"
#include "embra.h"
#include "cache.h"
#include "qc.h"
#include "stats.h"
#include "driver.h"
#include "clock.h"
#include "mem_control.h"
#include "main_run.h"
#include "cp0.h"
#include "annotations.h"
#include "simutil.h"
#include "machine_defs.h"
#include "firewall.h"
#include "simmagic.h"
#include "tc.h"
#include "tc_coherence.h"
#include "simtypes.h"

#ifdef EMBRA_USE_QC64

void qc_CheckForDuplicates(CPUState *P);

static struct {
   VA vpn_mask;       /* VPN mask for a TLB entry */
   int page_count;    /* Number of 4K pages mapped by a TLB entry */
   int offset_mask;   /* Page offset mask */
} TlbEntrySz[TLBPGMASK_NUMSIZES] = {
   { ~(((TLBPGMASK_4K>>13)<<1)|1),   (2*4*1024)/DEFAULT_PAGESZ,        4*1024-1 },
   { ~(((TLBPGMASK_16K>>13)<<1)|1),  (2*16*1024)/DEFAULT_PAGESZ,       16*1024-1 },
   { ~(((TLBPGMASK_64K>>13)<<1)|1),  (2*64*1024)/DEFAULT_PAGESZ,       64*1024-1 },
   { ~(((TLBPGMASK_256K>>13)<<1)|1), (2*256*1024)/DEFAULT_PAGESZ,      256*1024-1 },
   { ~(((TLBPGMASK_1M>>13)<<1)|1),   (2*1024*1024)/DEFAULT_PAGESZ,     1024*1024-1 },
   { ~(((TLBPGMASK_4M>>13)<<1)|1),   (2*4*1024*1024)/DEFAULT_PAGESZ,   4*1024*1024-1 },
   { ~(((TLBPGMASK_16M>>13)<<1)|1),  (2*16*1024*1024)/DEFAULT_PAGESZ,  16*1024*1024-1 },
};

#define IS_KERNEL_ONLY_ADDR(_va) (((Reg64_s)(_va)) < 0)
#define ANY_HIGH32_BITS(_va)     ((_va)>>32)

extern int (*QC64HashFunc)(VA vAddr);

static void QC64Check(EmbraState *P, int myASID);

/*
 * ReinitQC64(EmbraState *P) - Reinitialize the QC64 hash tables
 * and the backmapping table.
 */
static void ReinitQC64(EmbraState *P)
{
   int i;
   for (i = 0; i < QC64_NUM_ENTRIES; i++) {
      P->kernelMMU[i].vpn = QC64_INVALID_VPN;
      P->userMMU[i].vpn   = QC64_INVALID_VPN;
   }
   for (i = 0; i < MAX_NTLBENTRIES; i++) {
      P->QC64TLBBackMap[i] = QC64_INVALID_VPN;
   }
}

/*
 * AddAddrQC64() - Add a vAddr -> MA mapping to the QC64.
 */
static void
AddAddrQC64(EmbraState *P, VA vAddr, MA ma, int writable)
{
   int hash;

   hash = QC64HashFunc(vAddr);
   ma = (MA) ((uint)ma & ~(DEFAULT_PAGESZ-1));
   P->kernelMMU[hash].vpn      = vAddr/DEFAULT_PAGESZ;
   P->kernelMMU[hash].ma       = ma;
   P->kernelMMU[hash].writable = writable;
   if (!IS_KERNEL_ONLY_ADDR(vAddr)) {
      P->userMMU[hash].vpn      = vAddr/DEFAULT_PAGESZ;
      P->userMMU[hash].ma       = ma;
      P->userMMU[hash].writable = writable;
   }
}

/*
 * RemoveAddrQC64(EmbraState *P, VA vAddr) - Remove an
 * address from the QC64.
 */
static void
RemoveAddrQC64(EmbraState *P, VA vAddr)
{
   int hash = QC64HashFunc(vAddr);
   if (P->kernelMMU[hash].vpn == vAddr/DEFAULT_PAGESZ) {
      P->kernelMMU[hash].vpn = QC64_INVALID_VPN;
   }
   if (P->userMMU[hash].vpn == vAddr/DEFAULT_PAGESZ) {
      P->userMMU[hash].vpn = QC64_INVALID_VPN;
   }
}
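/* Editor's note: a minimal sketch of the fast path these hash tables
 * serve, for orientation.  The real fast path is code emitted by the
 * Translation Cache, not a C function; QC64Lookup is a hypothetical
 * name, the entry layout and page-aligned MA are assumed from
 * AddAddrQC64 above, and MA is assumed to allow byte-granularity
 * arithmetic.  Only on a miss does the emitted code call out to
 * Em_QC64Reload below.
 */
#if 0
static MA QC64Lookup(EmbraState *P, VA vAddr, int isWrite)
{
   /* P->mmu points at the table for the current mode: kernelMMU, or
    * userMMU, which simply omits kernel-only pages. */
   QC64HashEntry *e = &P->mmu[QC64HashFunc(vAddr)];

   /* Hit only if the full VPN matches and, for stores, the page was
    * entered as writable.  The stored MA is page aligned, so the page
    * offset is added back in. */
   if (e->vpn == vAddr/DEFAULT_PAGESZ && (!isWrite || e->writable)) {
      return e->ma + (vAddr & (DEFAULT_PAGESZ-1));
   }
   return 0;   /* miss: fall back to Em_QC64Reload */
}
#endif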
/*
 * FlushTLBEntryQC64 - Remove the specified TLB entry from the
 * QC64.
 */
static void FlushTLBEntryQC64(EmbraState *P, int idx)
{
   int hash, i;
   VA vpn = P->QC64TLBBackMap[idx];

   if (vpn != QC64_INVALID_VPN) {
      int kernelOnly = IS_KERNEL_ONLY_ADDR(vpn*DEFAULT_PAGESZ);
      int size = P->tlbEntrySize[idx];
      /* Must remove all the default (4K) pages that make
       * up this page. */
      vpn = vpn & TlbEntrySz[size].vpn_mask;
      for (i = 0; i < TlbEntrySz[size].page_count; i++) {
         hash = QC64HashFunc(vpn*DEFAULT_PAGESZ + i*DEFAULT_PAGESZ);
         P->kernelMMU[hash].vpn = QC64_INVALID_VPN;
         if (!kernelOnly) {
            P->userMMU[hash].vpn = QC64_INVALID_VPN;
         }
      }
   }
   P->QC64TLBBackMap[idx] = QC64_INVALID_VPN;
}

static void InitQC64(void)
{
   int cpu;

   assert( embra.sequential);
   for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
      if (EMP[cpu].mmu) continue;
      EMP[cpu].kernelMMU = (QC64HashEntry *)
         malloc(sizeof(QC64HashEntry)*QC64_NUM_ENTRIES);
      EMP[cpu].mmu = EMP[cpu].kernelMMU;
      if (embra.separateMMUs) {
         EMP[cpu].userMMU = malloc(sizeof(QC64HashEntry)*QC64_NUM_ENTRIES);
      } else {
         EMP[cpu].userMMU = EMP[cpu].kernelMMU;
      }
      EMP[cpu].QC64TLBBackMap = (VA *) malloc(MAX_NTLBENTRIES*sizeof(VA));
      ReinitQC64(EMP + cpu);
   }

   /* Page mode doesn't use quick checks */
   if( embra.emode == EMBRA_PAGE )
      return;

   ASSERT(!embra.useVQC);
   for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
      /* Record QC_V reloc base */
      if (EMP[cpu].pa_p) continue;
      EMP[cpu].pa_p = (pa_info_t*)
         ZALLOC_PERM(PA_SIZE(M_FROM_CPU(cpu)),"EmbraPA");
      /* Memory access entry holds the value for PA_REG */
      EMP[cpu].cache_ax = (pa_info_t *)(EMP[cpu].pa_p -
         (MA_TO_UINT(SIM_MEM_ADDR(M_FROM_CPU(cpu))) >> log2SCACHE_LINE_SIZE));
   }
}

/* Invalidate the quick check entries specified in cpu_bits.
 * Invalidate physical first to avoid a race condition. */
void qc_clobber( PA pAddr, int cpuNum, EmVQCMemState state )
{
   EMP[cpuNum].pa_p[ADDR2SLINE(pAddr)] = PA_SET_INV;
}

void
qc64_remove_addr(int cpuNum, VA va)
{
   RemoveAddrQC64(&EMP[cpuNum], va);
}

/* Transition from exclusive to read shared -- used when newly written
 * code is executed. This allows us to detect further writes. */
void qc_downgrade(int cpuNum, VA vAddr, int new_state)
{
   ASSERT( VQC_SHARED(new_state) );
   RemoveAddrQC64(&EMP[cpuNum], vAddr);
}

/* *************************************************************
 * For all CPUs, downgrade the protection. Called by
 * icache_coherence_mark_code.
 * *************************************************************/
void qc_downgrade_ifpresent(VA vAddr)
{
   int cpuNum;
   for (cpuNum = 0; cpuNum < TOTAL_CPUS; cpuNum++) {
      RemoveAddrQC64(&EMP[cpuNum], vAddr);
   }
}

/* Page mode: note that driver.c:EmbraInstallMemAnnotation will be
 * called after qc_renew runs, so we are free to put in translations
 * here for all pages, even those with load/store annotations on them.
 */
void qc_renew( int cpuNum )
{
   if( !embra.MPinUP || cpuNum == 0 )
      InitQC64();
   ReinitQC64(EMP + cpuNum);
   Clear_Translation_State( TCFLUSH_ALL);
}
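/* Editor's note: a worked example of the TlbEntrySz arithmetic used by
 * FlushTLBEntryQC64 above.  Each TLB entry maps an even/odd page pair,
 * so an entry with a 64K page mask covers (2*64*1024)/4096 = 32
 * default 4K pages, and flushing that slot must clear up to 32 hash
 * entries.  QC64PrintCoveredPages is a hypothetical debug aid, not
 * part of the original file.
 */
#if 0
static void QC64PrintCoveredPages(EmbraState *P, int idx)
{
   int size = P->tlbEntrySize[idx];
   VA vpn = P->QC64TLBBackMap[idx];
   int i;

   if (vpn == QC64_INVALID_VPN)
      return;
   vpn &= TlbEntrySz[size].vpn_mask;    /* round down to the pair base */
   for (i = 0; i < TlbEntrySz[size].page_count; i++) {
      printf("TLB slot %d covers 4K vpn 0x%llx\n",
             idx, (unsigned long long)(vpn + i));
   }
}
#endif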
/*
 * Em_QC64Reload - Handle reloading the QC64 on a miss.
 * Returns 0 if the QC wasn't filled.
 */
MA
Em_QC64Reload(VA vAddr, int flags)
{
   int myASID, region, hashNum, idx;
   PA pAddr;
   Reg VPN2, lo_reg;
   Reg32 sr_reg;
   List_Links* indexPtr;
   EmbraState *P;
   int writeable = 0;
   MA ma;

   P = curEmp;
   sr_reg = (Reg32)(P->CP0[C0_SR]);
   region = GET_REGION(vAddr);

   if (region == 0) {
      /* User region access */
      if (!(sr_reg & SR_UX) && ANY_HIGH32_BITS(vAddr))
         return 0;
      /* Fall thru to TLB lookup */
   } else if (region == 2) {
      /* xkphys access */
      uint cache_algorithm = XKPHYS_CACHE_ALGORITHM(vAddr);
      /* xkphys - only available in kernel 64bit mode */
      if (!IS_KERNEL_MODE(P) || !(sr_reg & SR_KX))
         return 0;
      /* Currently we only support the EXCLUSIVE_WRITE access quick check */
      if (cache_algorithm == CBIT_EXCLUSIVE_WRITE) {
         pAddr = K0_TO_PHYS_REMAP(vAddr, P->myNum);
         writeable = 1;
         idx = -1;
         goto checkAccess;
      }
      return 0;
   } else if (region == 3) {
      /* Kernel region: no user, and limited supervisor */
      if (vAddr >= CKSEG0_START_ADDR) {
         if (!IS_KERNEL_MODE(P)) {
            /* No user, and supervisor limited to a single range */
            if (IS_BACKDOOR(vAddr) || (sr_reg & SR_KSU_USR) ||
                ((sr_reg & SR_KSU_SUP) && !IS_SUPERV_SEG(vAddr)))
               return 0;
         }
         if (IS_KSEG0(vAddr)) {
            if (!IS_KERNEL_MODE(P))
               return 0;   /* Kernel only */
#ifndef TORNADO
            if (remapVec->RemapEnable[P->myNum] &&
                (vAddr >= __MAGIC_OSPC_BASE && vAddr < __MAGIC_OSPC_END)) {
               return 0;
            }
#endif
            pAddr = K0_TO_PHYS_REMAP(vAddr, P->myNum);
            idx = -1;
            writeable = 1;
            goto checkAccess;
         }
         if (IS_KSEG1(vAddr))
            return 0;
         /* Fall thru to TLB lookup */
      } else {
         /* If we got here we had better not be in 32bit mode, and
          * there is nothing here that the user or supervisor can access. */
         if (!(sr_reg & SR_KX) || !IS_KERNEL_MODE(P))
            return 0;
      }
      /* Fall thru to TLB lookup */
   } else if (region == 1) {
      /* Supervisor region - only available in 64bit mode */
      if (!(sr_reg & SR_SX) ||
          ((sr_reg & SR_KSU_USR) && !IS_KERNEL_MODE(P)))
         return 0;
      /* Fall thru to TLB lookup */
   }

   /* Check the TLB, assuming first a 16K page then a 4K page */
   VPN2 = GET_VPN2(vAddr);
   myASID = GET_ASID(P->CP0[C0_TLBHI]);
   idx = Tlb_Lookup( P->myNum, region, VPN2, myASID );
   if (idx) {
      int szEntry;
      idx--;
      /* We have a matching VPN and ASID - see if it is valid */
      szEntry = P->tlbEntrySize[idx];
      /* Which lo register? */
      if (vAddr & (TlbEntrySz[szEntry].offset_mask+1))
         lo_reg = P->tlbEntry[idx].Lo1;
      else
         lo_reg = P->tlbEntry[idx].Lo0;
      if (IS_VALID(lo_reg) && (IS_DIRTY(lo_reg) || !(flags & QC64_WRITE))) {
         /* Everything is cool - form the address */
         pAddr = (((GET_PFN(lo_reg))*DEFAULT_PAGESZ) &
                  ~(VA)TlbEntrySz[szEntry].offset_mask) +
                 (vAddr & TlbEntrySz[szEntry].offset_mask);
         writeable = IS_DIRTY(lo_reg);
         goto checkAccess;
      }
   }
   /* Missed in the TLB */
   return 0;

checkAccess:
   if (!IS_VALID_PA(M_FROM_CPU(P->myNum), pAddr)) {
      ASSERT(0);
      return 0;
   }
#ifdef BROKEN
   if (SIMFIREWALL_ON) {
      if (SimMagic_IsIncoherent(pAddr))
         return 0;
      if (!CheckFirewall(P->myNum, pAddr)) {
         if (flags & QC64_WRITE)
            return 0;
         writeable = 0;
      }
   }
#endif
   if (annWatchpoints &&
       AnnFMRangeCheck(vAddr, ANNFM_LD_TYPE|ANNFM_ST_TYPE))
      return 0;

   ma = PHYS_TO_MEMADDR(M_FROM_CPU(P->myNum), pAddr);
   if (writeable && (flags & QC64_WRITE) &&
       EmbraTCCoherenceCheck(P->myNum, vAddr,
                             (pAddr & ~(DEFAULT_PAGESZ-1)),
                             (pAddr & ~(DEFAULT_PAGESZ-1)) + DEFAULT_PAGESZ)) {
      AddAddrQC64(P, vAddr, ma, 0);
      if (idx >= 0) {
         P->QC64TLBBackMap[idx] = vAddr/DEFAULT_PAGESZ;
      }
#if 0
      CPUWarning("Flushing the TC in Em_QC64Reload (TC coherence) PC=0x%llx vAddr=0x%llx \n",
                 (Reg64)P->PC, (Reg64)vAddr);
#endif
      ReenterTC(P);
      /* NOTREACHED */
   }
   if (writeable &&
       TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(P->myNum), pAddr))) {
      writeable = 0;
   }
   AddAddrQC64(P, vAddr, ma, writeable);
   if (idx >= 0) {
      P->QC64TLBBackMap[idx] = vAddr/DEFAULT_PAGESZ;
   }
   return ma;
}
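/* Editor's note: a minimal sketch of the caller contract for
 * Em_QC64Reload.  HandleQC64Miss and the surrounding flow are
 * hypothetical; the real callout is made from translation-cache code.
 * A zero return means the access is not quick-checkable here (TLB
 * miss, protection, watchpoint, uncached segment, ...) and must take
 * the general memory-reference path instead.
 */
#if 0
static MA HandleQC64Miss(VA vAddr, int isWrite)
{
   MA ma = Em_QC64Reload(vAddr, isWrite ? QC64_WRITE : 0);

   if (ma == 0) {
      /* Not filled: take the full mem_ref path, which may raise a
       * TLB or address-error exception. */
      return 0;
   }
   /* Filled: ma is the host address for vAddr itself (not page
    * aligned), so the access can be redone through it; subsequent
    * references to this page hit the fast path. */
   return ma;
}
#endif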
void qc_tlb_inval_page( int cpuNum, int idx)
{
   EmbraState *P = EMP + cpuNum;
   EntryHi hi = P->tlbEntry[idx].Hi;

   if( !IS_UNMAPPED_TLBHI( hi ) ) {
      FlushTLBEntryQC64(&EMP[cpuNum], idx);
   }
}

void qc_tlb_replace_page( int cpuNum, int idx)
{
   qc_tlb_inval_page( cpuNum, idx);
}
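/* Editor's note: a hedged sketch of how the CP0 TLB-write path is
 * expected to use the two hooks above.  ExampleTlbWrite and the
 * TLBEntry type as used here are illustrative only; the real caller
 * lives in the CP0/TLB emulation, not in this file.  The point is the
 * ordering: stale QC64 state for the victim slot is flushed before
 * the new mapping is installed, and the QC64 then refills lazily
 * through Em_QC64Reload on the next miss.
 */
#if 0
static void ExampleTlbWrite(int cpuNum, int idx, TLBEntry newEntry)
{
   qc_tlb_replace_page(cpuNum, idx);       /* drop the old QC64 state */
   EMP[cpuNum].tlbEntry[idx] = newEntry;   /* install the new mapping */
   /* No eager refill: the next access through this entry misses in
    * the hash table and calls Em_QC64Reload. */
}
#endif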
#endif /* EMBRA_USE_QC64 */