
📄 qc.c

📁 An operating system for the MIPS architecture
💻 C
📖 Page 1 of 3
/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 *    of Leland Stanford Junior University.
 *
 * This file is part of the SimOS distribution.
 * See LICENSE file for terms of the license.
 *
 */

/****************************************************************
 * qc.c
 *
 * This file handles the quickcheck and Physarray structures.
 * pa/phys_mem_ref gets called in case of callout from the
 * Translation Cache
 *
 * $Author: bosch $
 * $Date: 1998/02/10 00:30:46 $
 *****************************************************************/

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
/* #include <sys/immu.h> */
#include <fcntl.h>
#include <bstring.h>
#include <string.h>
#include <assert.h>

#include "simmisc.h"
#include "embra.h"
#include "cache.h"
#include "qc.h"
#include "stats.h"
#include "driver.h"
#include "clock.h"
#include "mem_control.h"
#include "main_run.h"
#include "cp0.h"
#include "annotations.h"
#include "simutil.h"
#include "machine_defs.h"
#include "firewall.h"
#include "simmagic.h"
#include "tc.h"
#include "tc_coherence.h"
#include "simtypes.h"

#ifndef EMBRA_USE_QC64

/* There are good reasons not to let backdoor references hit in the */
/* quick check */
/* 1. Some of them need to be relocated MPinUP (like CPUID) */
/* 2. They all need to be relocated when self-hosting */
/* This is not bad because the number of backdoor references should be */
/* small enough to make it non performance critical */
/* XXX -- The above argument no longer holds. */
/* The new interrupt subsystem makes many backdoor references, thus we
   will try to make backdoor references succeed in the quick check
   whenever we can */

/* #define DISABLE_QC */

#undef LOG_QC_UPDATE  /* log every change in qc_p or PhysArray */

#define TLB_ENT2VPN(_ent) (PAGE_NUMBER(TLBHI2ADDR(_ent)))

void qc_CheckForDuplicates(CPUState *P);

/* **********************************************************
 * SetMMUEntry: update the (potentially two different
 * array when applicable
 * note that user_mode and supervisor_mode have the same
 * privileges.
 * **********************************************************/
static void
SetMMUEntry( CPUState *P, VA vAddr, MA value)
{
   if (value &&
       !EMBRA_IS_MEMADDR(M_FROM_CPU(P->myNum),MMU_PROT_READ(value))) {
      CPUError("SetMMUEntry ASSERT: vAddr=%llx value=%llx not a MA\n",
               (uint64)vAddr,(uint64)value);
   }
   P->kernelMMU[PAGE_NUMBER(vAddr)] = value;
   if (embra.separateMMUs &&
       (IS_KUSEG(vAddr) || IS_SUPERV_SEG(vAddr)) ) {
      P->userMMU[PAGE_NUMBER(vAddr)] = value;
   }
}

void qc_cache_init(int cpuNum)
{
    if( embra.sequential) {
        int cpu;
        for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
            /* Record MMU reloc base */
            if (EMP[cpu].mmu) continue;
            EMP[cpu].kernelMMU = (MA*)ZALLOC_PERM(MMU_RELOC_SIZE,"EmbraMMU");
            EMP[cpu].mmu = EMP[cpu].kernelMMU;
            CPUPrint("0x%x D MMU_RELOC_BASE_%d 0x%x\n",
                     EMP[cpu].mmu, cpu,
                     (uint)EMP[cpu].mmu + MMU_RELOC_SIZE );
            if (embra.separateMMUs) {
                EMP[cpu].userMMU =
                    (MA*)ZALLOC_PERM(MMU_RELOC_SIZE,"EmbraMMU");
                CPUPrint("0x%x D MMU_RELOC_BASE_%d 0x%x (kernel) \n",
                         EMP[cpu].userMMU, cpu,
                         (uint)EMP[cpu].userMMU + MMU_RELOC_SIZE );
            } else {
                EMP[cpu].userMMU = EMP[cpu].kernelMMU;
            }
        }
    } else {
        ASSERT (0);
    }

    /* Page mode doesn't use quick checks */
    if( embra.emode == EMBRA_PAGE )
        return;

    if (embra.useVQC){
        if( embra.sequential) {
            int cpu;
            for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
                /* Record QC_V reloc base */
                if (EMP[cpu].qc_v) continue;
                EMP[cpu].qc_v = (char*) ZALLOC_PERM(QC_VIRT_SIZE,"EmbraVQC");
                /* memory access entry holds value for QC_REG */
                EMP[cpu].cache_ax = EMP[cpu].qc_v;
                /* Nm output */
                CPUPrint("0x%x D QC_VBASE_%d 0x%x\n",
                         EMP[cpu].qc_v, cpu,
                         (uint)EMP[cpu].qc_v + QC_VIRT_SIZE );
                /* Record QC_P reloc base */
                EMP[cpu].qc_p =
                    (phys_info_t*)ZALLOC_PERM(QC_PHYS_SIZE(M_FROM_CPU(cpu)),
                                              "EmbraPQC");
                CPUPrint("0x%x D QC_PBASE_%d 0x%x\n",
                         EMP[cpu].qc_p, cpu,
                         (uint)EMP[cpu].qc_p + QC_PHYS_SIZE(M_FROM_CPU(cpu)));
            }
        } else {
            ASSERT (0);
        }
    } else { /* !embra.useVQC */
        if( embra.sequential) {
            int cpu;
            for( cpu = 0; cpu < TOTAL_CPUS; cpu++ ) {
                /* Record QC_V reloc base */
                if (EMP[cpu].pa_p) continue;
                EMP[cpu].pa_p = (pa_info_t*)
                    ZALLOC_PERM(PA_SIZE(M_FROM_CPU(cpu)),"EmbraPA");
                /* Nm output */
                CPUPrint("0x%x D PA_BASE_%d 0x%x\n",
                         EMP[cpu].pa_p, cpu,
                         (uint)EMP[cpu].pa_p + PA_SIZE(M_FROM_CPU(cpu)));
                /* memory access entry holds value for PA_REG */
                EMP[cpu].cache_ax=(pa_info_t *)(EMP[cpu].pa_p-
                           (MA_TO_UINT(SIM_MEM_ADDR(M_FROM_CPU(cpu))) >>
                            log2SCACHE_LINE_SIZE));
            }
        } else {
            ASSERT (0);
        }
    }
}

/* Invalidate the quick check entries specified in cpu_bits */
/* Invalidate physical first to avoid race condition */
void
qc_clobber( PA pAddr, int cpuNum, EmVQCMemState state )
{
  phys_info_t* phys;
  pa_info_t* pa;
  char* virt = 0;

#ifdef DISABLE_QC
  return; /* EXP */
#endif

  if (embra.useVQC){
    /* Does this work?  We read vline, it changes, we invalidate a vline
       which was invalid and the pqc, but process uses vqc.  Could ll/sc
       the write of phys */
    phys = &EMP[cpuNum].qc_p[ADDR2SLINE(pAddr)];
    if( PQC_VLINE(*phys) ) {
      virt = &EMP[cpuNum].qc_v[PQC_VLINE(*phys)];
    }
    /*
     * remove the qc entry even if we only have a downgrade
     */
    *phys = PQC_SET_INV;
    if( virt )
       *virt = MEM_INVALID;
  } else { /* !embra.useVQC */
     EMP[cpuNum].pa_p[ADDR2SLINE(pAddr)]=PA_SET_INV;
  }
}

/* Transition from exclusive to read shared -- used when newly written */
/* code is executed.  This allows us to detect further writes */
void qc_downgrade(int cpuNum, VA vAddr, int new_state)
{
  ASSERT( VQC_SHARED(new_state) );
  switch(embra.emode) {
  case EMBRA_CACHE:
     if (embra.useVQC){
        ASSERT (EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)]);
        EMP[cpuNum].qc_v[ADDR2SLINE(vAddr)] = new_state;
     } else {
        EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)] =
            MMU_PROT_READ(EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)] );
     }
     break;
  case EMBRA_PAGE:
     SetMMUEntry(&EMP[cpuNum],vAddr,
                 MMU_PROT_READ(EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)]));
     break;
  }
}

/* *************************************************************
 * for all cpus, downgrade the protection. called by
 * for TC coherence
 * *************************************************************/
void qc_downgrade_ifpresent(VA vAddr)
{
   int cpuNum;
   if (embra.emode == EMBRA_CACHE && embra.useVQC) {
      for (cpuNum=0;cpuNum<TOTAL_CPUS;cpuNum++) {
         if (VQC_EXCL(EMP[cpuNum].qc_v[ADDR2SLINE(vAddr)])) {
            EMP[cpuNum].qc_v[ADDR2SLINE(vAddr)] &= ~MEM_D_EXCLUSIVE;
         }
      }
   } else {
      for (cpuNum=0;cpuNum<TOTAL_CPUS;cpuNum++) {
         SetMMUEntry(&EMP[cpuNum],vAddr,
                     MMU_PROT_READ(EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)]));
      }
   }
}

static long clearVec=0;  /* bitvector, each CPU calls qc_clear only ones */

/* Clear mmu relocation info (for the mapped areas) */
/* and clear quick cache state info (for all cached areas)*/
static void qc_clear(int cpuNum)
{
   ASSERT(!(clearVec&(1<<cpuNum)));
   clearVec=clearVec|(1<<cpuNum);
}

/* Page mode: note that driver.c:EmbraInstallMemAnnotation will be
 * called after qc_renew runs, so we are free to put in translations
 * here for all pages, even those with load/store annotations on them.
 */
void qc_renew( int cpuNum )
{
   K0A i;
   MA mAddr;
   int machNo = M_FROM_CPU(cpuNum);

   if( !embra.MPinUP || cpuNum == 0 ) qc_cache_init(cpuNum);
   /* qc_clear(cpuNum); */

   /* If mmu has not been filled-in for the valid KSEG0 range,
    * do it now.
    */
   if( !EMP[cpuNum].kernelMMU[PAGE_NUMBER(__MAGIC_OSPC_END)] ) {
      /* Store the proper page translations for all other K0 addresses */
      if (!CPUVec.CheckFirewall) {
         for(i = (Reg32)K0BASE ; i < (Reg32)(K0BASE + MEM_SIZE(machNo)) ;
             i += DEFAULT_PAGESZ ) {
            /* Own text segment exclusive */
            MA mAddr = PHYS_TO_MEMADDR(machNo, K0_TO_PHYS_REMAP(i, cpuNum));
            if(embra.emode == EMBRA_PAGE || !embra.useVQC)
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(i)] = MMU_PROT_WRITE(mAddr);
            else /* Cache Mode with QC */
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(i)] = mAddr;
         }
      } else {
         /* k0 pages without firewall permission should not be in
          * the mmu
          */
         for(i = (Reg32)K0BASE ; i < (Reg32)(K0BASE + MEM_SIZE(machNo));
             i += DEFAULT_PAGESZ ) {
            /* Own text segment exclusive */
            PA pAddr = K0_TO_PHYS_REMAP(i, cpuNum);
            MA mAddr = PHYS_TO_MEMADDR(machNo, pAddr);
            if (SimMagic_IsIncoherent(pAddr)) {
               EMP[cpuNum].mmu[PAGE_NUMBER(i)] = 0;
            } else if (embra.emode == EMBRA_CACHE && embra.useVQC) {
               EMP[cpuNum].mmu[PAGE_NUMBER(i)] = mAddr;
            } else if (!CPUVec.CheckFirewall(cpuNum, pAddr)) {
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(i)] = MMU_PROT_READ(mAddr);
            } else {
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(i)] = MMU_PROT_WRITE(mAddr);
            }
         }
      }

      if (annWatchpoints) {
         uint64 addr;
         AnnPtr rec;
         for (addr = AnnFirst("load", &rec); rec; addr = AnnNext(&rec)) {
            VA vAddr = (VA) addr;
            if (IS_KSEG0(vAddr)) {
               PA pAddr = K0_TO_PHYS(vAddr);
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(pAddr)] = 0;
            }
         }
         for (addr = AnnFirst("store", &rec); rec; addr = AnnNext(&rec)) {
            VA vAddr = (VA) addr;
            if (IS_KSEG0(vAddr)) {
               PA pAddr = K0_TO_PHYS(vAddr);
               EMP[cpuNum].kernelMMU[PAGE_NUMBER(pAddr)] = 0;
            }
         }
      }
   }

   /* remap region requires special handling. Currently, we can only
    * remap one page.
    */
   mAddr = PHYS_TO_MEMADDR(machNo, K0_TO_PHYS_REMAP(K0BASE, cpuNum));
   if(embra.emode == EMBRA_PAGE || !embra.useVQC)
     EMP[cpuNum].kernelMMU[PAGE_NUMBER(K0BASE)] = MMU_PROT_WRITE(mAddr);
   else
     EMP[cpuNum].kernelMMU[PAGE_NUMBER(K0BASE)] = mAddr;

   /* the OSPC range is mapped to the second page of KSEG0 in the
    * 32 bit address space version, since we need this to be in cached
    * address space (for Flite). We cannot place a translation for it in
    * the mmu, since all the references have to be fielded by
    * simmagic.c.
    *
    * Note that the OSPC range is only enabled if the remap range is.
    */
   ASSERT( __MAGIC_OSPC_BASE + DEFAULT_PAGESZ == __MAGIC_OSPC_END );
   if ( remapVec->RemapEnable[cpuNum] )
     EMP[cpuNum].kernelMMU[PAGE_NUMBER(__MAGIC_OSPC_BASE)] = 0;

  /* Ensure no code is hanging around in TC that depended on previous
   * state of these pages
   */
  Clear_Translation_State( TCFLUSH_ALL);
}

void qc_tlb_inval_page( int cpuNum, int idx)
{
  EntryHi hi = EMP[cpuNum].tlbEntry[idx].Hi & (~TLBHI_G);
  EntryLo lo0 = EMP[cpuNum].tlbEntry[idx].Lo0;
  EntryLo lo1 = EMP[cpuNum].tlbEntry[idx].Lo1;

   /*
    * check that there is no mapping from qc_v
    */
   if (embra.emode ==EMBRA_CACHE && embra.useVQC && !IS_KSEG0(hi)) {
      int i;
      int base = ADDR2SLINE(TLBHI2ADDR(hi));
      int max = 2 * LINES_PER_PAGE;
      ASSERT( !(TLB_ENT2VPN(hi)&1));
      for (i=0;i<max;i++) {
         if (EMP[cpuNum].qc_v[base+i]) {
            CPUWarning("qc_tlb_inval_page hi=%#x lo=%#x %#x i=%d mapped \n",
                       hi,lo0,lo1,i);
         }
      }
   }

   if( !IS_UNMAPPED_TLBHI( hi ) ) {
      SetMMUEntry(&EMP[cpuNum],TLB_ENT2VPN(hi) * DEFAULT_PAGESZ,0);
      SetMMUEntry(&EMP[cpuNum],(TLB_ENT2VPN(hi)|1)*DEFAULT_PAGESZ,0);
   }
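(The listing above is page 1 of 3, so qc_tlb_inval_page is cut off before the end of the function.)

The code builds per-CPU relocation arrays (kernelMMU/userMMU) indexed by PAGE_NUMBER(vAddr); each entry holds either a relocated machine address, possibly tagged via MMU_PROT_READ/MMU_PROT_WRITE, or 0, which forces a callout to the slow path in mem_control. The standalone sketch below illustrates only that lookup idea; it is not part of qc.c or SimOS, and the names qc_load_byte, slow_path, PAGE_BITS, and NPAGES are invented for the example.

/* Illustrative sketch (not SimOS code): a software "quick check" MMU.
 * One pointer per virtual page; a valid entry gives the fast path,
 * a NULL entry falls back to a slow-path callout. Assumes 4 KB pages. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_BITS   12
#define PAGE_NUM(v) ((v) >> PAGE_BITS)
#define PAGE_OFF(v) ((v) & ((1u << PAGE_BITS) - 1))
#define NPAGES      256                     /* toy address-space size */

static uint8_t *mmu[NPAGES];                /* relocation array */

/* Stand-in for the callout into the memory system. */
static uint8_t slow_path(uint32_t vAddr)
{
    fprintf(stderr, "quick check miss at 0x%x\n", vAddr);
    return 0;
}

/* Fast-path load: one table lookup plus an add when the entry is valid. */
static uint8_t qc_load_byte(uint32_t vAddr)
{
    uint8_t *base = mmu[PAGE_NUM(vAddr)];
    if (base != NULL)
        return base[PAGE_OFF(vAddr)];       /* hit: relocate and access */
    return slow_path(vAddr);                /* miss: call out */
}

int main(void)
{
    uint8_t *page = calloc(1, 1u << PAGE_BITS);
    page[0x34] = 42;
    mmu[PAGE_NUM(0x00002000u)] = page;      /* install a translation */

    printf("%d\n", qc_load_byte(0x00002034u));  /* hit  -> 42 */
    printf("%d\n", qc_load_byte(0x00003034u));  /* miss -> slow path, 0 */
    free(page);
    return 0;
}

In Embra itself the entry also encodes access rights, so downgrading a page for translation-cache coherence (qc_downgrade, qc_downgrade_ifpresent) is done by rewriting the entry with MMU_PROT_READ, and qc_tlb_inval_page clears entries outright when a TLB mapping goes away.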
