
📄 r4k_cp0.c

📁 An operating system for the MIPS architecture
💻 C
📖 Page 1 / 5
 * this instruction again.
 *
 * This doesn't really return EXCEPTION_CODE, because the EXCEPTION
 * routine does not return; it jumps to continue_run with the
 * handler PC.  I keep them in to be descriptive.
 *
 *****************************************************************/
uint
Em_TranslateVirtual(int cpuNum, VA vAddr, PA *pAddr, Em_accesstype act)
{
   IDX           tlbIndex;
   ContextReg    contextReg;
   XContextReg   xcontextReg;
   int           myASID  = GET_ASID(curEmp->CP0[C0_TLBHI]);
   Reg           VPN2;
   Reg           lo_reg;
   int           region  = GET_REGION(vAddr);
   unsigned char writing = (act == ACT_DWRITE);
   Reg32         sr_reg  = (Reg32)(curEmp->CP0[C0_SR]);

   ASSERT(curEmp->myNum == cpuNum);

   /* Not the best way to do this... */
   if (!curEmp->outOfSlaveLoop) {
      return BACKDOOR_CODE;
   }

   if (region == 0) {
      /* Make sure the address is within the acceptable range. For
       * 32-bit mode this means 32 bits; for 64-bit mode this means
       * implementation-defined bits.
       */
      if (IN_32BIT_MODE(curEmp)) {
         if (ANY_HIGH32_BITS(vAddr)) goto addrErr;
      } else {
         if (HAS_BAD_VADDR_BITS(vAddr)) goto addrErr;
         if (IS_R10000(curEmp) && !(sr_reg & SR_UX) &&
             ANY_HIGH32_BITS(vAddr)) goto addrErr;
      }
      /* KUSEG becomes an uncached space when the ERL bit is set.
       * This is needed for cache error handling. */
      if (sr_reg & SR_ERL) {
         /* need to add k1base because registry works on kseg1 addresses */
         vAddr += K1BASE;
         goto bdoor;
      }
      /* Fall thru to TLB lookup */
   } else if (region == 3) {
      /* Kernel region, no user and limited supervisor */
      if (vAddr >= CKSEG0_START_ADDR) {
         if (!IS_KERNEL_MODE(curEmp)) {
            /* No user, and supervisor limited to a single range */
            if (IS_BACKDOOR(vAddr)) goto bdoor;
            if (sr_reg & SR_KSU_USR) goto addrErr;
            if ((sr_reg & SR_KSU_SUP) && !IS_SUPERV_SEG(vAddr)) goto addrErr;
         }
         if (IS_KSEG0(vAddr)) {
            if (!IS_KERNEL_MODE(curEmp)) goto addrErr; /* Kernel only */
            if (remapVec->RemapEnable[cpuNum] &&
                (vAddr >= __MAGIC_OSPC_BASE && vAddr < __MAGIC_OSPC_END))
               goto bdoor;
            *pAddr = K0_TO_PHYS_REMAP(vAddr, cpuNum);
            if (!IS_VALID_PA(M_FROM_CPU(cpuNum), *pAddr)) goto addrErr;
            if (CPUVec.CheckFirewall)
               goto firewall;
            return NORMAL_CODE;
         }
         if (IS_KSEG1(vAddr)) goto bdoor;
         /* Fall thru to TLB lookup */
      } else {
         /* If we got here we had better not be in 32-bit mode, and
          * there is nothing that the user or supervisor can access.
          */
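         /* This range is XKSEG, the 64-bit kernel mapped segment:
          * kernel-only, and translated through the TLB like KUSEG. */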
         if (IN_32BIT_MODE(curEmp) || !IS_KERNEL_MODE(curEmp)) goto addrErr;
         if (vAddr > XKSEG_END_ADDR) goto addrErr;
      }
      /* Fall thru to TLB lookup */
#if defined(SIM_MIPS64)
   } else if (region == 2) {
      uint cache_algorithm = XKPHYS_CACHE_ALGORITHM(vAddr);
      /* xkphys - only available in kernel 64-bit mode */
      if (IN_32BIT_MODE(curEmp) || !IS_KERNEL_MODE(curEmp)) goto addrErr;
      switch (cache_algorithm) {
      case CBIT_UPDATE:
         if (IS_R10000(curEmp)) {
            break; /* fall thru to error, doesn't work on R10000 */
         }
      case CBIT_NONCOHERENT:
      case CBIT_EXCLUSIVE:
      case CBIT_EXCLUSIVE_WRITE:
      {
         Reg64 offset = XKPHYS_ONE_PAGE_OFFSET(vAddr);
         if (XKPHYS_INVALID_OFFSET(offset)) goto addrErr;
         *pAddr = K0_TO_PHYS_REMAP(XKPHYS_BASE + offset, cpuNum);
         /* XXX - NEED TO ACCESS FLAVOR */
         return NORMAL_CODE;
      }
      case CBIT_UNCACHED:
      case CBIT_UNCACHED_ACCEL:
      {
         uint flavor = XKPHYS_UNCACHED_FLAVOR(vAddr);
         Reg64 offset = XKPHYS_FOUR_PAGE_OFFSET(vAddr);
         if (XKPHYS_INVALID_OFFSET(offset)) goto addrErr;
         if (!IS_R10000(curEmp) && (flavor != 0)) goto addrErr;
         goto bdoor;
      }
      default:
         /* invalid cache algorithm: fall thru to error */
         break;
      }
      goto addrErr;
   } else if (region == 1) {
      /* Supervisor region - only available in 64-bit mode */
      if (IN_32BIT_MODE(curEmp) ||
          ((sr_reg & SR_KSU_USR) && !IS_KERNEL_MODE(curEmp)) ||
          HAS_BAD_VADDR_BITS(vAddr)) goto addrErr;
      if (IS_R10000(curEmp) && !(sr_reg & SR_SX)) goto addrErr;
      /* Fall thru to TLB lookup */
#endif /* SIM_MIPS64 */
   }

   /* Check TLB */
   VPN2 = GET_VPN2(vAddr);
   tlbIndex = Tlb_Lookup(cpuNum, region, VPN2, myASID);
   if (tlbIndex) {
      int szEntry;

      /* We have a matching VPN and ASID - see if it is valid.
       * Tlb_Lookup returns index+1 so that 0 can mean "no match". */
      tlbIndex--;
      szEntry = curEmp->tlbEntrySize[tlbIndex];
      /* Which lo register? */
      if (vAddr & PgSz[szEntry].loBit)
         lo_reg = curEmp->tlbEntry[tlbIndex].Lo1;
      else
         lo_reg = curEmp->tlbEntry[tlbIndex].Lo0;

      if (IS_VALID(lo_reg)) {
         /* Check that the page is dirty or we are only reading */
         if (IS_DIRTY(lo_reg) || !writing) {
            /* Everything is cool - form the address */
#if defined(SIM_MIPS64) && defined(IRIX6_4)
            int cache_algorithm = GET_CACHE_ALGOR(lo_reg);
            if (!((cache_algorithm == CBIT_EXCLUSIVE) ||
                  (cache_algorithm == CBIT_EXCLUSIVE_WRITE))) {
               CPUWarning("%lld Unsupported TLB cache algorithm (%d) at address 0x%llx by cpu %d at PC 0x%llx\n",
                          (uint64)EmbraCpuCycleCount(cpuNum), cache_algorithm,
                          (uint64)vAddr, cpuNum, (uint64)EMP[cpuNum].PC);
            }
#endif
            *pAddr = (((GET_PFN(lo_reg) & SZ2MASK(szEntry)) * 4 * 1024) |
                      (vAddr & PgSz[szEntry].offset_mask));
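            /* Worked example (hypothetical values): with a 4KB page
             * size (offset_mask 0xfff) and GET_PFN(lo_reg) == 0x1234,
             * a vAddr whose low bits are 0xabc translates to
             * pAddr = (0x1234 * 4096) | 0xabc = 0x1234abc. */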
            if (!EMBRA_IS_PADDR(M_FROM_CPU(cpuNum), *pAddr)) {
               /* Bad physical address */
               if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
               Em_EXCEPTION(cpuNum, EXC_DBE, 0);
               return EXCEPTION_CODE;
            }
            if (CPUVec.CheckFirewall)
               goto firewall;

            return NORMAL_CODE;
         } else {
            /* TLB MODIFICATION:
             * the page is not dirty and we want to write.
             * Set up the Context register; the EPC is set up in
             * EXCEPTION, and BadVAddr and Context are stored in
             * the macro. */
            contextReg.tc_data = curEmp->CP0[C0_CTXT];
            contextReg.s32.tc_badvpn = VPN2;
            xcontextReg.tc_data = curEmp->CP0[C0_XCTXT];
            xcontextReg.s64.tc_region = region;
#ifndef BIG_BIT_FIELD_BROKEN
            xcontextReg.s64.tc_badvpn = VPN2;
#else
            xcontextReg.s64.tc_badvpn_hi3 = VPN2 >> 28;
            xcontextReg.s64.tc_badvpn_lo28 = VPN2;
#endif
            EMP[cpuNum].CP0[C0_TLBHI] = (((Reg)region << TLBHI_REGIONSHIFT) |
                                         (VPN2 << TLBHI_VPN2SHIFT) |
                                         ((Reg)myASID << TLBHI_PIDSHIFT));
            if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
            EMP[cpuNum].CP0[C0_CTXT] = contextReg.tc_data;
            EMP[cpuNum].CP0[C0_XCTXT] = xcontextReg.tc_data;

            Em_EXCEPTION(cpuNum, EXC_MOD, 0);
            return EXCEPTION_CODE;
         }
      } else {
         /* TLB INVALID */
         contextReg.tc_data = curEmp->CP0[C0_CTXT];
         contextReg.s32.tc_badvpn = VPN2;
         xcontextReg.tc_data = curEmp->CP0[C0_XCTXT];
         xcontextReg.s64.tc_region = region;
#ifndef BIG_BIT_FIELD_BROKEN
         xcontextReg.s64.tc_badvpn = VPN2;
#else
         xcontextReg.s64.tc_badvpn_hi3 = VPN2 >> 28;
         xcontextReg.s64.tc_badvpn_lo28 = VPN2;
#endif
         EMP[cpuNum].CP0[C0_TLBHI] = (((Reg)region << TLBHI_REGIONSHIFT) |
                                      (VPN2 << TLBHI_VPN2SHIFT) |
                                      ((Reg)myASID << TLBHI_PIDSHIFT));
         EMP[cpuNum].CP0[C0_CTXT] = contextReg.tc_data;
         EMP[cpuNum].CP0[C0_XCTXT] = xcontextReg.tc_data;
         if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, writing ? EXC_WMISS : EXC_RMISS, 0);
         return EXCEPTION_CODE;
      }
   }

   /* TLB REFILL
    * Since there were no matching VPN2s, there is a TLB refill exception.
    * First put the VPN2 and ASID of the non-matching address in Hi.
    * The BadVAddr and Context registers also need to be set.
    * 1.) Set the TLBL or TLBS (store only) code in the cause register;
    *     use the EPC and BD bit in the cause reg, inst or load, or store.
    * 2.) BadVAddr, Context, XContext and EntryHi hold the vAddr that
    *     failed. EntryHi also has the ASID. The EPC points to the last
    *     instruction; take care of branches.
    */
   contextReg.tc_data = curEmp->CP0[C0_CTXT];
   contextReg.s32.tc_badvpn = VPN2;
   xcontextReg.tc_data = curEmp->CP0[C0_XCTXT];
   xcontextReg.s64.tc_region = region;
#ifndef BIG_BIT_FIELD_BROKEN
   xcontextReg.s64.tc_badvpn = VPN2;
#else
   xcontextReg.s64.tc_badvpn_hi3 = VPN2 >> 28;
   xcontextReg.s64.tc_badvpn_lo28 = VPN2;
#endif
   /* BadVAddr register should be loaded */
   if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
   EMP[cpuNum].CP0[C0_TLBHI] = (((Reg)region << TLBHI_REGIONSHIFT) |
                                (VPN2 << TLBHI_VPN2SHIFT) |
                                ((Reg)myASID << TLBHI_PIDSHIFT));
   EMP[cpuNum].CP0[C0_CTXT] = contextReg.tc_data;
   EMP[cpuNum].CP0[C0_XCTXT] = xcontextReg.tc_data;
   if (sr_reg & SR_EXL) {
      Em_EXCEPTION(cpuNum, writing ? EXC_WMISS : EXC_RMISS, 0);
   } else {
      int isXRefill = 0;
      if (((region == 0) && (sr_reg & SR_UX)) ||
          ((region == 3) && (sr_reg & SR_KX)) ||
          ((region == 1) && (sr_reg & SR_SX))) {
         isXRefill = 1;
      }
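      /* On 64-bit parts the UX/KX/SX status bits select the extended
       * (XTLB) refill vector for the faulting region, so the refill
       * is flagged as extended here. */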
      REFILL_EXCEPTION(cpuNum, writing ? EXC_WMISS : EXC_RMISS,
                       act == ACT_IREAD,
                       EMP[cpuNum].PC,
                       vAddr, isXRefill);
   }
   return EXCEPTION_CODE;

addrErr:
   /* Illegal address - generate an address error */
   if (LATCH_BADVADDR(sr_reg)) curEmp->CP0[C0_BADVADDR] = vAddr;
   Em_EXCEPTION(cpuNum, (act == ACT_IREAD) ? EXC_IBE : EXC_DBE, 0);
   return EXCEPTION_CODE;

bdoor:
   {
      void *dat;
      uint flag;
      int  rval;

      if (vAddr == EMP[cpuNum].PC) {
         *pAddr = (PA)RegistryGetSimFunction(vAddr);
         if (*pAddr) return BACKDOOR_CODE;
         /* no such function */
         {
            /* Better only warn once, because the cpu can go into
             * an infinite loop: the IBE handler is a bad
             * backdoor address if BEV is set.
             */
            static int warned = 0;
            if (!warned) {
               CPUWarning("%lld Bad backdoor ifetch to address 0x%x by cpu %d\n",
                          (uint64)EmbraCpuCycleCount(cpuNum), vAddr, cpuNum);
               warned = 1;
               CPUWarning("Bad backdoor warning will not be repeated\n");
            }
         }
         if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, EXC_IBE, 0);
         return EXCEPTION_CODE;
      }
      if (!RegistryIsInRange(vAddr, &dat, &flag)) {
         CPUWarning("%lld Bad backdoor reference to address 0x%x by cpu %d\n",
                    (uint64)EmbraCpuCycleCount(cpuNum), vAddr, cpuNum);
         if (LATCH_BADVADDR(sr_reg)) EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, (act == ACT_IREAD) ? EXC_IBE : EXC_DBE, 0);
         return EXCEPTION_CODE;
      }
      if (flag & REG_DATA) {
         *pAddr = (PA)dat;
      } else {
         Instruction instr;
         Result returnFlag;
         /* NOTE: the following MUST be static, since for backdoor loads
          * the value "loaded" from the backdoor is returned here
          * (actually, pAddr is set to point to this, and the caller
          * fetches the value, foolishly believing it's accessing mem).
          * This is a hack and should be fixed (yeah, when?).
          */
         static uint64 buffer;

         /* Data-addressed backdoor function: decode the simulated
          * instruction to find the register number, and pass that value
          * to the backdoor function. Only works for KSEG0 PCs; otherwise
          * we would have to translate the PC. */
         ASSERT(flag & REG_FUNC);
         if (!IS_KERNEL_MODE(&EMP[cpuNum])) {
            CPUError("BACKDOOR ACCESS WHILE NOT IN KERNEL: pc=%#llx ra=%#llx addr=%#llx\n",
                     (uint64)curEmp->PC, (uint64)curEmp->R[31], (uint64)vAddr);
         }
         /*
          * XXX NOTE: is this the right place to update the current CPU?
          * I'm not sure, but this should work (with little perf impact),
          * at least for the backdoor.
          */
         /* caveat: we might be in a branch delay slot... */
#ifdef EMBRA_USE_QC64
         instr = *(Instruction *)Em_QC64Reload(CLEAR_BD(EMP[cpuNum].PC),
                                               QC64_READ);
#else
         instr = *(Instruction *)K0_TO_MEMADDR(M_FROM_CPU(cpuNum),
                                               CLEAR_BD(EMP[cpuNum].PC));
#endif

         switch (MAJOR_OPCODE(instr)) {
         case lb_op:
         case lbu_op:
            rval = ((MagicFunction)dat)(cpuNum,
                                        vAddr, BDOOR_LOAD_BYTE, &buffer);
            returnFlag = rval ? BUSERROR : SUCCESS;
            break;
         case lh_op:
         case lhu_op:
            rval = ((MagicFunction)dat)(cpuNum,
                                        vAddr, BDOOR_LOAD_HALF, &buffer);
            returnFlag = rval ? BUSERROR : SUCCESS;
            break;
         case lw_op:
            rval = ((MagicFunction)dat)(cpuNum,

[The listing is truncated here; it continues on page 2 of 5.]
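The header comment above pins down the return-code contract: on the exception paths, Em_EXCEPTION and REFILL_EXCEPTION have already redirected execution, so EXCEPTION_CODE is purely descriptive; NORMAL_CODE means *pAddr holds a translated simulated physical address; and BACKDOOR_CODE means *pAddr refers to simulator-private data. A minimal caller sketch under those assumptions follows; LoadWordExample, PerformPhysicalRead, and the ACT_DREAD access type are invented for illustration and are not part of this file:

/* Hypothetical caller - a sketch only, not code from r4k_cp0.c. */
static int
LoadWordExample(int cpuNum, VA vAddr, uint *out)
{
   PA pAddr;

   switch (Em_TranslateVirtual(cpuNum, vAddr, &pAddr, ACT_DREAD)) {
   case NORMAL_CODE:
      /* pAddr is a valid simulated physical address */
      *out = PerformPhysicalRead(cpuNum, pAddr);   /* invented helper */
      return 0;
   case BACKDOOR_CODE:
      /* pAddr points at simulator-private memory (for backdoor loads,
       * the static buffer filled in by the MagicFunction) */
      *out = *(uint *)pAddr;
      return 0;
   default: /* EXCEPTION_CODE */
      /* CP0 state (BadVAddr, Context, EntryHi, ...) is already set up
       * and control has been redirected to the handler; abort access */
      return -1;
   }
}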
