
📄 translator.c

📁 An operating system for the MIPS architecture
💻 C
📖 Page 1 of 5
     ECi(ADDR_ADDI_OP, A0, PC_REG, 0);
     ECj(jal_op_, Em_HackedIPageMMUQC); /* Returns in SIM_T2 */
     ECi(ori_op_, RA, SHADOW0, 0); /* Restore RA for chaining purpose */
#else
     if( embra.MPinUP ) {
        /* This code uses the actual virtual address of the current */
        /* process because we can't do the faster version for MPinMP. */
        /* There is such strong incentive for all dynamically linked */
        /* libraries to be mapped at the same VA, that we can simply */
        /* say that each VA, PA pair has its own translated block.  This */
        /* allows the faster check done below. */
        ECsh( srl_op_, SIM_T2, PC_REG, NUM_OFFSET_BITS );
        /* Word Align */
        ECsh( sll_op_, SIM_T2, SIM_T2, 2 );
        ECs(addu_op_, SIM_T2, SIM_T2, MMU_REG );
        ECi(lw_op_, SIM_T2, SIM_T2, 0);
     } else {
        /* Entry point number 2 is for regular chaining.  It checks */
        /* for a physical address match */
        Load_Op_Immed( lw_op, SIM_T2,
                       (uint)&EMP[instrGrp->cpuNum].mmu[PAGE_NUMBER(instrGrp->virt_pc)] );
     }
#endif
     Load_32_Bit_Immed( SIM_T1, MA_TO_UINT(instrGrp->maPC) & 0x7FFFF000 );
     /* If the physical page corresponding to the present virtual page is */
     /* not what we know ours to be, then bail out. */
     /* Don't allow matches to pages which are set exclusive -- */
     /* this implies we are executing off a page which was just */
     /* written to */
     ECilab( bne_op_, SIM_T1, SIM_T2, USE_LABEL(cont_run));
     /* Branch delay instruction in user mode is the Icache check for */
     /* cache mode */
     if (!SAME_PAGE_ENTRY)
        SAME_PAGE_ENTRY = memptr - start;
     /* SAME_PAGE_ENTRY: no need to check anything */
     ASSERT( start + SAME_PAGE_ENTRY == memptr );
  } /* if (!IS_UNMAPPED_ADDR) */
} /* Cache_Prelude_Chain_Check */
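/* [Editor's sketch -- not part of the original source.]  In C terms, the
 * same-page entry point emitted above performs roughly the following test,
 * where mmu[] is the per-CPU page relocation array and maPC is the machine
 * address recorded at translation time (the helper itself is hypothetical):
 *
 *   int same_physical_page(int cpuNum, VA virt_pc, MA maPC)
 *   {
 *       uint cur = (uint)EMP[cpuNum].mmu[PAGE_NUMBER(virt_pc)];
 *       return cur == (MA_TO_UINT(maPC) & 0x7FFFF000);  // page bits match?
 *   }
 *
 * On a mismatch (or when the page is marked exclusive, i.e. was just
 * written), the emitted bne falls out to cont_run for a full re-lookup.
 */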
/* Execution is equivalent to reading */
static void I_Memory_Check( TransState *trans, int cpuNum, VA imm)
{
   /* Note: A0 is set in instr_read_wrapper by clearing the lower bit */
   /* of A2, and copying the result into A0 */
   /* Note: A1 is set in instr_read_wrapper to MEM_I_SHARED */
   /* Don't need an icache check in page mode because of prelude chain check */
   TCA temp = memptr;
   trans->fp_tested = 0;
   switch( embra.emode ) {
   case EMBRA_CACHE:
#if defined(SIM_MIPS64)
      CPUError("Cache mode doesn't work with 64bit stuff\n");
#endif
      if (embra.useVQC) {
         SET_LABEL(rewind_iqc);
         if( embra.MPinUP ) {
            /* In this case need to use the qc_v register */
            Load_32_Bit_Immed(SIM_T4, (uint)(ADDR2SLINE(imm)));
            ECs(addu_op_, V0, SIM_T4, QC_REG);
            ECi(lb_op_, SIM_T4, V0, 0);
         } else {
            /* Load Status byte into SIM_T4 */
            Load_Op_Immed( lb_op, SIM_T4,
                           (unsigned)&EMP[cpuNum].qc_v[ADDR2SLINE(imm)] );
         }
         /* ENSURE that this line is read-only.  That way we catch it if */
         /* someone writes code, jumps to it, and eventually changes the */
         /* code */
         ECi(bgtz_op_, G0, SIM_T4, 7);
         /* Put PC into A0 */
         ECi(ADDR_ADDI_OP, A0, PC_REG,
             COMPOSE_PC(trans) - trans->instrGrp->virt_pc);
         ECi(REG_ST_OP, A0, VSS_BASE, PC_OFF);
         ECi(addiu_op_, A3, G0, trans->cycle_correction);
         ECi(addiu_op_, A1, G0, MEM_I_SHARED);
         /* Because we are in the same segment as TC, and we are jumping to */
         /* an assembly routine, we can just use the bits directly */
         if( embra.sequential ) {
            /* Don't rewind */
            ECi(addiu_op_, SIM_T2, G0, 0);
         } else {
            ECilab(addiu_op_, SIM_T2, G0, USE_LABEL_VALUE(rewind_iqc));
         }
         ECj(jal_op_, phys_mem_ref_wrapper);
         ECnop;
         VCTARGET;
         if (!I_QC_LEN)
            I_QC_LEN = memptr - temp;
         ASSERT( temp == memptr - I_QC_LEN );
      } else { /* !embra.useVQC */
         static v_label_t mmu_or_cache_miss, cache_hit;
         /* get offset into page at Translate time */
         unsigned offset = imm & (DEFAULT_PAGESZ-1);
         /* initialize labels */
         mmu_or_cache_miss = v_genlabel();
         cache_hit = v_genlabel();
         if( embra.MPinUP ) {
            /* In this case need to use the MMU_RELOC register */
            Load_32_Bit_Immed(SIM_T4, (uint)(PAGE_NUMBER(imm) *
                                             sizeof(EMP[cpuNum].mmu[0])) );
            ECs(addu_op_, SIM_T4, SIM_T4, MMU_REG);
            ECi(lw_op_, SIM_T4, SIM_T4, 0);
         } else {
            /* Load Relocated page into SIM_T4 */
            Load_Op_Immed( lw_op, SIM_T4,
                           ((unsigned)&EMP[cpuNum].mmu[PAGE_NUMBER(imm)]));
         }
         /* ENSURE that this page is read-only.  That way we catch it if */
         /* someone writes code, jumps to it, and eventually changes the code */
         /* MMU hit -> check physarray */
         ECs(and_op_, SIM_T4, MMUMASK_REG, SIM_T4); /* clear prot bit of MMU entry */
         v_bleii(VREGS[SIM_T4], 0, mmu_or_cache_miss);
         ECnop;
         /* add offset to MA page # */
         ECi(ori_op_, SIM_T4, SIM_T4, offset);
         ECsh(srl_op_, SIM_T4, SIM_T4, log2SCACHE_LINE_SIZE);
         /* physical line number: ST1, ST2 = PA_REG + ST2 */
         ECs( addu_op_, SIM_T4, SIM_T4, PA_REG );
         ECi( lb_op_, SIM_T4, SIM_T4, 0 ); /* Load PA entry byte into ST2 */
         v_bneii(VREGS[SIM_T4], 0, cache_hit); /* branch on PA hit */
         v_label(mmu_or_cache_miss);
         ECi(ADDR_ADDI_OP, A0, PC_REG,
             COMPOSE_PC(trans) - trans->instrGrp->virt_pc);
         ECi(REG_ST_OP, A0, VSS_BASE, PC_OFF);
         /* Because we are in the same segment as TC, and we are jumping to */
         /* an assembly routine, we can just use the bits directly */
         /* Correct the cycle count */
         ECi(addiu_op_, A3, G0, trans->cycle_correction);
         ECi(addiu_op_, A1, G0, MEM_I_SHARED);
         /* This instruction must be in the delay slot so the USE_LABEL will */
         /* work correctly */
         if( embra.sequential ) {
            /* In embra.MPinUP no need to rewind quick check */
            ECi(addiu_op_, SIM_T2, G0, 0);
         } else {
            ECilab(addiu_op_, SIM_T2, G0, USE_LABEL_VALUE(rewind_dqc));
         }
         ECj(jal_op_, pa_mem_ref_wrapper);
         ECnop;
         v_label(cache_hit);
         if (!I_PA_LEN)
            I_PA_LEN = memptr - temp;
         ASSERT( temp == (memptr - I_PA_LEN) );
      }
      break;
   case EMBRA_PAGE:
#ifndef EMBRA_USE_QC64
      if( embra.MPinUP ) {
         /* In this case need to use the MMU_RELOC register */
         Load_32_Bit_Immed(SIM_T4, (uint)(PAGE_NUMBER(imm) *
                                          sizeof(EMP[cpuNum].mmu[0])) );
         ECs(addu_op_, SIM_T4, SIM_T4, MMU_REG);
         ECi(lw_op_, SIM_T4, SIM_T4, 0);
      } else {
         /* Load Relocated page into SIM_T4 */
         Load_Op_Immed( lw_op, SIM_T4,
                        (unsigned)&EMP[cpuNum].mmu[PAGE_NUMBER(imm)]);
      }
      /* ENSURE that this page is read-only.  That way we catch it if */
      /* someone writes code, jumps to it, and eventually changes the code */
      ECi(bgtz_op_, G0, SIM_T4, 6);
#else
      ECnop;
      ECj(jal_op_, Em_HackedIPageMMUQC); /* Returns in SIM_T2 */
      ECi(ADDR_ADDI_OP, A0, PC_REG, imm - trans->instrGrp->virt_pc);
      ECi(bgtz_op_, G0, SIM_T2, 5 );
#endif
      ECi(ADDR_ADDI_OP, A2, PC_REG,
          COMPOSE_PC(trans) - trans->instrGrp->virt_pc);
      ASSERT( COMPOSE_PC(trans) != 0 );
      ECi(REG_ST_OP, A2, VSS_BASE, PC_OFF);
      ECi(addiu_op_, A1, G0, MEM_I_SHARED);
      ECi(addiu_op_, A3, G0, trans->cycle_correction);
      ECj(jal_op_, mem_ref_wrapper);
      ECnop;
      VCTARGET;
      break;
   }
}
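/* [Editor's sketch -- not part of the original source.]  Both quick checks
 * above index a software table by a bit-field of the virtual address; the
 * usual decompositions (assumed here, they are defined elsewhere) are:
 *
 *   #define PAGE_NUMBER(va)  ((va) >> NUM_OFFSET_BITS)       // page index
 *   #define ADDR2SLINE(va)   ((va) >> log2SCACHE_LINE_SIZE)  // cache-line index
 *
 * So the VQC path tests one status byte per cache line
 * (qc_v[ADDR2SLINE(pc)] > 0 means present and read-only), while the PA path
 * tests one relocation word per page (mmu[PAGE_NUMBER(pc)]) and then one
 * byte per physical line.  A hit lets the fetch continue inline; a miss
 * takes the jal into phys_mem_ref_wrapper / pa_mem_ref_wrapper.
 */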
/* Select which cache to use for a given vAddr based on its location and
 * the SR mode */
#define TC_CACHE(vAddr) (IS_USER(vAddr) && \
                         ((EMP[cpuNum].CP0[C0_SR] & SR_KSU_MSK) == \
                          SR_KSU_USR) \
                         ? TC_USER : TC_KERN)

/* **********************************************************************
 * FindTCA
 *
 * Called by ChainBB. Might have side-effects and might not return.
 * **********************************************************************/
TCA FindTCA(VA vAddr, MA mAddr, int cpuNum)
{
   int tcCache = TC_CACHE(vAddr);

   if( !mAddr ) {
      uint tvRes;
      PA pa;
      /* Must be user code */
      tvRes = Em_TranslateVirtual( cpuNum, vAddr, &pa, ACT_IREAD);
      if( tvRes == BACKDOOR_CODE ) {
         /* Let Translate emulate this call */
         STAT_INC( pc_tc_bdoor_misses );
         STAT_INC( pc_tc_lookup_misses );
         /* Do this here so we don't chain backdoor addresses */
         mem_translate( cpuNum, vAddr );
         /* NOT REACHED */
         ASSERT(0);
         return 0;
      } else if( tvRes == EXCEPTION_CODE ) {
         ReenterTC(&EMP[cpuNum]);
         /* NOT REACHED */
      } else if( tvRes != NORMAL_CODE ) {
         /* this might not be appropriate for cache mode */
         CPUError("EMBRA: pc_tc_lookup error: PC=0x%x \n", vAddr);
      }
      mAddr = PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pa);
      ASSERT( tvRes == NORMAL_CODE );
   } else {
      ASSERT( EMBRA_IS_MEMADDR(M_FROM_CPU(cpuNum), mAddr));
   }

   ASSERT( IS_KSEG0(vAddr) || IS_KUSEG(vAddr) || IS_KSEG2(vAddr));
   return TC_PCLookup(tcCache, vAddr, mAddr);
}

TCA FindTCA_NoSE(int cpuNum, VA vAddr)
{
   MA mAddr;
   int tcCache;
   K0A k0A = non_excepting_tv(cpuNum, vAddr);
   if (!k0A) return 0;
   mAddr = K0_TO_MEMADDR(M_FROM_CPU(cpuNum), k0A);
   if (!mAddr) return 0;
   tcCache = TC_CACHE(vAddr);
   return TC_PCLookup(tcCache, vAddr, mAddr);
}
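/* [Editor's note -- illustrative, not in the original source.]  The two
 * lookup paths differ only in how a missing translation is handled:
 * FindTCA may take the slow path (mem_translate / ReenterTC) and never
 * return, while FindTCA_NoSE ("no side effects") just reports failure.
 * A caller that must keep control would therefore write something like:
 *
 *   TCA t = FindTCA_NoSE(cpuNum, targetPC);  // never raises an exception
 *   if (!t) {
 *       // leave the jump unchained; resolve it later via ChainBasicBlock
 *   }
 */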
/* ***********************************************************************
 * FindEntryPoint
 *
 * This is called on non-speculative chainings only, either at translation
 * time (direct chaining) or at execution time (delayed chaining)
 * **********************************************************************/
TCA FindEntryPoint(VA startBB, VA endBB, VA target, TCA targetTrans )
{
   /*
    * the offsets are defined in bytes, not instructions
    */
   TCA to = targetTrans + CHECK_MAPPING_ENTRY;
   int success = 1;

   /* SAME_PAGE_ENTRY only exists for user code (because it's mapped) */
   if( !IS_UNMAPPED_ADDR( target ) ) {
      STAT_INC(chain_user_chains);
      if( PAGE_NUMBER(startBB) == PAGE_NUMBER(target) &&
          PAGE_NUMBER(endBB) == PAGE_NUMBER(target) ) {
         STAT_INC(chain_samepg_chains);
         to += SAME_PAGE_ENTRY;
      } else {
         /*
          * just being careful here, but we cannot afford to have this
          * chaining bypass fail and the next one (the cache mode check)
          * succeed
          */
         success = 0;
      }
   }
   if( success &&
       embra.emode == EMBRA_CACHE &&
       ADDR2SLINE(startBB) == ADDR2SLINE(target) &&
       ADDR2SLINE(endBB) == ADDR2SLINE(target)) {
      STAT_INC(chain_sameln_chains);
      to += IREF_LEN;
   }
#ifdef BROKENNOW_KEEP_FOR_LATER
   /*
    * this just checks that the entry point has
    * an "expected" opcode.  Unfortunately, this is now broken
    */
   ASSERT((*(unsigned*)to & 0xffff0000) == 0x26940000 ||
          (*(unsigned*)to & 0xfc000000) == 0x3c000000 ||
          (*(unsigned*)to & 0xfc0007ff) == 0x00000302);
#endif
   return to;
}

/* *******************************************************************
 * ChainBasicBlock
 *
 * This function is called from the TC through the continue_run and
 * continue_run_without_chaining wrappers.
 *
 * Its purpose is to determine the translation of the target BB and to
 * patch the last instruction of the chain-from BB so it bypasses
 * ChainBasicBlock from then on (one-time self-modifying code).
 *
 * On entry, jump_addr is the address of the jump at the end of the
 * previous basic block, and new_pc is the (simulated) PC of the
 * target basic block that we are jumping to.
 * *******************************************************************/

/* Chaining note: if the cost of cache flushing is too high, the direct */
/* jal instruction can be replaced with a load (from an area which can */
/* be overwritten) and a jump. */

/* This function is called from continue_run, and that procedure does */
/* some work for it */

/* these macros are going to be machine-dependent */
#define MIPS_CHANGE_JUMP_TARGET(jump_addr, new_target) \
   *(unsigned*)jump_addr = ComposeJump( jal_op, (uint)new_target>>2 )

#define MIPS_IS_JUMP(jump_addr) \
   ((*(unsigned*)jump_addr & 0xfc000000) == 0x0c000000)

/* XXX-XXX XXX */
/* Note this check is totally dependent on what is done in */
/* Update_PC. Here we check to see if the upper 11 bits of the */
/* previous instruction are non-0. If they are, then we know */
/* that this can't be a register-indirect jump, because Update_PC */
/* puts an OR with rs==0 in the delay slot */
#define MIPS_ISNT_REGIND(jump_addr) (*(jump_addr-1) & 0xffe00000 )
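/* [Editor's sketch -- ComposeJump is defined elsewhere in Embra; this is a
 * plausible definition under the standard MIPS J-format encoding, given
 * that MIPS_IS_JUMP above tests for the jal opcode 0x0c000000:
 *
 *   unsigned ComposeJump(unsigned op, unsigned target26)
 *   {
 *       return (op << 26) | (target26 & 0x03ffffff);  // 6-bit opcode + 26-bit word index
 *   }
 *
 * MIPS_CHANGE_JUMP_TARGET overwrites a single instruction word in place;
 * the SyncInstr() call in ChainBasicBlock below is what makes this
 * self-modification visible to the host instruction cache.
 */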
TCA ChainBasicBlock( TCA jump_addr, VA new_pc)
/*
 *        register TCA to,
 *        register VA old_pc,
 *        register VA new_pc )
 */
{
#ifdef EMBRA_USE_QC64
   MA newMA = (MA) Em_QC64Reload(new_pc, QC64_READ);
   TCA targetTCA = FindTCA(curEmp->PC, newMA, curEmp->myNum);
   TCA chainTCA;
#else
   MA mapping    = curEmp->mmu[PAGE_NUMBER(curEmp->PC)];
   MA newMA      = (mapping ? mapping + PAGE_OFFSET(curEmp->PC) : 0);
   TCA targetTCA = FindTCA(curEmp->PC, MMU_PROT_READ(newMA), curEmp->myNum);
   TCA chainTCA;
#endif

   ASSERT( new_pc == curEmp->PC);
   ASSERT( new_pc > 0x100000); /* XXX for debugging */

   if (!targetTCA) {
      int has_flushed;
      targetTCA = Translate(curEmp->myNum, curEmp->PC, &has_flushed);
      SyncInstr();

      /* Translate flushed the TC, so the jump we're supposed to patch up
       * is not a valid instruction anymore.
       */
      if (has_flushed) return targetTCA;
   }

   /*
    * make sure that the qc data structures are ok
    */
   if (embra.emode == EMBRA_CACHE && embra.useVQC) {
      ASSERT( !VQC_EXCL(curEmp->qc_v[ADDR2SLINE(curEmp->PC)]));
   } else {
