📄 main_run.c
字号:
/*
 * NOTE(review): this chunk was recovered from a listing whose newlines were
 * lost; it has been re-indented only -- all code tokens are unchanged.
 *
 * These functions are GENERATORS: each one runs once at simulator startup
 * and uses the vcode API (v_lambda/v_.../v_end) to emit a machine-code stub
 * into `buf`, returning a pointer to the emitted code.  The order of the
 * v_* calls IS the order of the emitted instructions.
 */

/* Tail of a generator whose definition begins before this chunk:
 * restore the common simulator registers and return the emitted stub. */
    CORRECT_INCOMING_CC;
    LOAD_COMMON_VARS;
    v_ldui(ra, vss_base, JUMPPC_OFF);
    /* v_jp(ra); */
    return v_end(used).v;
}

/* This is preserved for Historical purposes */
/* NOTE(review): raw MIPS assembly, not C; compiles only if
 * EMMETT_TOO_INTENSE is never defined.  Kept verbatim. */
#ifdef EMMETT_TOO_INTENSE
{
    /* We know we are not in a delay slot */
    sw PC_REG, PC_OFF(VSS_BASE)
    sw ra, JUMPPC_OFF(VSS_BASE)
    lw a0, callout_interval
    /* EMP[0].cycleCountdown += callout_interval */
    addu CLOCK_REG, a0
    /* Load lo end of cycle count */
    lw t1, LOCC_OFF(VSS_BASE)
    /* EMP[curr_cpu].cycleCount += timeQuantum */
    /* Actually increase clock by amount used in previous quantum */
    addu v0, t1, a0
    /* If that overflows (changes sign bit which really represents both
       overflow and transition from 31 to 32 bit quantity, but who really
       cares), then do it the long way */
    xor t1, v0
    bltzal t1, IncCCasm
    /* Both long and short paths store lo part of cycle count back */
    sw v0, LOCC_OFF(VSS_BASE)
    /* Load callback time ptr */
    lw t0, CALL_OFF(VSS_BASE)
    /* Load hi end of callback time */
    lw t1, 0(t0)
    /* Load hi end of cycle count */
    lw t2, HICC_OFF(VSS_BASE)
    /* If the most signifigant word of the two do not agree, bail */
    bne t1, t2, 1f
    /* Load lo end of callback time */
    lw t1, 4(t0)
    /* Load lo end of cycle count */
    lw t2, LOCC_OFF(VSS_BASE)
    /* t3 = cyclecount - callbacktime */
    subu t3, t2, t1
    /* If the callback time is greater, then likely no callback is pending */
    bltz t3, 2f
1:
    SPILL_FP_IF_ENABLED
    CORRECT_OUTGOING_CC(zero)
    jal CEmEventPoll
    /* Certified FP free so return is safe */
    /* Other Sregs the same b/c this is normal call/return */
2:
    CORRECT_INCOMING_CC
    LOAD_COMMON_VARS
    lw ra, JUMPPC_OFF(VSS_BASE)
    j ra
}
#endif

/*
 * GenEmbra_CX: emit the context-switch stub for simulating a
 * multiprocessor on a uniprocessor (MP_IN_UP).  The emitted code
 * checkpoints the outgoing CPU's state (jumpPC, pc, cycle counters,
 * cache-hit counters) into its VSS block, advances the 64-bit cycle
 * count (calling IncCCasm on 32-bit carry), advances vss_base to the
 * next CPU's state block, reloads that CPU's registers, and jumps to
 * the Embra_CX_nochain stub with the new jumpPC in t9.
 */
vfptr GenEmbra_CX(char *buf, int size, int *used)
{
    static v_label_t IncCC_label;

    v_lambda("Embra_CX", "", NULL, V_LEAF, (v_code *) buf, size);
    IncCC_label = v_genlabel();
    /* Context switch for MP_IN_UP */
    /* Ensure that clock and pc are up to date for prev CPU */
    v_stui(ra, vss_base, JUMPPC_OFF);
    REG_ST_OP(pc_reg, vss_base, PC_OFF);
    /* EMP[curr_cpu].cycleCountdown += timeQuantum */
    v_ldui(t0, vss_base, TQ_OFF);
    v_addu(clock_reg, clock_reg, t0);
    v_stui(clock_reg, vss_base, CCD_OFF);
    v_stui(dhit_reg, vss_base, SDHIT_OFF);
    v_stui(ihit_reg, vss_base, SIHIT_OFF);
    /* Load lo end of cycle count */
    v_ldui(t1, vss_base, LOCC_OFF);
    /* EMP[curr_cpu].cycleCount += timeQuantum */
    /* Actually increase clock by amount used in previous quantum */
    v_addu(v0, t1, t0);
    /* If that overflows (changes sign bit which really represents both
       overflow and transition from 31 to 32 bit quantity, but who really
       cares), then do it the long way */
    v_xoru(t1, t1, v0);
    /* sign unchanged (t1 ^ v0 >= 0): no carry, skip the slow-path call;
       otherwise fall through and call IncCCasm to propagate the carry */
    v_bgeii(t1, 0, IncCC_label);
    ASSERT(IncCCasm != 0);
    v_jalpi(ra, IncCCasm);
    v_label(IncCC_label);
    /* Both long and short paths store lo part of cycle count back */
    v_stui(v0, vss_base, LOCC_OFF);
    SPILL_FP_IF_ENABLED
    /* VSS_BASE = VSS_BASE->next */
    v_ldui(vss_base, vss_base, NEXT_OFF);
    v_setl(t1, ((unsigned) &curEmp));
    v_stui(vss_base, t1, 0);
    /* Load S registers */
    LOAD_COMMON_VARS
    v_ldui(clock_reg, vss_base, CCD_OFF);
    v_ldui(dhit_reg, vss_base, SDHIT_OFF);
    v_ldui(ihit_reg, vss_base, SIHIT_OFF);
    REG_LD_OP(pc_reg, vss_base, PC_OFF);
    v_ldui(t9, vss_base, JUMPPC_OFF);
    ASSERT(Embra_CX_nochain != 0);
    v_jpi(Embra_CX_nochain);
    v_nop();    /* branch delay slot */
    return v_end(used).v;
}

/*
 * GenEmbra_CX_nochain: emit the context-switch tail -- a single indirect
 * jal through t9 (the incoming CPU's jumpPC, loaded by Embra_CX above).
 */
vfptr GenEmbra_CX_nochain(char *buf, int size, int *used)
{
    v_lambda("Embra_CX_nochain", "", NULL, V_LEAF, (v_code *) buf, size);
    /* XXX BL: I think this whole scheme is broken now... */
    /* XXX - By doing a jal here, we can catch the case where cpu 0
       calls the CX code, and the jumpPC contained in cpu 1 is to a
       block which was chained speculatively and is actually bogus.
       In this case, the jal will set the RA to
       continue_run_without_chaining which we will check in Chain_To */
    v_jalp(ra,t9);
    v_nop();    /* branch delay slot */
    return v_end(used).v;
}

extern void SyncInstr(void);

/*
 * Gencontinue_run_without_chaining: emit the stub run when a translated
 * block must be looked up WITHOUT chaining it to its predecessor (a0=0
 * tells ChainBasicBlock not to chain).  Jumps to the code address that
 * ChainBasicBlock returns in v0.
 */
vfptr Gencontinue_run_without_chaining(char *buf, int size, int *used)
{
    v_lambda("continue_run_without_chaining", "", NULL, V_LEAF, (v_code *) buf, size);
    /* XXX BL: I think this may be broken as well now that we
     * are using the setjmp/longjmp method of entry, but whatever...
     * it doesn't crash at the moment. */
    /* Pretend we have a frame, because we leave 32w at top of the */
    /* stack for this purpose */
    v_stui(ra, sp, STOFF_RA);
    OUT_OF_TC
/* NOTE(review): raw assembly leftover -- will not compile if PRINT_PC
 * is defined; kept verbatim as historical debugging aid. */
#ifdef PRINT_PC
    /* Something like this may be useful in the future for MP debugging */
    lw a0, MYNUM_OFF(VSS_BASE)
    move a1, PC_REG
    jal print_pc
    ;
#endif
    REG_ST_OP(pc_reg, vss_base, PC_OFF);
    CORRECT_OUTGOING_CC(zero);
    SPILL_FP_IF_ENABLED;
    /*
     * No chaining a0==0
     */
    v_setu(a0, 0);
#ifdef SIM_MIPS64
    v_movl(a1, pc_reg);
#else
    v_movi(a1, pc_reg);
#endif
    /* XXX UGH!! t9 convention... what to do??? */
    v_setl(t9, ((unsigned) ChainBasicBlock));
    v_jalpi(ra,ChainBasicBlock);
    /* XXX HELP!! How do we do this?? Need new vcode operation... */
    /* sync */
    v_jalpi(ra, SyncInstr);
    /* actually only need to restore the common variables,not the volatiles */
    LOAD_COMMON_VARS
    CORRECT_INCOMING_CC
    ENTERING_TC
    v_jp(v0);   /* v0 = translated-code address from ChainBasicBlock */
    v_nop();    /* branch delay slot */
    return v_end(used).v;
}

/*
 * Gencontinue_run: emit the normal re-entry stub.  Unlike the no-chain
 * variant it passes the caller's return address (backed up two
 * instructions) in a0 so ChainBasicBlock can chain the blocks together.
 */
vfptr Gencontinue_run(char *buf, int size, int *used)
{
    v_lambda("continue_run", "", NULL, V_LEAF, (v_code *) buf, size);
    v_stui(ra, sp, STOFF_RA);
    OUT_OF_TC
    REG_ST_OP(pc_reg, vss_base, PC_OFF);
    CORRECT_OUTGOING_CC(zero);
    SPILL_FP_IF_ENABLED
    /* a0 = address of the jal site (ra - 2 instructions) for chaining */
    v_ldui(a0, sp, STOFF_RA);
    v_addui(a0, a0, -2*INST_SIZE);
#ifdef SIM_MIPS64
    v_movl(a1, pc_reg);
#else
    v_movi(a1, pc_reg);
#endif
    /* xxx ugh - more t9 */
    v_setl(t9, ((unsigned )ChainBasicBlock));
    v_jalp(ra, t9);
    LOAD_COMMON_VARS
    CORRECT_INCOMING_CC
    ENTERING_TC
    v_jp(v0);   /* v0 = translated-code address from ChainBasicBlock */
    v_nop();    /* branch delay slot */
    return v_end(used).v;
}

/* *********************************************************
 * SpillFP and RestoreFP can only be called from assembly
 * as they use VSS_BASE
 * !!! Both functions smash SIM_T4. Beware of that one.
 * *********************************************************/

/* GenSpillFP: emit code that stores all 32 simulated FP registers into
 * the per-CPU state block at FP_OFF. */
vfptr GenSpillFP(char *buf, int size, int *used)
{
    int i;

    v_lambda("SpillFP", "", NULL, V_LEAF, (v_code *) buf, size);
    /* Save the FP registers */
    for (i = 0; i <= 31; i++) {
        /* XXX This is truly hosed */
#ifdef SIM_MIPS64
        VC_sdc1_op_(FVREGS[i], vss_base, i*8 + FP_OFF);
#else
        v_stfi(FVREGS[i], vss_base, i*4 + FP_OFF);
#endif
    }
    /* we don't use the real FC registers any more, so we don't need
       to do the following:
           cfc1 SIM_T4, $0
           sw SIM_T4, 0*4+FCR_OFF( FPVSS_BASE )
           cfc1 SIM_T4, $30
           sw SIM_T4, 30*4+FCR_OFF( FPVSS_BASE )
           cfc1 SIM_T4, $31
           sw SIM_T4, 31*4+FCR_OFF( FPVSS_BASE )
    */
    /* v_jp(ra); */
    return v_end(used).v;
}

/* GenRestoreFP: emit code that reloads all 32 simulated FP registers
 * from the per-CPU state block at FP_OFF (inverse of SpillFP). */
vfptr GenRestoreFP(char *buf, int size, int *used)
{
    int i;

    v_lambda("RestoreFP", "", NULL, V_LEAF, (v_code *) buf, size);
    /* Load the FP registers */
    for (i = 0; i <= 31; i++) {
        /* XXX This is truly hosed */
#ifdef SIM_MIPS64
        VC_ldc1_op_(FVREGS[i], vss_base, i*8 + FP_OFF);
#else
        v_ldfi(FVREGS[i], vss_base, i*4 + FP_OFF);
#endif
    }
    /* We don't use the real FCR's, so we don't do this any more:
       Load floating point control registers
           lw SIM_T4, 0*4+FCR_OFF( FPVSS_BASE )
           ctc1 SIM_T4, $0
           lw SIM_T4, 30*4+FCR_OFF( FPVSS_BASE )
           ctc1 SIM_T4, $30
           lw SIM_T4, 31*4+FCR_OFF( FPVSS_BASE )
           ctc1 SIM_T4, $31
    */
    /* v_jp(ra); */
    return v_end(used).v;
}

/* This is sad; we have no way of doing this as yet...
LEAF(SyncInstr)
    sync
    j ra
END(SyncInstr)
*/

/* XXX NOTE:
 * I am inserting the callout stuff here for now because
 * it is convenient to have it all in one file....
 * maybe in the future we can come up with a nice way
 * of doing this....
 */

/* CALLOUT CONVENTION:
   a0 - contains the cpunumber
   a1 - function specific parameter
   a2 - function specific parameter
   a3 - for loads and stores, number of cycles left in the basic block
   All functions do Em_PE[a0].pc = a1; as one of the first operations
*/

extern void Em_MoveFromC0(int cpuNum, Inst instr); /* r4k_cp0.c */
extern void Em_ExceptionReturn(int cpuNum);
extern void Em_CacheOP(int cpuNum, Inst instr);

/* XXX Should really get the type checking right here... */
/* Dispatch table for callouts; indexed (via sim_t2) by the Gencallout
 * stub below.  Order must match the callout numbering used by the
 * translator -- NOTE(review): that numbering is defined elsewhere. */
vfptr target_table[] = {
    0,                               /* do_periodic_callout */
    (vfptr) Em_ReadTLBEntry,         /* TLBR */
    (vfptr) Em_WriteTLBEntry,        /* TLBW */
    (vfptr) Em_WriteRandomTLBEntry,  /* TLBWR */
    (vfptr) Em_ProbeTLB,             /* TLBP */
    (vfptr) Em_RestoreFromException, /* RFE */
    (vfptr) Em_ExceptionReturn,      /* ERET */
    (vfptr) Em_RaiseC1Unusable,
    (vfptr) Em_MoveToC0,
    (vfptr) Em_RaiseEXCEPTION,
    (vfptr) TNS,
    (vfptr) Embra_SimosDebugBreak,
    (vfptr) Embra_SimosKernDebugBreak,
    (vfptr) Embra_DoAnn,
    0,                               /*SimulatorLock*/
    0,                               /*SimulatorUnlock*/
    (vfptr) CountFP,
    (vfptr) Em_MoveFromC0,           /* MFC0 */
    (vfptr) Embra_DoPrePCAnn,
    (vfptr) Em_CacheOP
};

/* Gencallout: generate code for callouts from translated code */
vfptr Gencallout(char *buf, int size, int *used)
{
    v_lambda("callout", "", 0, V_LEAF, (v_code *) buf, size);
    /* Notice that we CORRECT_CC this is a small expense, but it allows
       callout routines to return to the translation cache via ReenterTC.
       This (specifically) allows MoveToC0 to uncover and take an
       interrupt */
    OUT_OF_TC;
    STACK_SAVE_REGS;
    SAVE_SHADOW_REGS;
    CORRECT_OUTGOING_CC(a3);
    SPILL_FP_IF_ENABLED;
    /* XXX - delete ?? */
    v_ldui(a0, vss_base, MYNUM_OFF);
    /* XXX It's that old t9 convention... THIS WILL HAVE TO CHANGE - BL
     * Note also that vcode should have a way to access jump tables of
     * C functions;
     */
    /* Index target_table by sim_t2 -- NOTE(review): presumably the
     * translator puts a pre-scaled byte offset in sim_t2; confirm at
     * the call sites. */
    v_setl(t8, ((unsigned) target_table));
    v_addu(t9, t8, sim_t2);
    v_ldui(t9, t9, 0);
    v_jalp(ra, t9);
    CORRECT_INCOMING_CC;
    STACK_RESTORE_REGS;
    RESTORE_SHADOW_REGS;
    ENTERING_TC;
    /* j ra */
    return v_end(used).v;
}

/* Genmem_ref_wrapper: generate memory reference wrapper;
 * this is an extremely important function */
mr_ptr Genmem_ref_wrapper(char *buf, int size, int *used)
{
    v_lambda("mem_ref_wrapper", "", 0, V_LEAF, (v_code *) buf, size);
    /*
       a0 - address of reference (except instr case which is blank)
       a1 - new_state
       a2 -
       a3 - cycle count correction
    */
    /* THESE functions are wrappers around memory accesses */
    /* There are two versions for each function, one that */
    /* uses the physically indexed quick check (needed for */
    /* cache mode, and possible performance), and one that doesn't */
    STACK_SAVE_REGS;
    SAVE_SHADOW_REGS;
    CORRECT_OUTGOING_CC(a3);
    SPILL_FP_IF_ENABLED;
    v_ldui(a2, vss_base, MYNUM_OFF);  /* a2 = cpu number for mem_ref() */
    v_setl(t9, (unsigned) mem_ref);
    v_jalp(ra, t9);
    v_movu(sim_t1, v0); /* translated address expected in sim_t1 */
    CORRECT_INCOMING_CC;
    STACK_RESTORE_REGS;
    RESTORE_SHADOW_REGS;
    /* j ra */
    return ((mr_ptr) v_end(used).v);
}

/* Genphys_mem_ref_wrapper: definition continues past this chunk. */
mar_ptr Genphys_mem_ref_wrapper(char *buf, int size, int *used)
{
    static v_label_t pmr_label;

    v_lambda("phys_mem_ref_wrapper", "", 0, V_LEAF, (v_code *) buf, size);
    pmr_label = v_genlabel();
    /* a0 contains virtual address of reference, a2 contains pc */
    /* SIM_T1 contains the K0 address of reference, if possible */
    /* SIM_T2 contains the rewind amount */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -