📄 locore.s
字号:
 * possible break instructions that emulate_instr() laid down. If it
 * is one of those two break instructions set the resulting pc and
 * branch back to the caller of emulate_instr(). See emulate_instr()
 * for the interface of how and where all this happens.
 */
	# u+PCB_BD_RA non-zero means we are inside a branch-delay-slot
	# emulation sequence; zero means an ordinary breakpoint trap.
	lw	a2,u+PCB_BD_RA
	beq	a2,zero,VEC_trap	# handle as a trap
	lw	k1,bd_nottaken_bp	# check for the not taken branch bp
	bne	k0,k1,3f
	or	a3,s0,SR_IEC		# enable interrupts
	.set	noreorder
	nop				# hazard padding around mtc0 C0_SR
	mtc0	a3,C0_SR
	nop
	.set	reorder
	sw	zero,u+PCB_BD_RA	# clear the branch delay emulation
	lw	a3,u+PCB_BD_EPC		# the resulting pc in this case is just
	addu	a3,8			# the pc of the next instruction after
					# delay slot
	j	a2			# return to caller of emulate_instr()
# Breakpoint planted by emulate_instr() at the fall-through (branch not
# taken) location; its address is compared against BadVaddr/EPC above.
bd_nottaken_bp:
	break	BRK_BD_NOTTAKEN
3:	lw	k1,bd_taken_bp		# check for the taken branch bp
	bne	k0,k1,VEC_trap		# handle as a trap
	or	a3,s0,SR_IEC		# enable interrupts
	.set	noreorder
	nop
	mtc0	a3,C0_SR
	nop
	.set	reorder
	sw	zero,u+PCB_BD_RA	# clear the branch delay emulation
	lw	a3,u+PCB_BD_EPC		# the resulting pc in this case is the
	lw	a1,u+PCB_BD_INSTR	# the target of the emulated branch
	sll	a1,16			# so add the sign extended offset to
	sra	a1,16-2			# branch's pc for the resulting pc
	addu	a3,a1
	addu	a3,4
	j	a2			# return to caller of emulate_instr()
# Breakpoint planted by emulate_instr() at the branch-taken target.
bd_taken_bp:
	break	BRK_BD_TAKEN
# Ordinary breakpoint: restore the full register state saved on the
# exception frame and hand control to the debugger's breakpoint handler.
4:	and	k0,s0,SR_KUP
	beq	k0,zero,5f		# breakpoint from kernel mode
	lw	k0,u+PCB_CPUPTR		# get cpudata pointer
	sw	zero,CPU_KSTACK(k0)
	lw	gp,EF_GP*4(sp)
5:	lw	a0,EF_A0*4(sp)
	lw	a1,EF_A1*4(sp)
	lw	a2,EF_A2*4(sp)
	lw	a3,EF_A3*4(sp)
	lw	s0,EF_S0*4(sp)
	lw	ra,EF_RA*4(sp)
	lw	k1,EF_SR*4(sp)
	.set	noreorder
	nop
	mtc0	k1,C0_SR		# restore pre-exception status register
	nop
	.set	reorder
	lw	k1,EF_K1*4(sp)
	lw	k0,EF_AT*4(sp)		# save AT in k0
	lw	sp,EF_SP*4(sp)
	.set	noat
	lw	AT,+RB_BPADDR		# address of breakpoint handler
	j	AT			# enter breakpoint handler
	.set	at
kernelbp:
	break	BRK_KERNELBP
	END(VEC_breakpoint)

# Single-step breakpoint instruction; its address is exported so the
# debugger support code can recognize it.
EXPORT(sstepbp)
	break	BRK_SSTEPBP

/*
 * Coprocessor unusable fault
 */
VECTOR(VEC_cpfault, M_EXCSAVE)
	.set	noreorder
	or	a1,s0,SR_IEC		# enable interrupts
	mtc0	a1,C0_SR
	.set	reorder
	and	a1,s0,SR_KUP
	beq	a1,zero,coproc_panic	# kernel tried to use coprocessor
	and	a1,a3,CAUSE_CEMASK	# extract coprocessor number from Cause
	srl	a1,CAUSE_CESHIFT
	bne	a1,1,coproc_not1	# not coproc 1
#ifdef ASSERTIONS
	and	a1,s0,SR_IBIT6
	bne	a1,zero,1f		# fp interrupts must be enabled!
	PANIC("VEC_cpfault")
1:
#endif ASSERTIONS
	/*
	 * This is the floating-point coprocessor (coprocessor 1) unusable
	 * fault handling code. During auto configuration fptype_word
	 * is loaded from the floating-point coprocessor revision word or
	 * zeroed if there is no floating-point coprocessor.
	 */
	sw	gp,u+PCB_OWNEDFP	# mark that fp has been touched
	lw	a2,fptype_word		# check for what type of fp coproc
	bne	a2,zero,1f
	j	softfp_unusable		# no fp coproc (goto fp software)
1:	or	a1,s0,SR_CU1		# enable coproc 1 for the user process
	sw	a1,EF_SR*4(sp)		# SR_CU1 will be set on exception return
	lw	a2,u+PCB_CPUPTR		# get cpudata pointer
	lw	a2,CPU_FPOWNER(a2)	# current coproc 1 (fp) owner
	lw	a1,u+U_PROCP		# current process executing
	beq	a2,a1,coproc_done	# owned by the current process
	or	a3,s0,SR_CU1|SR_IEC	# enable fp and interrupts
	.set	noreorder
	nop				# hazard padding around mtc0 C0_SR
	mtc0	a3,C0_SR
	nop
	.set	reorder
	beq	a2,zero,fp_notowned	# coproc 1 not currently owned
	/*
	 * Owned by someone other than the current process.
	 * Save state (into the fpowner) before taking possession.
	 */
#ifdef ASSERTIONS
	lw	a3,P_SCHED(a2)		# previous owner must still be resident
	and	a3,SLOAD
	bne	a3,zero,1f
	PANIC("VEC_cpfault swapped out")
1:
#endif ASSERTIONS
	# Locate the previous owner's u area through its u-page pte so we
	# can save the FP context into its pcb.
	lw	a3,P_ADDR(a2)		# address of u page ptes
	lw	a3,0(a3)		# u page pte
	and	a3,PG_PFNUM		# isolate physical address of u page
	sll	a3,8			# abstract format to R2000/R3000
	or	a3,K0BASE		# change to virtual address

# Store one coprocessor-1 register into the pcb FP save area at (a3).
#define SAVECP1REG(reg) \
	swc1	$f/**/reg,PCB_FPREGS+reg*4(a3)

	/*
	 * The floating-point control and status register must be
	 * read first to force all fp operations to complete and ensure
	 * that all fp interrupts for this process have been delivered
	 */
	.set	noreorder
	cfc1	a2,fpc_csr
	nop				# cfc1 result hazard
	sw	a2,PCB_FPC_CSR(a3)
	cfc1	a2,fpc_eir
	nop
	sw	a2,PCB_FPC_EIR(a3)
	SAVECP1REG(31); SAVECP1REG(30); SAVECP1REG(29); SAVECP1REG(28)
	SAVECP1REG(27); SAVECP1REG(26); SAVECP1REG(25); SAVECP1REG(24)
	SAVECP1REG(23); SAVECP1REG(22); SAVECP1REG(21); SAVECP1REG(20)
	SAVECP1REG(19); SAVECP1REG(18); SAVECP1REG(17); SAVECP1REG(16)
	SAVECP1REG(15); SAVECP1REG(14); SAVECP1REG(13); SAVECP1REG(12)
	SAVECP1REG(11); SAVECP1REG(10); SAVECP1REG(9);  SAVECP1REG(8)
	SAVECP1REG(7);  SAVECP1REG(6);  SAVECP1REG(5);  SAVECP1REG(4)
	SAVECP1REG(3);  SAVECP1REG(2);  SAVECP1REG(1);  SAVECP1REG(0)
	.set	reorder
	/* Make the process affinity ALLCPU */
	lw	a2,u+PCB_CPUPTR		# get cpudata pointer
	lw	a2,CPU_FPOWNER(a2)	# current coproc 1 (fp) owner
	la	a3,ALLCPU		# affinity of ALLCPU
	sw	a3,P_AFFINITY(a2)	# change affinity of current fpowner
					# to all cpus. What if there was
					# some other affinity restriction
					# before??
	# On SMP, tell the other processors the FP owner changed (the old
	# owner may be running elsewhere with stale SR_CU1).
	lw	a2,smp			# if smp defined
	beq	a2,zero,fp_notowned	#
	jal	tfi_save		# save caller-saved state for C call
	jal	alert_cpu		# inform every processor
	jal	tfi_restore
	lw	a1,u+U_PROCP		# restore current process in a1
fp_notowned:
	/*
	 * restore coprocessor state (from the current process)
	 */
	.set	noreorder
	li	a3,u			# FP context lives in our own u area

# Load one coprocessor-1 register from the pcb FP save area at (a3).
#define RESTCP1REG(reg) \
	lwc1	$f/**/reg,PCB_FPREGS+reg*4(a3)

	or	a2,s0,SR_CU1
	mtc0	a2,C0_SR		# disable interrupts, fp enabled
	nop				# before we can really use cp1
	nop				# before we can really use cp1
	RESTCP1REG(0);  RESTCP1REG(1);  RESTCP1REG(2);  RESTCP1REG(3)
	RESTCP1REG(4);  RESTCP1REG(5);  RESTCP1REG(6);  RESTCP1REG(7)
	RESTCP1REG(8);  RESTCP1REG(9);  RESTCP1REG(10); RESTCP1REG(11)
	RESTCP1REG(12); RESTCP1REG(13); RESTCP1REG(14); RESTCP1REG(15)
	RESTCP1REG(16); RESTCP1REG(17); RESTCP1REG(18); RESTCP1REG(19)
	RESTCP1REG(20); RESTCP1REG(21); RESTCP1REG(22); RESTCP1REG(23)
	RESTCP1REG(24); RESTCP1REG(25); RESTCP1REG(26); RESTCP1REG(27)
	RESTCP1REG(28); RESTCP1REG(29); RESTCP1REG(30); RESTCP1REG(31)
	ctc1	zero,fpc_csr		# quiesce csr before reloading eir/csr
	lw	a2,PCB_FPC_EIR(a3)
	nop
	ctc1	a2,fpc_eir
	lw	a2,PCB_FPC_CSR(a3)
	nop
	ctc1	a2,fpc_csr
	nop
	lw	a2,u+PCB_CPUPTR		# get cpudata pointer
	nop
	sw	a1,CPU_FPOWNER(a2)	# we now own fp
	lw	a2,CPU_MASK(a2)		# cpu mask for this cpu
	nop
	sw	a2,P_AFFINITY(a1)	# change the process's affinity to
					# the current cpu
	mtc0	s0,C0_SR		# disable interrupt and clear SR_CU1
	.set	reorder
	b	exception_exit
coproc_done:
	.set	noreorder
	mtc0	s0,C0_SR		# disable interrupts
	.set	reorder
	b	exception_exit
coproc_not1:
	li	a1,SEXC_CPU		# handle as software trap
	b	VEC_trap		# not soft_trap, must save regs yet
coproc_panic:
	PANIC("kernel used coprocessor")
	END(VEC_cpfault)

/*
 * checkfp(procp, exiting)
 *	procp = proc pointer of process exiting or being swapped out.
 *	exiting = 1 if exiting.
 * Called from exit and swapout to release FP ownership.
 */
LEAF(checkfp)
	lw	v0,u+PCB_CPUPTR		# get cpudata pointer
	lw	v0,CPU_FPOWNER(v0)	# current coproc 1 (fp) owner
	bne	a0,v0,2f		# not owned by us, just return
	bne	a1,zero,1f		# exiting, don't save state
	lw	a3,fptype_word
	beq	a3,zero,1f		# no fp coprocessor
	# Swapping out: save the FP context into the process's u area,
	# located through its u-page pte.
	lw	a3,P_ADDR(a0)		# address of u page ptes
	lw	a3,0(a3)		# u page pte
	and	a3,PG_PFNUM		# isolate physical address of u page
	sll	a3,8			# abstract format to R2000/R3000
	or	a3,K0BASE		# change to virtual address
	/*
	 * The floating-point control and status register must be
	 * read first so as to stop the floating-point coprocessor.
	 */
	.set	noreorder
	mfc0	v1,C0_SR		# enable coproc 1 for the kernel
	nop
	or	v0,v1,SR_CU1
	mtc0	v0,C0_SR		# PE BIT
	nop				# before we can really use cp1
	nop				# before we can really use cp1
	cfc1	v0,fpc_csr
	nop
	sw	v0,PCB_FPC_CSR(a3)
	cfc1	v0,fpc_eir
	nop
	sw	v0,PCB_FPC_EIR(a3)
	SAVECP1REG(31); SAVECP1REG(30); SAVECP1REG(29); SAVECP1REG(28)
	SAVECP1REG(27); SAVECP1REG(26); SAVECP1REG(25); SAVECP1REG(24)
	SAVECP1REG(23); SAVECP1REG(22); SAVECP1REG(21); SAVECP1REG(20)
	SAVECP1REG(19); SAVECP1REG(18); SAVECP1REG(17); SAVECP1REG(16)
	SAVECP1REG(15); SAVECP1REG(14); SAVECP1REG(13); SAVECP1REG(12)
	SAVECP1REG(11); SAVECP1REG(10); SAVECP1REG(9);  SAVECP1REG(8)
	SAVECP1REG(7);  SAVECP1REG(6);  SAVECP1REG(5);  SAVECP1REG(4)
	SAVECP1REG(3);  SAVECP1REG(2);  SAVECP1REG(1);  SAVECP1REG(0)
	ctc1	zero,fpc_csr		# clear any pending interrupts
	mtc0	v1,C0_SR		# disable kernel fp access
	nop
	.set	reorder
1:	lw	v0,u+PCB_CPUPTR		# get cpudata pointer
	sw	zero,CPU_FPOWNER(v0)	# Mark FP as unowned
	lw	a1,CPU_FPE_EVENT(v0)	# if psignal call pending
	bne	a1,zero,3f		# then don't change affinity. this
					# will be done after psignal is called
					# in softnet.
	la	a1,ALLCPU		# change affinity to ALLCPU
	sw	a1,P_AFFINITY(a0)
3:	lw	a1,u+U_PROCP
	bne	a1,a0,2f		# not current process
	# Clear SR_CU1 in the saved SR on the kernel stack so the process
	# faults again (and re-acquires ownership) on its next FP use.
	lw	a1,KERNELSTACK-EF_SIZE+(4*EF_SR)	# current user's sr
	and	a1,~SR_CU1		# clear fp coprocessor usable bit
	sw	a1,KERNELSTACK-EF_SIZE+(4*EF_SR)
2:	j	ra
	END(checkfp)

/*
 * tfi_save -- save enough state so that C routines can be called
 * Saves the caller-saved registers (v0-v1, t0-t9) and MDLO/MDHI into
 * the exception frame at (sp). Counterpart of tfi_restore below.
 */
LEAF(tfi_save)
	sw	v0,EF_V0*4(sp)
	sw	v1,EF_V1*4(sp)
	sw	t0,EF_T0*4(sp)
	mflo	t0			# t0/t1 now free: use them to stage
	sw	t1,EF_T1*4(sp)		# the multiply/divide result regs
	mfhi	t1
	sw	t2,EF_T2*4(sp)
	sw	t3,EF_T3*4(sp)
	sw	t4,EF_T4*4(sp)
	sw	t5,EF_T5*4(sp)
	sw	t6,EF_T6*4(sp)
	sw	t7,EF_T7*4(sp)
	sw	t8,EF_T8*4(sp)
	sw	t9,EF_T9*4(sp)
	sw	t0,EF_MDLO*4(sp)
	sw	t1,EF_MDHI*4(sp)
	j	ra
	END(tfi_save)

/*
 * tfi_restore -- restore state saved by tfi_save
 */
LEAF(tfi_restore)
	lw	v0,EF_MDLO*4(sp)
	lw	v1,EF_MDHI*4(sp)
	mtlo	v0			# restore multiply/divide unit first
	mthi	v1
	lw	v0,EF_V0*4(sp)
	lw	v1,EF_V1*4(sp)
	lw	t0,EF_T0*4(sp)
	lw	t1,EF_T1*4(sp)
	lw	t2,EF_T2*4(sp)
	lw	t3,EF_T3*4(sp)
	lw	t4,EF_T4*4(sp)
	lw	t5,EF_T5*4(sp)
	lw	t6,EF_T6*4(sp)
	lw	t7,EF_T7*4(sp)
	lw	t8,EF_T8*4(sp)
	lw	t9,EF_T9*4(sp)
	j	ra
	END(tfi_restore)

/*
 * End of exception processing. Interrupts should be disabled.
 */
VECTOR(exception_exit, M_EXCEPT)
	/*
	 * ENTRY CONDITIONS:
	 *	Interrupts Disabled
	 *	s0 contains sr at time of exception
	 *
	 * If we are returning to user mode, check to see if a resched is
	 * desired. If so, fake a RESCHED cause bit and let trap save/restore
	 * our state for us.
	 */
#ifdef DS5000_100
#ifdef MIPS_ARCH_SPL_ORIG
	lw	a0,mips_spl_arch_type
	beq	a0,zero,1f
#endif
	# KN02BA (DS5000/100) interrupt scheme: the ipl saved in the frame
	# indexes both the system interrupt mask and the SR mask tables.
	.set	noreorder
	lw	a0, EF_SYS1*4(sp)
	lw	a3, EF_SR*4(sp)		# get SR from exception frame
	sw	a0, ipllevel
	sll	a0, a0, 2		# multiply by 4
	lw	a1, kn02ba_sim(a0)	# get system interrupt mask value
	lw	a2, splm(a0)		# get status register mask value
	sw	a1, KN02BA_SIRM_K1ADDR	# load mask register with value
	lw	a1, KN02BA_SIRM_K1ADDR	# reread address to flush write buffer
	andi	a2, a2, 0xff00		# get interrupt mask bits only
	li	k0, 0xffff00ff
	and	a3, a3, k0		# turn off all mask bits
	or	a2, a2, a3		# or in new mask bits
	sw	a2, EF_SR*4(sp)		# restore SR to exception frame
	.set	reorder
1:
#endif
	and	k0,s0,SR_KUP
	beq	k0,zero,2f		# returning to kernel mode
	lw	k0,u+PCB_RESCHED
	beq	k0,zero,1f		# no resched requested
	move	a0,sp
	li	a1,SEXC_RESCHED		# software exception
	lw	a3,EF_CAUSE*4(sp)
	b	VEC_trap
1:	lw	k0,u+PCB_CPUPTR		# get cpudata pointer
	sw	zero,CPU_KSTACK(k0)
	lw	gp,EF_GP*4(sp)
2:	lw	a0,EF_A0*4(sp)
	lw	a1,EF_A1*4(sp)
	lw	a2,EF_A2*4(sp)
	lw	a3,EF_A3*4(sp)
	lw	s0,EF_S0*4(sp)
	lw	ra,EF_RA*4(sp)
#ifdef ROWEN_LEDS
	li	k0,0xbe0800c1
	sb	k0,-190(k0)
#endif
	lw	k0,EF_EPC*4(sp)
	lw	k1,EF_SR*4(sp)
	.set	noreorder
	.set	noat
	lw	AT,EF_AT*4(sp)
	mtc0	k1,C0_SR		# PE BIT
	lw	sp,EF_SP*4(sp)
	j	k0			# back to EPC; rfe in the delay slot
	c0	C0_RFE			# restore pre-exception KU/IE state
	.set	at
	.set	reorder
	END(exception_exit)

VECTOR(VEC_unexp, M_EXCEPT)
	PANIC("unexpected exception")
	END(VEC_unexp)

/*
 * Primitives
 */

LEAF(clearcpe)
	.set	noreorder
	mfc0	t0,C0_SR		# get the Status Reg
	nop
	or	t1,t0,SR_PE		# OR in the Parity Err Bit to clear it
	mtc0	t1,C0_SR		# write back the status reg
	j	ra
	nop				# branch delay slot
	.set	reorder
	END(clearcpe)

#ifdef oldmips
/*
 * Interrupts: (8, 1,2 soft ints, 3-8 hard ints)
 *	8	Bus error/timeout/sec/ded
 *	7	profiling clock.
 *	6	fp interrupt
 *	5	sched clock.
 *	4	uart.
 *	3	vectored devices
 *	2	softnet
 *	1	softclock
 */
#else ultrix
/* ALERT- REMOVE splcons() and spl0() if we can so we have ONE convention */
/*
 * The following is true for PMAX:
 * Interrupts: (8, 1,2 soft ints, 3-8 hard ints)
 *	8	fpu interrupt		splfpu()|splhigh()
 *	7	memory/video		splmem()
 *	6	scheduling clock	splclock()
 *	5	dz			spltty()|splcons()
 *	4	lance			splimp()
 *	3	sii			splbio()
 *	2	softnet			splnet()
 *	1	softclock		splsoftclock()
 *	0	none			splnone()|spl0()
 *
 * In order to change the order you just have to change the defines
 * in cpu.h and these LEAF routines (and hope the rest of the code works).
 *
 * MIPSFAIR uses a different interrupt scheme. To allow these splxxx
 * routine to do the right thing on different systems, the proper interrupt
 * masks are initialized at system startup time. Here we just fetch the
 * proper mask.
 */
#endif ultrix

IMPORT(splm,SPLMSIZE*4)
IMPORT(cpu,4)
#ifdef DS5000_100
IMPORT(splm,SPLMSIZE*4)
IMPORT(kn02ba_sim,SPLMSIZE*4)
#endif

/*
 * spl0: Don't block against anything.
 *	This should work on all machines.
 *	Returns the previous interrupt priority level in v0.
 */
LEAF(spl0)
#ifdef DS5000_100
#ifdef MIPS_ARCH_SPL_ORIG
	lw	v0,mips_spl_arch_type
	beq	v0,zero,spl0_kn01
#endif
	# KN02BA path: interrupts are masked both in the system interrupt
	# mask register and in the CP0 status register mask tables.
	.set	noreorder
	li	v1, KN02BA_SPL_MASK	# disable interrupts
	mtc0	v1, C0_SR
	nop
	lw	v0, ipllevel		# load return value with current ipl
	li	v1, SPLNONE
	sw	v1, ipllevel		# store new ipl into ipllevel
	lw	a0, kn02ba_sim+SPLNONE*4# get system interrupt mask value
	lw	v1, splm+SPLNONE*4	# get status register mask value
	sw	a0, KN02BA_SIRM_K1ADDR	# load mask register with value
	lw	a0, KN02BA_SIRM_K1ADDR	# reread address to flush write buffer
	mtc0	v1, C0_SR		# load status register with value
	j	ra
	nop				# branch delay slot
	.set	reorder
spl0_kn01:
#endif
#ifdef MIPS_ARCH_SPL_ORIG
	# KN01 path: enable current interrupts and all interrupt mask bits.
	.set	noreorder
	mfc0	v0,C0_SR		# return previous SR
	li	v1,SR_IEC|SR_IMASK0
	mtc0	v1,C0_SR
	j	ra
	nop				# branch delay slot
	.set	reorder
#endif
	END(spl0)

/*
 * splnone: Don't block against anything.
 *	This should work on all machines.
 */
LEAF(splnone)
#ifdef DS5000_100
#ifdef MIPS_ARCH_SPL_ORIG
	lw	v0,mips_spl_arch_type
	beq	v0,zero,splnone_kn01
#endif
	# KN02BA path; identical in effect to spl0 above.
	.set	noreorder
	li	v1, KN02BA_SPL_MASK	# disable interrupts
	mtc0	v1, C0_SR
	nop
	lw	v0, ipllevel		# load return value with current ipl
	li	v1, SPLNONE
	sw	v1, ipllevel		# store new ipl into ipllevel
	lw	a0, kn02ba_sim+SPLNONE*4# get system interrupt mask value
	lw	v1, splm+SPLNONE*4	# get status register mask value
	sw	a0, KN02BA_SIRM_K1ADDR	# load mask register with value
	lw	a0, KN02BA_SIRM_K1ADDR	# reread address to flush write buffer
	mtc0	v1, C0_SR		# load status register with value
	j	ra
	nop				# branch delay slot
	.set	reorder
splnone_kn01:
#endif
#ifdef MIPS_ARCH_SPL_ORIG
	.set	noreorder
	mfc0	v0,C0_SR		# return previous SR
	li	v1,SR_IEC|SR_IMASK0
	mtc0	v1,C0_SR
	j	ra
	nop				# branch delay slot
	.set	reorder
#endif
	END(splnone)

/*
 * splsoftclock: block against clock software interrupts (level 1 softint).
 */
LEAF(splsoftclock)
#ifdef DS5000_100
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -