// except.s — MIPS exception dispatch, scheduler, and TLB-miss handling
// (web-scrape page header removed)
// --- Exception dispatch tail (entry/prologue is above this chunk) ---
// On entry here (inferred from use — confirm against the code above):
//   (k0) = PSR of the interrupted context
//   (s0) = ptr to current THREAD structure (Tcx* register save area)
//   (a1) = nonzero if HandleException should be invoked
and v0, k0, MODE_MASK // (v0) = thread's mode
// clear User mode, EXL & ERL bits in PSR
// ori-then-xori with the same mask forces exactly those three bits to 0,
// whatever their previous values, without touching the rest of PSR.
ori k0, (1<<PSR_EXL) | (1<<PSR_ERL) | (1<<PSR_PMODE)
xori k0, (1<<PSR_EXL) | (1<<PSR_ERL) | (1<<PSR_PMODE)
mtc0 k0, psr // enable interrupts
sw v0, TcxPsr(s0) // record the original mode in Thread struct
// Save the callee-saved ("permanent") registers into the thread context.
S_REG s1, TcxIntS1(s0)
S_REG s2, TcxIntS2(s0)
S_REG s3, TcxIntS3(s0)
S_REG s4, TcxIntS4(s0)
S_REG s5, TcxIntS5(s0)
S_REG s6, TcxIntS6(s0)
S_REG s7, TcxIntS7(s0)
S_REG s8, TcxIntS8(s0)
li t0, CONTEXT_CONTROL | CONTEXT_INTEGER
beq a1, zero, 50f // (a1)==0: no handler to run, go straight to reschedule
sw t0, TcxContextFlags(s0) // (delay slot — executes either way) mark saved context fields
jal HandleException // jump to handler
move a0, s0 // (delay slot) (a0) = ptr to thread
bne v0, zero, 60f // handler returned nonzero: resume current thread
nop
//
// The current thread has been blocked or a reschedule is pending.
// Save the remaining CPU state and call the scheduler to obtain the
// highest priority thread to run.
//
// (s0) = CurThdPtr.
//
// Loops until neither ReschedFlag nor dwKCRes requests another pass,
// then swaps in the chosen thread's process context (ASID, section table).
resched:
50:
lh t0, ReschedFlag
beq zero, t0, 52f // no reschedule requested
nop
jal NextThread // let scheduler pick the next thread
sh zero, ReschedFlag // (delay slot) clear reschedule, still in kernel
52: lw s1, dwKCRes
beq zero, s1, 53f // no kernel-call reschedule pending
nop
jal KCNextThread
sw zero, dwKCRes // (delay slot) clear KCreschedule
lw s1, dwKCRes
bne zero, s1, 50b // KCNextThread re-requested: start the loop over
nop
53:
la t0, RunList
lw v0, 4(t0) // (v0) = head runnable thread — TODO confirm RunList layout
beq zero, v0, Idle // no threads to run, do idle processing
nop
beq s0, v0, 60f // same thread chosen: resume current thread
move s0, v0 // (delay slot — executes either way) (s0) = new current thread
lw t0, ThHandle(s0) // (t0) = thread's handle
sw s0, CurThdPtr // remember current THREAD pointer
sw t0, hCurThread // and current thread handle
lw t0, ThProc(s0) // (t0) = ptr to current process
lw t4, ThAKey(s0) // (t4) = thread's access key
lw v0, PrcHandle(t0) // (v0) = handle of current process
lb t1, PrcID(t0) // (t1) = process ID
sw t0, CurPrcPtr // remember current process pointer
sw v0, hCurProc // and current process handle
beq t1, zero, 58f // slot 1 special case (process ID 0 uses NKSection)
lw t3, ThTlsPtr(s0) // (t3) == tlsPtr (delay slot)
// not slot 1, use section table
lw t2, PrcVMBase(t0) // (t2) = memory section base address
srl t2, VA_SECTION-2 // (t2) = index into section table
b 59f
lw t2, SectionTable(t2) // (t2) = process's memory section (delay slot)
58: la t2, NKSection // (t2) = &NKSection
59: // rest of the common code
sw t3, lpvTls
sw t4, CurAKey // save access key for TLB handler
#if ENTRYHI_PID != 0
sll t1, ENTRYHI_PID // shift PID into the EntryHi ASID field
#endif
mtc0 t1, entryhi // set ASID
sw t2, SectionTable(zero) // swap in default process slot
// Restore the complete thread state.
//
// (s0) = ptr to thread structure
60: L_REG s1, TcxIntS1(s0) // Restore thread's permanent registers
L_REG s2, TcxIntS2(s0)
L_REG s3, TcxIntS3(s0)
L_REG s4, TcxIntS4(s0)
L_REG s5, TcxIntS5(s0)
L_REG s6, TcxIntS6(s0)
lw v0, TcxContextFlags(s0)
L_REG s7, TcxIntS7(s0)
andi v1, v0, CONTEXT_INTEGER & 0xFF // was the full integer context saved?
beq v1, zero, 65f // no: skip volatile-register restore
L_REG s8, TcxIntS8(s0) // (delay slot — executes either way)
L_REG t0, TcxIntHi(s0) // (t0) = HI mul/div register
L_REG t1, TcxIntLo(s0) // (t1) = LO mul/div register
mthi t0
mtlo t1
L_REG v1, TcxIntV1(s0)
L_REG t0, TcxIntT0(s0)
L_REG t1, TcxIntT1(s0)
L_REG t2, TcxIntT2(s0)
L_REG t3, TcxIntT3(s0)
L_REG t4, TcxIntT4(s0)
L_REG t5, TcxIntT5(s0)
L_REG t6, TcxIntT6(s0)
L_REG t7, TcxIntT7(s0)
L_REG t8, TcxIntT8(s0)
L_REG t9, TcxIntT9(s0)
L_REG AT, TcxIntAt(s0)
L_REG a0, TcxIntA0(s0)
L_REG a1, TcxIntA1(s0)
L_REG a2, TcxIntA2(s0)
L_REG a3, TcxIntA3(s0)
65:
CP0_STOP_PREFETCH(mtc0, zero, psr, v0); // interrupts off while finishing the switch
nop
L_REG ra, TcxIntRa(s0)
L_REG v0, TcxIntV0(s0) // restore return value
// ReschedFlag appears to pack a resched request in the low byte and an
// exception-nesting count in units of 256 above it — TODO confirm layout.
lh k1, ReschedFlag // (k1) = resched + nested exception
li k0, 1
beq k1, k0, 68f // reschedule pending
addu k1, 256 // (delay slot) remove one level of nesting
sh k1, ReschedFlag
lw k0, BasePSR // (k0) = global default status value
lw k1, TcxPsr(s0) // (k1) = thread's default status
// MUST use lw instead of L_REG so sp get automatic sign extension.
// The reason is that we do arithmetics on SP during exception handling
// and SP can become a positive 64 bit value.
lw sp, TcxIntSp(s0) // restore stack pointer
or k1, k0 // (k1) = thread + global status
#ifdef MIPS_HAS_FPU
lw k0, g_CurFPUOwner
bne k0, s0, 66f // FPU not owned by this thread: leave coprocessor disabled
nop
la k0, dwNKCoProcEnableBits
lw k0, (k0)
or k1, k0 // enable coprocessor bits for the FPU owner
66:
#endif
mtc0 k1, psr // restore status
lw k0, TcxFir(s0) // (k0) = exception return address
L_REG gp, TcxIntGp(s0)
L_REG s0, TcxIntS0(s0) // Restore thread's permanent registers
mtc0 k0, epc // set continuation address for Eret
move k1, zero // (k1) = 0 (no atomic op in progress)
ssnop // super scalar core requires 4 integer
ssnop // instructions to guarantee
ssnop // a 2 cycle hazard
eret // restore user status
nop
nop
eret // NOTE(review): duplicated eret looks like a CPU-errata workaround — confirm
//
// No threads to run. If the resched flag is not set,
// call OEMIdle; afterwards request a reschedule and retry.
//
Idle:
CP0_STOP_PREFETCH(mtc0, zero, psr, v0); // all interrupts off
nop //
nop // 3 cycle hazard
nop
lh v0, ReschedFlag // (v0) = resched + nested exception
lw a0, BasePSR // (a0) = global default status value
bgtz v0, 68f // reschedule pending
nop
jal OEMIdle // let OEM stop clocks, etc.
nop
li v1, 1
sb v1, ReschedFlag // byte store: sets only the resched request byte
//
// Pending reschedule found during final dispatch, re-enable
// interrupts and try again.
//
// (a0) = BasePSR
//
68: lw a0, BasePSR // (a0) = global default status value
move k1, zero // (k1) = 0 (no atomic op in progress)
ori a0, 1 << PSR_IE // (a0) = current status + int enable
j resched
mtc0 a0, psr // (delay slot) re-enable interrupts
// TLB load or store exception. These exceptions are routed to the general
// exception vector either because the TLBMiss handler could not translate
// the virtual address or because there is a matching but invalid entry loaded
// into the TLB. If a TLB probe suceeds then, this is due to an invalid entry.
// In that case, the page tables are examined to find a matching valid entry
// for the page causing the fault. If a matching entry is found, then the TLB
// is updated and the instruction is restarted. If no match is found or the
// TLB probe fails, the exception is processed via the normal exception path.
//
// (k0) = EPC (maybe updated for InterlockedXXX API)
// (k1) = cause register
// interrupted T0 saved in SaveT0.
100: mtc0 k0, epc // update resume address
nop
MFC_REG k0, badvaddr // (k0) = faulting virtual address
#ifdef _MIPS64
sll k1, k0, 0 // Sign extend the 32-bit address.
bne k0, k1, 125f // Invalid 32-bit address - generate exception.
#endif // _MIPS64
tlbp // probe TLB for badvaddr; 3 cycle hazard before reading Index
bltz k0, 120f // kernel address (KSEG: top bit set)
srl t0, k0, VA_SECTION-2 // (delay slot)
and t0, SECTION_MASK*4 // (t0) = section * 4
lw t0, SectionTable(t0) // (t0) = ptr to block table
101:
// (t0) = pscn (pointer to SECTION)
// (k0) = badvaddr
srl k1, k0, VA_BLOCK-2 //
and k1, BLOCK_MASK*4 // (k1) = block * 4
addu t0, k1 // (t0) = block table entry
lw t0, (t0) // (t0) = ptr to MEMBLOCK structure
srl k0, VA_PAGE-2 //
and k0, PAGE_MASK*4 // (k0) = page # * 4
bgez t0, 125f // unmapped memblock (non-pointer sentinel — confirm encoding)
addu k1, k0, t0 // (delay slot) (k1) = ptr to page entry
lw k1, mb_pages(k1) // (k1) = page table entry
and k0, 0xfff8 // (k0) = even page offset (clears low bits of page#*4)
and k1, PG_VALID_MASK // (k1) = 0 if invalid page
beq zero, k1, 125f // the page is invalid
addu k0, t0 // (delay slot) (k0) = ptr to even page of even/odd pair
lw k1, mb_lock(t0) // (k1) = block access lock
lw t0, CurAKey // (t0) = current thread's access key
and k1, t0 // (k1) = 0 if access not allowed
lw t0, mb_pages(k0) // (t0) = even page info
beq zero, k1, 140f // access not allowed
lw k0, mb_pages+4(k0) // (delay slot) (k0) = odd page info
102:
//
// entry is valid
// (t0) = even page entry
// (k0) = odd page entry
//
mfc0 k1, index // (k1) = index (negative if tlbp found no match)
mtc0 t0, entrylo0 // set even entry to write into TLB
mtc0 k0, entrylo1 // set odd entry to write into TLB
ssnop // super scalar core requires 4 integer
ssnop // instructions to guarantee
ssnop // a 2 cycle hazard
bltz k1, 103f // not an invalid entry fault
nop
tlbwi // write indexed entry into TLB (replace the invalid entry)
b 104f // 3 cycle hazard
nop //
103:
//
// no match entry in TLB, use random
//
tlbwr // write to random entry of TLB
nop // 3 cycle hazard
nop
104:
move k1, zero // no atomic op in progress
L_REG t0, SaveT0 // restore t0 value
eret // resume the faulting instruction
nop // errata...
nop //
eret // second eret: errata workaround (see note above)
#define SECURE_VMBASE 0xc2000000
#define SHARED_SECTION 0x3e
120:
//
// Kernel address. Check if it's a mapped address
//
// (k0) = badaddr
//
// check if we're in IO space
li t0, NK_IO_BASE // (t0) = NK_IO_BASE
subu t0, k0, t0 // (t0) = badaddr - NK_IO_BASE
bgez t0, 150f // >= NK_IO_BASE, check IO addresses
li k1, SECURE_VMBASE // (k1) = SECURE_VMBASE (delay slot)
// check if we're in SECURE_SECTION
subu k1, k0, k1 // (k1) = badaddr - SECURE_VMBASE
la t0, NKSection // (t0) = &NKSection
bgez k1, 101b // in range? t0 already updated, get back to normal processing
nop
// Invalid access. Reload k0 & k1 and return to general exception processing.
125:
mfc0 k1, cause
b 5b // label 5 is above this chunk (general exception path)
mfc0 k0, epc // (delay slot)
140:
// everything okay except permission
// check if the address is in the shared section, grant read access if yes
// (t0) = even page entry
// (k0) = odd page entry
// check read access
mfc0 k1, cause
andi k1, k1, XCODE_MODIFY // (k1) = cause & XCODE_MODIFY
bnez k1, 125b // writing if non-zero --> fault
nop
// check if faulting address is in shared section
mfc0 k1, badvaddr // (k1) = fault address
srl k1, VA_SECTION // (k1) == (bva >> VA_SECTION)
sub k1, k1, SHARED_SECTION // (k1) -= SHARED_SECTION
bnez k1, 125b // not in shared section if k1 != 0
nop
// in shared section, asking read access, grant it
li k1, ~PG_PROT_WRITE // (k1) = bit mask for RO entry
and t0, t0, k1 // (t0) = even page with 'writable' bit cleared
b 102b // back to normal routine updating TLB
and k0, k0, k1 // (delay slot) (k0) = odd page with 'writable' bit cleared
150:
// (k0) = badaddr
li k1, NK_IO_END // (k1) = NK_IO_END
sltu t0, k0, k1 // (t0) = (badaddr < NK_IO_END)
beqz t0, 125b // fault if !(badaddr < NK_IO_END)
nop
// search map array (NOTE: static map array is sorted by VA),
// last entry is always (FFFFFFFF, 0, FFFFFFFF)
la t0, MapArray // (t0) = &MapArray[0]; search loop continues past this chunk
// (webpage keyboard-shortcut UI chrome removed; the MapArray search loop
//  continues in the portion of the file beyond this chunk)