// except.s
lw k0, TcxFir(s0) // (k0) = exception return address
lw gp, TcxIntGp(s0)
lw s0, TcxIntS0(s0) // Restore thread's permanent registers
#if R4000
mtc0 k0, epc // set continuation address for Eret
#endif
move k1, zero // (k1) = 0 (no atomic op in progress)
#if R3000
j k0 // return to original caller
rfe // restore user status
#elif R4000
nop
eret // restore user status
nop
nop
eret
#else
#error Unknown processor type
#endif
// No threads to run. Check the resched flag; if it is not set, call OEMIdle.
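// Rough C sketch of the idle sequence below (OEMIdle, ReschedFlag and BasePSR
// are from this file; the helper names used here are illustrative only):
//
//      InterruptsOff();
//      if (ReschedFlag <= 0) {             // no reschedule pending
//          OEMIdle();                      // OEM may stop clocks, wait for wakeup
//          ReschedFlag = 1;                // force a dispatch pass after idling
//      }
//      SetPSR(BasePSR | INT_ENABLE);       // label 68: re-enable interrupts
//      goto resched;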
Idle: mtc0 zero, psr // all interrupts off
nop //
nop // 3 cycle hazard
nop
lh v0, ReschedFlag // (v0) = resched + nested exception
lw a0, BasePSR // (a0) = global default status value
bgtz v0, 68f // reschedule pending
nop
jal OEMIdle // let OEM stop clocks, etc.
nop
li v1, 1
sb v1, ReschedFlag
// Pending reschedule found during final dispatch, re-enable interrupts and
// try again.
//
// (a0) = BasePSR
68: lw a0, BasePSR // (a0) = global default status value
move k1, zero // (k1) = 0 (no atomic op in progress)
#if R3000
ori a0, 1 << PSR_IEC // (a0) = base status + int enable
#else
ori a0, 1 << PSR_IE // (a0) = base status + int enable
#endif
j resched
mtc0 a0, psr // re-enable interrupts
//++++++
// The power off flag has been set. Call DoPowerOff() to notify the file system
// and window manager and invoke OEMPowerOff.
90: jal DoPowerOff // call power off handler
sb zero, BPowerOff // clear power off flag
b resched
move s0, zero // no current thread
#if R4000
// TLB load or store exception. These exceptions are routed to the general
// exception vector either because the TLBMiss handler could not translate
// the virtual address or because there is a matching but invalid entry loaded
// into the TLB. If a TLB probe succeeds, the fault is due to an invalid entry.
// In that case, the page tables are examined to find a matching valid entry
// for the page causing the fault. If a matching entry is found, then the TLB
// is updated and the instruction is restarted. If no match is found or the
// TLB probe fails, the exception is processed via the normal exception path.
//
// (k0) = EPC (may be updated for InterlockedXXX API)
// (k1) = cause register
// interrupted T0 saved in SaveT0.
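// Rough C sketch of the refill path below. The constant and field names
// (VA_SECTION, SECTION_MASK, mb_pages, mb_lock, PG_VALID_MASK, CurAKey) come
// from the assembly; the MEMBLOCK layout shown is only an illustration of the
// indexing, not the real structure definition.
//
//      ulong section = (va >> VA_SECTION) & SECTION_MASK;
//      ulong block   = (va >> VA_BLOCK)   & BLOCK_MASK;
//      ulong page    = (va >> VA_PAGE)    & PAGE_MASK;
//      MEMBLOCK **blockTable = SectionTable[section];
//      MEMBLOCK *mb = blockTable[block];
//      if ((long)mb >= 0)                          goto general;  // unmapped memblock
//      if (!(mb->mb_pages[page] & PG_VALID_MASK))  goto general;  // invalid page
//      if (!(mb->mb_lock & CurAKey))               goto general;  // access not allowed
//      page &= ~1;                                 // even/odd pair for EntryLo0/1
//      EntryLo0 = mb->mb_pages[page];
//      EntryLo1 = mb->mb_pages[page + 1];
//      tlbwi();                                    // overwrite the stale entry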
100: mtc0 k0, epc // update resume address
nop
mfc0 k0, badvaddr // (k0) = faulting virtual address
tlbp // 3 cycle hazard
bltz k0, 105f // kernel address
srl t0, k0, VA_SECTION-2 //
and t0, SECTION_MASK*4 // (t0) = section * 4
mfc0 k1, index // (k1) = TLB index of invalid entry
lw t0, SectionTable(t0) // (t0) = ptr to block table
#if R4100
bltz k1, 109f // not an invalid entry fault
#else
bltz k1, 105f // not an invalid entry fault
#endif
101: srl k1, k0, VA_BLOCK-2 //
and k1, BLOCK_MASK*4 // (k1) = block * 4
addu t0, k1 // (t0) = block table entry
lw t0, (t0) // (t0) = ptr to MEMBLOCK structure
srl k0, VA_PAGE-2 //
and k0, PAGE_MASK*4 // (k0) = page # * 4
bgez t0, 105f // unmapped memblock
addu k1, k0, t0 // (k1) = ptr to page entry
lw k1, mb_pages(k1) // (k1) = page table entry
and k0, 0xfff8 // (k0) = even page index * 4
and k1, PG_VALID_MASK // (k1) = 0 if invalid page
beq zero, k1, 105f // the page is invalid
addu k0, t0 // (k0) = ptr to even page of even/odd pair
lw k1, mb_lock(t0) // (k1) = block access lock
lw t0, CurAKey //
and k1, t0 // (k1) = 0 if access not allowed
lw t0, mb_pages(k0) // (t0) = even page info
beq zero, k1, 105f // access not allowed
lw k0, mb_pages+4(k0) // (k0) = odd page info
mtc0 t0, entrylo0 // set even entry to write into TLB
mtc0 k0, entrylo1 // set odd entry to write into TLB
nop
nop
tlbwi // write indexed entry into TLB
nop // 3 cycle hazard
nop //
nop
104:
move k1, zero // no atomic op in progress
lw t0, SaveT0 // restore t0 value
eret //
nop // errata...
nop //
eret //
// Invalid access. Reload k0 & k1 and return to general exception processing.
105: mfc0 k1, cause
b 5b
mfc0 k0, epc
#if R4100
// Sometimes TLB misses go to the general exception vector instead
// of the TLB miss vector. Select an entry to replace by copying random to index.
109: mfc0 k1, random
j 101b
mtc0 k1, index
#endif
#else // R3000 version
// TLB load or store exception. These exceptions are routed to the general
// exception vector either because the TLBMiss handler could not translate
// the virtual address or because there is a matching but invalid entry loaded
// into the TLB.
//
// (k0) = EPC (may be updated for InterlockedXXX API)
// (k1) = cause register
// interrupted T0 saved in SaveT0.
100: sw k0, SaveK1 // save resume address (stored in the SaveK1 slot, not k1)
mfc0 k0, context // (k0) = faulting VPN * 4
li k1, (SECTION_MASK+1)*4
srl t0, k0, CTX_SECTION-2
sltu k1, t0, k1 // (k1) = 0 if address out of range
beq k1, zero, 105f // address out of range
and t0, SECTION_MASK*4 // (t0) = section * 4
lw t0, SectionTable(t0) // (t0) = ptr to block table
srl k1, k0, CTX_BLOCK-2
and k1, BLOCK_MASK*4 // (k1) = block * 4
addu t0, k1 // (t0) = block table entry
lw t0, (t0) // (t0) = ptr to MEMBLOCK structure
and k0, PAGE_MASK*4 // (k0) = page # * 4
bgez t0, 105f // unmapped memblock
addu k0, t0 // (k0) = ptr to page entry
lw k1, mb_lock(t0) // (k1) = block access lock
lw t0, CurAKey
lw k0, mb_pages(k0) // (k0) = page info
and k1, t0 // (k1) = 0 if access not allowed
beq zero, k1, 105f // access not allowed
mtc0 k0, entrylo // set info to write into TLB
blez k0, 105f // invalid entry
lw t0, SaveT0
tlbwr // write entry randomly into TLB
nop // 3 cycle hazard
nop
nop
lw k0, SaveK1 // (k0) = resume address
move k1, zero // no atomic op in progress
j k0
rfe
// Invalid access. Reload k0 & k1 and return to general exception processing.
105: mfc0 k1, cause
b 5b
lw k0, SaveK1
#endif
.end GeneralExceptionP
// Stack structure during API call processing.
.struct 0
apiArg: .space 4 * 4 // argument register save area
apiSaveRet: .space 4 // return value or return address
apiMode: .space 4 // (pMode) argument
apiSaveGp: .space 4 // extra cpu dependent info (Global Pointer)
apiSaveRa: .space 4 // for unwinding
size_api_args: // length of stack frame
apiArg0: .space 4 // caller argument save area
apiArg1: .space 4
apiArg2: .space 4
apiArg3: .space 4
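// Equivalent C picture of this frame (hypothetical struct; offsets follow the
// .struct declarations above, each slot is 4 bytes on 32-bit MIPS):
//
//      struct ApiFrame {
//          unsigned long apiArg[4];    // argument register save area (a0-a3)
//          unsigned long apiSaveRet;   // return value or return address
//          unsigned long apiMode;      // (pMode) argument
//          unsigned long apiSaveGp;    // saved global pointer
//          unsigned long apiSaveRa;    // return address for unwinding
//      };                              // sizeof == size_api_args
//      // apiArg0..apiArg3 lie just past this frame, in the caller's
//      // outgoing-argument save area.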
NESTED_ENTRY(APICall, 0, zero)
//++
// The following code is never executed. Its purpose is to support unwinding
// through the calls to ObjectCall or ServerCallReturn.
//--
.set noreorder
.set noat
subu sp, size_api_args
sw ra, apiSaveRa(sp) // unwinder: (ra) = APICallReturn
PROLOGUE_END
// Process an API Call or return.
//
// (k0) = EPC (encodes the API set & method index)
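// Decoding sketch: an API call is a jump to a reserved address, so the
// trapping PC itself identifies the method (FIRST_METHOD and APICALL_SCALE
// as used in the code below):
//
//      long index = (long)(epc - FIRST_METHOD) / APICALL_SCALE;
//      if (index == -1)
//          /* API return: handled at label 250 */ ;
//      else
//          /* dispatch method 'index' via ObjectCall() */ ;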
200: lb t0, KNest // (t0) = kernel nest depth
mfc0 t1, psr // (t1) = processor status
blez t0, 5b // non-preemptible, API Calls not allowed
subu t0, k0, FIRST_METHOD
lw t3, BasePSR
move k1, zero // reset atomic op. flag
or t3, 1 // (t3) = current interrupt mask + int enable
#ifdef MIPS_HAS_FPU
lw t8, g_CurFPUOwner
lw t7, CurThdPtr
bne t7, t8, 201f
lui t9, 0x2000
or t3, t9
201:
#endif
mtc0 t3, psr // enable interrupts
and t1, MODE_MASK // (t1) = thread's execution mode
subu sp, size_api_args // make room for new args + temps
#if APICALL_SCALE == 2
sra t0, 1 // (t0) = method index
#elif APICALL_SCALE == 4
sra t0, 2 // (t0) = method index
#else
#error Invalid value for APICALL_SCALE
#endif
//--- interrupts enabled and preemptible
li t9, SYSCALL_RETURN
sw t9, apiSaveRa(sp) // for the unwinder
addu t3, t0, 1 // (t3) = 0 iff API return
beq zero, t3, 250f // go process API return
sw t1, apiMode(sp)
// Save api arguments onto the stack
sw a0, apiArg0(sp)
sw a1, apiArg1(sp)
sw a2, apiArg2(sp)
sw a3, apiArg3(sp)
move a3, t0 // (a3) = method index
addu a2, sp, apiArg0 // (a2) = ptr to function args
move a1, ra // (a1) = return address
jal ObjectCall
addu a0, sp, apiMode // (a0) = pMode
// Invoke server function. If the thread is running in kernel mode, then
// we just call the function directly from here.
//
// (v0) = address of server function
lw t0, apiMode(sp) // (t0) = mode to invoke the function in
lw a0, apiArg0(sp) // reload argument registers.
lw a1, apiArg1(sp)
lw a2, apiArg2(sp)
and t1, t0, 1 << PSR_PMODE
beq t1, zero, 210f // invoke function in kernel mode
lw a3, apiArg3(sp)
// Call to user mode server. To do this: build a new PSR value from the thread's mode
// and BasePSR. This must be done with interrupts disabled so that BasePSR is not
// changing.
//
// (t0) = new mode bits
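// Status construction sketch (the 0x20000000 value is CU1, the FPU-usable
// bit, built below via "lui t9, 0x2000"):
//
//      newPsr = BasePSR | threadModeBits;      // mode bits from apiMode
//      if (CurThdPtr == g_CurFPUOwner)
//          newPsr |= 0x20000000;               // keep coprocessor 1 usable
//      SetPSR(newPsr);                         // then eret/jump to the server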
mtc0 zero, psr // all interrupts off
nop // 3 cycle hazard
li ra, SYSCALL_RETURN
addu sp, size_api_args
lw t1, BasePSR
#if R4000
mtc0 v0, epc
#endif
#ifdef MIPS_HAS_FPU
lw t8, g_CurFPUOwner
lw t7, CurThdPtr
bne t7, t8, 202f
lui t9, 0x2000
or t1, t9
202:
#endif
or t1, t0 // (t1) = merged status
mtc0 t1, psr // reload status
nop
#if R4000
nop
nop
eret
#elif R3000
j v0
rfe
#else
#error Unknown processor type
#endif
// Call to kernel mode server. Call the function directly to save a trap
// on the return.
//
// (v0) = address of function
// (a0-a3) = function arguments
210: jal v0
addu sp, size_api_args // remove extra stuff from the stack
ALTERNATE_ENTRY(APICallReturn)
subu sp, size_api_args // recreate entry frame
li t0, KERNEL_MODE
sw t0, apiMode(sp)
// Fall through as if we had taken a trap to get here.
// Return from an API call. Pop the thread's CALLSTACK list and restore the
// thread's current process, access key, and mode.
//
// (v0) = api return value (must be preserved)
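// Return sketch (ServerCallReturn is the C helper called below; it pops the
// thread's CALLSTACK entry and yields the caller's resume address):
//
//      savedRet = v0;                          // preserve the API return value
//      resume   = ServerCallReturn(&apiMode);
//      if (apiMode & (1 << PSR_PMODE))
//          /* rebuild PSR and eret back to user mode at 'resume' */ ;
//      else
//          /* jump directly to 'resume' in kernel mode (label 255) */ ;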
250: sw v0, apiSaveRet(sp) // save return value
jal ServerCallReturn
addu a0, sp, apiMode // (a0) = pMode
lw t0, apiMode(sp) // (t0) = mode to return to
move ra, v0 // (ra) = return address
and t1, t0, 1 << PSR_PMODE
beq t1, zero, 255f // returning to kernel mode
lw v0, apiSaveRet(sp)
// Return to user mode. To do this: build a new PSR value from the thread's mode
// and BasePSR. This must be done with interrupts disabled so that BasePSR is not
// changing.
//
// (t0) = new mode bits
mtc0 zero, psr // all interrupts off
nop // 3 cycle hazard
nop
addu sp, size_api_args
lw t1, BasePSR
#if R4000
mtc0 ra, epc
#endif
#ifdef MIPS_HAS_FPU
lw t8, g_CurFPUOwner
lw t7, CurThdPtr
bne t7, t8, 251f
lui t9, 0x2000
or t1, t9
251:
#endif
or t1, t0 // (t1) = merged status
mtc0 t1, psr // reload status
nop
#if R4000
nop
nop
eret
#elif R3000
j ra
rfe
#else
#error Unknown processor type
#endif
// Return to kernel mode.
255: j ra
addu sp, size_api_args // remove extra stuff from the stack
END_REGION(GeneralException_End)
.set at
.set reorder
.end APICall
LEAF_ENTRY(DisabledInterruptHandler)
// This routine services interrupts which have been disabled. It clears the
// interrupt's mask bit in the PSR (and in BasePSR) to prevent further
// occurrences and treats the interrupt as a NOP.
//
// Entry (a0) = interrupt level * 4
// Exit (v0) = SYSINTR_NOP
// Uses a0, a1, v0
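// Mask computation sketch (assumes hardware interrupt level N maps to
// status-register bit (0x400 << N), i.e. IM2..IM7, as the shifts below do):
//
//      ulong mask = ~(0x400UL << level);       // level = a0 / 4
//      PSR     &= mask;                        // disable in the live status
//      BasePSR &= mask;                        // and in the saved default status
//      return SYSINTR_NOP;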
.set noreorder
srl a0, 2
li v0, 0x400
sllv a0, v0, a0
li v0, -1
xor a0, v0 // (a0) = ~(intr. mask)
mfc0 v0, psr
lw a1, BasePSR
and v0, a0 // (v0) = psr w/intr disabled
mtc0 v0, psr
and a1, a0 // (a0) = base PSR w/intr disabled
sw a1, BasePSR
j ra
li v0, SYSINTR_NOP
.end DisabledInterruptHandler
LEAF_ENTRY(FalseInterrupt)
.set noreorder
j ra
li v0, SYSINTR_NOP
.set reorder
.end FalseInterrupt
// CaptureContext is invoked in kernel context on the user thread's stack to
// build a context structure to be used for exception unwinding.
//
// (sp) = aligned stack pointer
LEAF_ENTRY(CaptureContext)
.set noreorder
.set noat
subu sp, ContextFrameLength // (sp) = ptr to CONTEXT buffer
sw zero, 0(sp) // make sure that the stack is addressable
.end CaptureContext
NESTED_ENTRY(xxCaptureContext, ContextFrameLength, zero)
.set noreorder
.set noat
sw sp, CxIntSp(sp) // fixed up by ExceptionDispatch
sw a0, CxIntA0(sp)
sw a1, CxIntA1(sp)
sw a2, CxIntA2(sp)
sw a3, CxIntA3(sp)
sw gp, CxIntGp(sp)
sw s0, CxIntS0(sp)
sw s1, CxIntS1(sp)
sw s2, CxIntS2(sp)
sw s3, CxIntS3(sp)
sw s4, CxIntS4(sp)
sw s5, CxIntS5(sp)
sw s6, CxIntS6(sp)
sw s7, CxIntS7(sp)
sw s8, CxIntS8(sp)
sw v0, CxIntV0(sp)
sw v1, CxIntV1(sp)
sw AT, CxIntAt(sp)
sw ra, CxIntRa(sp)
sw t0, CxIntT0(sp)
sw t1, CxIntT1(sp)
sw t2, CxIntT2(sp)
sw t3, CxIntT3(sp)
mfhi t0
mflo t1