; hwi.h62 -- DSP/BIOS hardware interrupt (HWI) support macros for the TMS320C6x
; Runtime initialization for HW ISRs
;
;#
;# Preconditions:
;# none
;#
;# Postconditions:
;# none
;#
;# Dependencies:
;# none
;#
;
;
;# ======== HWI_init ========
;# Runtime initialization for hardware ISRs.  Currently a no-op
;# placeholder; the register list below declares the registers this
;# macro is permitted to use (none are actually touched).
;
        .asg    "a0,b1,icr,ier", HWI_init$regs
HWI_init        .macro
; intentionally empty -- no runtime initialization required
        .endm
;
;# ======== HWI_startup ========
;
;#
;# Preconditions:
;# none
;#
;# Postconditions:
;# GIE == 1
;#
;# Dependencies:
;# Must come before SWI_startup, as GIE = 1 is a precondition to
;# SWI_startup and interrupts must be enabled before software interrupts
;# are allowed to run.
;#
;# Must follow CLK_startup to allow setting of PRD and timer control
;# register before interrupts are enabled.
;# Must follow PIP_startup to allow pipes to be ready before ISRs are
;# taken and I/O starts.
;#
; Register usage is inherited from HWI_enable, which does the work.
        .asg    ":HWI_enable$regs:", HWI_startup$regs
HWI_startup     .macro
; globally enable interrupts (establishes the GIE == 1 postcondition)
        HWI_enable
        .endm
        .global $bss                    ; linker-defined start of .bss; loaded into DP by HWI_enter
        .global HWI_D_ccmask            ; defined in hwi.s62; runtime PCC/DCC cache-control value
;
; Offsets (in words) into stack frame created by HWI_enter/HWI_exit.
; The frame holds the ISR scratch registers plus DP, IRP, CSR and AMR
; so that HWI_enter can zero AMR and re-enable interrupts for nesting.
;
HWI_NUMSTK      .set    12              ; frame size in words; must be even for 8-byte align
HWI_STKA0       .set    1               ; saved a0
HWI_STKA1       .set    2               ; saved a1
HWI_STKA2       .set    3               ; saved a2
HWI_STKA3       .set    4               ; saved a3
HWI_STKB0       .set    5               ; saved b0
HWI_STKB1       .set    6               ; saved b1
HWI_STKB2       .set    7               ; saved b2
HWI_STKB3       .set    8               ; saved b3
HWI_STKDP       .set    9               ; saved DP (b14, points to .bss)
HWI_STKIRP      .set    10              ; saved IRP (interrupt return pointer)
HWI_STKCSR      .set    11              ; saved CSR
HWI_STKAMR      .set    12              ; saved AMR
;
;# ======== HWI_enter ========
;
; Hardware ISR prologue
;
; HWI_enter ABMASK, CMASK, IEMASK, CCMASK
;
; ABMASK - Mask of A and B registers to save
; CMASK - Mask of control registers to save
; IEMASK - Mask of interrupts to disable in IER
; CCMASK - Mask of cache control
;
; For the C62, we need to maintain 8-byte stack alignment.
; Although C62_push does this, it does so at the expense of using twice
; as much stack space as necessary. We instead do a one-time stack
; adjustment followed by depositing elements into that stack frame.
;
;#
;# Preconditions:
;# interrupts are globally disabled, i.e. GIE == 0
;# b14 = pointer to start of .bss
;#
;# Postconditions:
;# none
;#
;# Constraints and Calling Environment:
;# This macro must be the first operation in an ISR that uses any
;# DSP/BIOS API calls that call the scheduler.
;#
;
        .global _HWI_D_spsave           ; holds the task SP while running on the ISR stack
        .global _HWI_STKTOP             ; lowest address of the ISR stack
        .global _HWI_STKBOTTOM          ; highest address of the ISR stack
        .global _HWI_D_isrFramePtr
        .asg    "a0,a1,amr,b0,b1,b14,b15,b2,b3,csr,ier", HWI_enter$regs
        .if     .TMS320C6400
; C6400 variant: A-side and B-side register masks are passed separately.
HWI_enter       .macro  AMASK, BMASK, CMASK, IEMASK, CCMASK
        HWI_enter_body IEMASK, CCMASK
; Save user registers (except ISR registers)
        C64_save :AMASK: & ~(C64_ISRA), :BMASK: & ~(C64_ISRB), :CMASK: & ~(C64_ISRC)
        stw     b3, *SP--[2]            ; save old IER (left in b3 by HWI_enter_body)
        .endm
        .else
; C6200 variant: a single combined A/B register mask (ABMASK).
HWI_enter       .macro  ABMASK, CMASK, IEMASK, CCMASK
        HWI_enter_body IEMASK, CCMASK
; Save user registers (except ISR registers)
        C62_save :ABMASK: & ~(C62_ISRAB), :CMASK: & ~(C62_ISRC)
        stw     b3, *SP--[2]            ; save old IER (left in b3 by HWI_enter_body)
        .endm
        .endif
;
;# ======== HWI_enter_body ========
;# Main body of HWI_enter macro.
;# The reason it moved here is because HWI_enter is different
;# for the C6400 but this part of code remains the same
;
HWI_enter_body  .macro  IEMASK, CCMASK
; IEMASK - mask of interrupts to disable in IER while this ISR runs
; CCMASK - cache-control value for the PCC/DCC fields of CSR;
;          0 selects the runtime value stored in HWI_D_ccmask
        CHK_nargs "HWI_enter", CCMASK   ; verify CCMASK (the last arg) was supplied
        .if ($symcmp(":CHK_status:", "error") = 0)
        .emsg "HWI_enter CCMASK error"
        .endif
; Note: global interrupts disabled by C62xx on entry into ISR
;
; Handle switchover to ISR stack.
;
; WARNING: The following code has a few cases of software
; pipelining, where a register is loaded with 'ldw' but the
; old (pre-ldw) value is still used in the 'ldw' latency
; slots.
;
        stw     b0, *SP--[2]            ; push b0; alloc 2 words (keeps 8-byte align)
||      mvkl    _HWI_STKBOTTOM, b0      ; highest address
        stw     b1, *+SP[1]             ; save b1 in the word above b0
||      mvkh    _HWI_STKBOTTOM, b0
        cmpgt   SP, b0, b0              ; b0 = 1 if SP is above the ISR stack
 [ b0]  b       notOnHWIStack?
 [!b0]  mvkl    _HWI_STKTOP, b1         ; lowest address
 [!b0]  mvkh    _HWI_STKTOP, b1
 [!b0]  cmplt   SP, b1, b1              ; b1 = 1 if SP is below the ISR stack
 [ b0]  mv      SP, b1                  ; (delay slot) b1 -> our 2-word frame
 [ b0]  addaw   SP, 2, SP               ; 2 = # words alloced by us
 [!b1]  b       onHWIStack?             ; SP already within ISR stack bounds
 [!b1]  ldw     *+SP[1], b1             ; old b1 can be used for 4 more cycles
 [!b1]  ldw     *++SP[2], b0            ; ld early, b0 used 3 cycles after br
        nop     2
 [ b1]  mv      SP, b1                  ; below ISR stack: b1 -> our frame
||[ b1] addaw   SP, 2, SP               ; 2 = # words alloced by us
notOnHWIStack?:
;
; The following operations below must be atomic:
;     SP = HWI_STKBOTTOM
;     HWI_D_spsave = SP
;
        ldw     *+b1[2], b0             ; reload caller's b0 from the 2-word frame
||      mvkl    _HWI_D_spsave, b0
        ldw     *+b1[1], b1             ; reload caller's b1 from the 2-word frame
||      mvkh    _HWI_D_spsave, b0
        stw     SP, *b0                 ; HWI_D_spsave = SP (remember task SP)
||      mvkl    _HWI_STKBOTTOM, SP      ; switch SP onto the ISR stack
        mvkh    _HWI_STKBOTTOM, SP
; no delay slots necessary to wait for above ldw's to
; complete since b0, b1 aren't needed below for 2 more
; cycles.
onHWIStack?:
;
; All GP registers are in their pre-interrupt state.
;
; Save ISR registers
        subaw   SP, HWI_NUMSTK, SP      ; allocate the 12-word ISR frame
        stw     a0, *+SP[HWI_STKA0]     ; save A0
||      mv      SP, a0                  ; setup A-side stack pointer
        stw     b0, *+SP[HWI_STKB0]
||      stw     a1, *+a0[HWI_STKA1]
||      mvc     irp, b0                 ; read IRP (interrupt return pointer)
        stw     b1, *+a0[HWI_STKB1]
||      stw     a2, *+SP[HWI_STKA2]
||      mv      b0, a1                  ; a1 = IRP
||      mvc     amr, b0                 ; read AMR
        stw     b2, *+SP[HWI_STKB2]
||      stw     a3, *+a0[HWI_STKA3]
||      mvkl    :IEMASK:, b0            ; setup mask
||      mv      b0, a2                  ; a2 = AMR
        stw     b3, *+SP[HWI_STKB3]
||      stw     a1, *+a0[HWI_STKIRP]    ; actual save of IRP
||      zero    b2                      ; setup for AMR set
||      mvkh    :IEMASK:, b0            ; setup mask
        stw     DP, *+SP[HWI_STKDP]
||      stw     a2, *+a0[HWI_STKAMR]    ; actual save of AMR
||      mvkl    $bss, DP
        mvkh    $bss, DP                ; DP -> start of .bss
        mvc     b2, amr                 ; AMR = 0 (linear addressing)
||      ldw     *+DP(SWI_D_lock), a1    ; load SWI_D_lock
; Disable maskable interrupts (no effect for NMIE)
        .if (:CCMASK: = 0)
        ldw     *+DP(HWI_D_ccmask), b2  ; load HWI_D_ccmask
        .else
        .var    CCMASKVAL
        .asg    0, CCMASKVAL
        .eval   :CCMASK:, CCMASKVAL
        mvk     :CCMASKVAL:, b2         ; use the CCMASK value
        .endif
        mvc     ier, b3                 ; get current IER (b3 is stored by HWI_enter)
||      xor     -1, b0, b0              ; flip mask bits
        and     b0, b3, b1              ; disable IEMASK bits
||      mvc     csr, b0                 ; get CSR
        mvc     b1, ier                 ; set new IER
||      stw     b0, *+SP[HWI_STKCSR]    ; save CSR
||      or      GIE, b0, b0             ; turn on GIE of CSR
        mvk     C62_CCFIELDS, b1
||      add     a1, 1, a1               ; a1 = SWI_D_lock + 1
        and     b1, b0, b1              ; extract pcc+dcc fields of csr
||      stw     a1, *+DP(SWI_D_lock)    ; SWI_D_lock++ (defer SWIs while in ISR)
        xor     b1, b0, b0              ; clear pcc+dcc fields of csr
        or      b2, b0, b0              ; change pcc and dcc fields
        mvc     b0, csr                 ; globally enable interrupts
                                        ; to allow nested interrupts
        .endm
;
;# ======== HWI_exit ========
; Hardware ISR epilogue
;
; HWI_exit ABMASK, CMASK, IERRESTOREMASK, CCMASK
;
; ABMASK - Mask of A and B registers to restore
; CMASK - Mask of control registers to restore
; IERRESTOREMASK - Mask of interrupts to restore in IER
; CCMASK - Mask of cache control
;
; For the C62, we need to maintain 8-byte stack alignment.
; Although C62_pop does this, it does so at the expense of using twice
; as much stack space as necessary. We instead do a one-time stack
; adjustment after loading elements from that stack frame.
;
;#
;# Preconditions:
;# b14 = pointer to start of .bss
;#
;# Postconditions:
;# none
;#
;# Constraints and Calling Environment:
;# This macro must be the last operation in an ISR that uses any BIOS
;# API calls.
;#
;
        .asg    "a0,a1,a2,a3,amr,b0,b1,b14,b15,b2,b3,csr,ier,irp",HWI_exit$regs
        .if     .TMS320C6400
; C6400 variant: A-side and B-side register masks are passed separately.
HWI_exit        .macro  AMASK, BMASK, CMASK, IERRESTOREMASK, CCMASK
        b       disable1?               ; decide whether a SWI needs to run
||      ldw     *+DP(SWI_D_curmask), b1
        ldw     *+DP(SWI_D_curset), b2
||      mvkl    :IERRESTOREMASK:, a1
        ldw     *+DP(SWI_D_lock), b0
        ldw     *++SP[2], a2            ; pop old IER pushed by HWI_enter
||      mvkh    :IERRESTOREMASK:, a1
||      b       disable2?
        mvkl    noSwi?, b3
        mvkh    noSwi?, b3              ; b3 = noSwi? (used by 'b b3' below)
disable1?:
        cmpgt   b1, b2, b2
        or      b0, b2, b2              ; b2 = 0 if need to run SWI
 [!b2]  b       exitISRexec?
||[!b2] ldw     *+DP(SWI_D_runaddr), b3
||[ b2] b       b3                      ; no SWI to run: continue at noSwi?
disable2?:
        and     a1, a2, a2              ; a2 = saved IER & restore mask
||      mvc     ier, b1
        or      a2, b1, b1              ; merge restored bits into current IER
        mvc     b1, ier
        nop     2
doswitch?:
; Reached from disable3? when a SWI must run after switching back off
; the ISR stack: save temps, enable GIE, call SWI_run().
        C64_save C64_ATEMPS & ~(C64_ISRA | :AMASK:), C64_BTEMPS & ~(C64_ISRB | :BMASK:), C64_CTEMPS & ~(C64_ISRC | :CMASK:)
        b       b3                      ; call SWI_run()
        mvc     csr, b3
        or      GIE, b3, b3
        mvc     b3, csr                 ; re-enable interrupts for SWI_run
        mvkl    runRet2?, b3
        mvkh    runRet2?, b3            ; return address for SWI_run
runRet2?:
        C64_restore C64_ATEMPS & ~(C64_ISRA | :AMASK:), C64_BTEMPS & ~(C64_ISRB | :BMASK:), C64_CTEMPS & ~(C64_ISRC | :CMASK:), 0
        b       noSwi?
||      ldw     *+DP(_HWI_D_spsave), b0 ; b0 = saved task SP
        zero    b2
        mvkl    _HWI_STKBOTTOM, a0
        mvkh    _HWI_STKBOTTOM, a0
        cmpeq   SP, a0, b1              ; still at ISR stack bottom?
 [ b1]  stw     b2, *+DP(_HWI_D_spsave) ; yes: clear HWI_D_spsave and
||[ b1] mv      b0, SP                  ; switch back to the task stack
exitISRexec?:
; SWI must run while still on the current stack: save temps and call
; SWI_run() (b3 = SWI_D_runaddr loaded above).
        C64_save C64_ATEMPS & ~(C64_ISRA | :AMASK:), C64_BTEMPS & ~(C64_ISRB | :BMASK:), C64_CTEMPS & ~(C64_ISRC | :CMASK:)
        b       b3                      ; call SWI_run()
        mvkl    runRet1?, b3
        mvkh    runRet1?, b3            ; return address for SWI_run
        nop     3
runRet1?:
        C64_restore C64_ATEMPS & ~(C64_ISRA | :AMASK:), C64_BTEMPS & ~(C64_ISRB | :BMASK:), C64_CTEMPS & ~(C64_ISRC | :CMASK:), 0
noSwi?:
; Restore the user registers saved by HWI_enter (SP restore deferred).
        C64_restore :AMASK: & ~(C64_ISRA), :BMASK: & ~(C64_ISRB), :CMASK: & ~(C64_ISRC), NOSP
        b       disable3?
||      ldw     *+DP(SWI_D_curmask), b1
||      mvc     csr, b3
        ldw     *+DP(SWI_D_curset), b2
||      and     ~GIE, b3, b3            ; clear GIE for the atomic exit sequence
||      mvkl    _HWI_STKBOTTOM, a2
        mvkh    _HWI_STKBOTTOM, a2
||      ldw     *+DP(SWI_D_lock), a3
        mvc     b3, csr                 ; globally disable interrupts
        mvk     C64_spoffset, b3        ; C64_spoffset set up by C64_restore
        mvkl    _HWI_STKTOP, a1
disable3?:
        cmpgt   b1, b2, b2              ; b2 = 0 if need to run SWI
||      mvkh    _HWI_STKTOP, a1
||      shl     b3, 2, b3               ; words -> bytes for the SP restore
        or      a3, b2, b2              ; also require SWI_D_lock == 0
||      sub     a3, 1, a3               ; a3 = SWI_D_lock - 1
 [!b2]  b       doswitch?
||[!b2] cmpgt   SP, a2, b0
||[!b2] ldw     *+DP(SWI_D_runaddr), b3
||[ b2] add     SP, b3, SP              ; do SP restore from above C64_restore
 [!b2]  cmplt   SP, a1, b1
||[ b2] zero    b1                      ; force NOP for b1 condition below
||[ b2] addaw   SP, HWI_NUMSTK, SP      ; pop the HWI_enter frame
||[ b2] mv      SP, a0                  ; a0 = A-side pointer to the old frame
 [!b2]  or      b0, b1, b1              ; b1 = 1 if we should switch SP
||[ b2] ldw     *+a0[HWI_STKCSR], a1
||[ b2] ldw     *+SP[HWI_STKIRP - HWI_NUMSTK], b1
||[ b2] cmpeq   SP, a2, b0              ; b0 = 1 if we should switch SP
;
; at this point: b1 = 1 if not on ISR stack && need to run SWI
;                b0 = 1 if not running SWI && SP is at HWI_STKBOTTOM
;
 [!b2]  zero    b0                      ; force NOP for b0 condition below
||[ b2] stw     a3, *+DP(SWI_D_lock)    ; SWI_D_lock--
||[ b2] mvk     SAT, a3                 ; mask for the CSR SAT bit
 [ b0]  ldw     *+DP(_HWI_D_spsave), b0
||[ b2] ldw     *+a0[HWI_STKAMR], a2
 [ b1]  stw     SP, *+DP(_HWI_D_spsave)
||[ b1] mv      a2, SP                  ; switch onto ISR stack (a2 = HWI_STKBOTTOM here)
; NOTE(review): the sequence below is heavily software-pipelined --
; b1, b2, b3 are read at their pre-ldw values while later ldw's into
; the same registers are still in their delay slots.
        ldw     *+a0[HWI_STKA3], a3
||      ldw     *+SP[HWI_STKB3 - HWI_NUMSTK], b3
||      mvk     4, b3                   ; b3 = 4 for 'sat b3:b2' before the ldw lands
        ldw     *+a0[HWI_STKA1], a1
||      ldw     *+SP[HWI_STKB1 - HWI_NUMSTK], b1
||      mvc     b1, irp                 ; restore IRP (b1 loaded earlier from frame)
||      mv      a1, b2                  ; copy saved CSR to B side (a1 = frame CSR)
        ldw     *+a0[HWI_STKB2], b2
||      and     a1, a3, a1              ; a1 = saved CSR & SAT
||      mvc     b2, csr                 ; restore CSR
        b       irp                     ; return from interrupt
||      ldw     *+a0[HWI_STKA0], a0
||      ldw     *+SP[HWI_STKB0 - HWI_NUMSTK], b0
||[ a1] sat     b3:b2, b1               ; presumably re-sets the CSR SAT bit -- confirm
        ldw     *+a0[HWI_STKA2], a2
||      ldw     *+SP[HWI_STKDP - HWI_NUMSTK], DP
||      mv      a2, b2                  ; copy saved AMR to B side (a2 = frame AMR)
||      zero    a0
 [ b0]  mv      b0, SP                  ; back to the task stack
||[ b0] stw     a0, *+DP(_HWI_D_spsave) ; clear HWI_D_spsave
||      mvc     b2, amr                 ; restore AMR
        nop     3                       ; fill remaining delay slots of 'b irp'
        .endm
        .else
; C6200 variant: identical structure to the C6400 version above, but
; with a single combined A/B register mask (ABMASK) and the C62_*
; save/restore macros and constants.
HWI_exit        .macro  ABMASK, CMASK, IERRESTOREMASK, CCMASK
        b       disable1?               ; decide whether a SWI needs to run
||      ldw     *+DP(SWI_D_curmask), b1
        ldw     *+DP(SWI_D_curset), b2
||      mvkl    :IERRESTOREMASK:, a1
        ldw     *+DP(SWI_D_lock), b0
        ldw     *++SP[2], a2            ; pop old IER pushed by HWI_enter
||      mvkh    :IERRESTOREMASK:, a1
||      b       disable2?
        mvkl    noSwi?, b3
        mvkh    noSwi?, b3              ; b3 = noSwi? (used by 'b b3' below)
disable1?:
        cmpgt   b1, b2, b2
        or      b0, b2, b2              ; b2 = 0 if need to run SWI
 [!b2]  b       exitISRexec?
||[!b2] ldw     *+DP(SWI_D_runaddr), b3
||[ b2] b       b3                      ; no SWI to run: continue at noSwi?
disable2?:
        and     a1, a2, a2              ; a2 = saved IER & restore mask
||      mvc     ier, b1
        or      a2, b1, b1              ; merge restored bits into current IER
        mvc     b1, ier
        nop     2
doswitch?:
; Reached from disable3? when a SWI must run after switching back off
; the ISR stack: save temps, enable GIE, call SWI_run().
        C62_save C62_ABTEMPS & ~(C62_ISRAB | :ABMASK:), C62_CTEMPS & ~(C62_ISRC | :CMASK:)
        b       b3                      ; call SWI_run()
        mvc     csr, b3
        or      GIE, b3, b3
        mvc     b3, csr                 ; re-enable interrupts for SWI_run
        mvkl    runRet2?, b3
        mvkh    runRet2?, b3            ; return address for SWI_run
runRet2?:
        C62_restore C62_ABTEMPS & ~(C62_ISRAB | :ABMASK:), C62_CTEMPS & ~(C62_ISRC | :CMASK:), 0
        b       noSwi?
||      ldw     *+DP(_HWI_D_spsave), b0 ; b0 = saved task SP
        zero    b2
        mvkl    _HWI_STKBOTTOM, a0
        mvkh    _HWI_STKBOTTOM, a0
        cmpeq   SP, a0, b1              ; still at ISR stack bottom?
 [ b1]  stw     b2, *+DP(_HWI_D_spsave) ; yes: clear HWI_D_spsave and
||[ b1] mv      b0, SP                  ; switch back to the task stack
exitISRexec?:
; SWI must run while still on the current stack: save temps and call
; SWI_run() (b3 = SWI_D_runaddr loaded above).
        C62_save C62_ABTEMPS & ~(C62_ISRAB | :ABMASK:), C62_CTEMPS & ~(C62_ISRC | :CMASK:)
        b       b3                      ; call SWI_run()
        mvkl    runRet1?, b3
        mvkh    runRet1?, b3            ; return address for SWI_run
        nop     3
runRet1?:
        C62_restore C62_ABTEMPS & ~(C62_ISRAB | :ABMASK:), C62_CTEMPS & ~(C62_ISRC | :CMASK:), 0
noSwi?:
; Restore the user registers saved by HWI_enter (SP restore deferred).
        C62_restore :ABMASK: & ~(C62_ISRAB), :CMASK: & ~(C62_ISRC), NOSP
        b       disable3?
||      ldw     *+DP(SWI_D_curmask), b1
||      mvc     csr, b3
        ldw     *+DP(SWI_D_curset), b2
||      and     ~GIE, b3, b3            ; clear GIE for the atomic exit sequence
||      mvkl    _HWI_STKBOTTOM, a2
        mvkh    _HWI_STKBOTTOM, a2
||      ldw     *+DP(SWI_D_lock), a3
        mvc     b3, csr                 ; globally disable interrupts
        mvk     C62_spoffset, b3        ; C62_spoffset set up by C62_restore
        mvkl    _HWI_STKTOP, a1
disable3?:
        cmpgt   b1, b2, b2              ; b2 = 0 if need to run SWI
||      mvkh    _HWI_STKTOP, a1
||      shl     b3, 2, b3               ; words -> bytes for the SP restore
        or      a3, b2, b2              ; also require SWI_D_lock == 0
||      sub     a3, 1, a3               ; a3 = SWI_D_lock - 1
 [!b2]  b       doswitch?
||[!b2] cmpgt   SP, a2, b0
||[!b2] ldw     *+DP(SWI_D_runaddr), b3
||[ b2] add     SP, b3, SP              ; do SP restore from above C62_restore
 [!b2]  cmplt   SP, a1, b1
||[ b2] zero    b1                      ; force NOP for b1 condition below
||[ b2] addaw   SP, HWI_NUMSTK, SP      ; pop the HWI_enter frame
||[ b2] mv      SP, a0                  ; a0 = A-side pointer to the old frame
 [!b2]  or      b0, b1, b1              ; b1 = 1 if we should switch SP
||[ b2] ldw     *+a0[HWI_STKCSR], a1
||[ b2] ldw     *+SP[HWI_STKIRP - HWI_NUMSTK], b1
||[ b2] cmpeq   SP, a2, b0              ; b0 = 1 if we should switch SP
;
; at this point: b1 = 1 if not on ISR stack && need to run SWI
;                b0 = 1 if not running SWI && SP is at HWI_STKBOTTOM
;
 [!b2]  zero    b0                      ; force NOP for b0 condition below
||[ b2] stw     a3, *+DP(SWI_D_lock)    ; SWI_D_lock--
||[ b2] mvk     SAT, a3                 ; mask for the CSR SAT bit
 [ b0]  ldw     *+DP(_HWI_D_spsave), b0
||[ b2] ldw     *+a0[HWI_STKAMR], a2
 [ b1]  stw     SP, *+DP(_HWI_D_spsave)
||[ b1] mv      a2, SP                  ; switch onto ISR stack (a2 = HWI_STKBOTTOM here)
; NOTE(review): the sequence below is heavily software-pipelined --
; b1, b2, b3 are read at their pre-ldw values while later ldw's into
; the same registers are still in their delay slots.
        ldw     *+a0[HWI_STKA3], a3
||      ldw     *+SP[HWI_STKB3 - HWI_NUMSTK], b3
||      mvk     4, b3                   ; b3 = 4 for 'sat b3:b2' before the ldw lands
        ldw     *+a0[HWI_STKA1], a1
||      ldw     *+SP[HWI_STKB1 - HWI_NUMSTK], b1
||      mvc     b1, irp                 ; restore IRP (b1 loaded earlier from frame)
||      mv      a1, b2                  ; copy saved CSR to B side (a1 = frame CSR)
        ldw     *+a0[HWI_STKB2], b2
||      and     a1, a3, a1              ; a1 = saved CSR & SAT
||      mvc     b2, csr                 ; restore CSR
        b       irp                     ; return from interrupt
||      ldw     *+a0[HWI_STKA0], a0
||      ldw     *+SP[HWI_STKB0 - HWI_NUMSTK], b0
||[ a1] sat     b3:b2, b1               ; presumably re-sets the CSR SAT bit -- confirm
        ldw     *+a0[HWI_STKA2], a2
||      ldw     *+SP[HWI_STKDP - HWI_NUMSTK], DP
||      mv      a2, b2                  ; copy saved AMR to B side (a2 = frame AMR)
||      zero    a0
 [ b0]  mv      b0, SP                  ; back to the task stack
||[ b0] stw     a0, *+DP(_HWI_D_spsave) ; clear HWI_D_spsave
||      mvc     b2, amr                 ; restore AMR
        nop     3                       ; fill remaining delay slots of 'b irp'
        .endm
        .endif
.endif ; HWI_ not defined
; end of hwi.h62