/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/entry.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2004, 2005  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>

#include <asm/processor.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

/*
 * SR fields.
 */
#define SR_ASID_MASK	0x00ff0000
#define SR_FD_MASK	0x00008000
#define SR_SS		0x08000000
#define SR_BL		0x10000000
#define SR_MD		0x40000000

/*
 * Event code.
 */
#define	EVENT_INTERRUPT		0
#define	EVENT_FAULT_TLB		1
#define	EVENT_FAULT_NOT_TLB	2
#define	EVENT_DEBUG		3

/* EXPEVT values */
#define	RESET_CAUSE		0x20
#define	DEBUGSS_CAUSE		0x980

/*
 * Frame layout. Quad index.
 */
#define	FRAME_T(x)	FRAME_TBASE+(x*8)
#define	FRAME_R(x)	FRAME_RBASE+(x*8)
#define	FRAME_S(x)	FRAME_SBASE+(x*8)
#define	FSPC		0
#define	FSSR		1
#define	FSYSCALL_ID	2

/* Arrange the save frame to be a multiple of 32 bytes long */
#define	FRAME_SBASE	0
#define	FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
#define	FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
#define	FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 - tr7 */
#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */

#define	FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
#define	FP_FRAME_BASE	0

#define	SAVED_R2	0*8
#define	SAVED_R3	1*8
#define	SAVED_R4	2*8
#define	SAVED_R5	3*8
#define	SAVED_R18	4*8
#define	SAVED_R6	5*8
#define	SAVED_TR0	6*8

/* These are the registers saved in the TLB path that aren't saved in the
   first level of the normal one. */
#define	TLB_SAVED_R25	7*8
#define	TLB_SAVED_TR1	8*8
#define	TLB_SAVED_TR2	9*8
#define	TLB_SAVED_TR3	10*8
#define	TLB_SAVED_TR4	11*8
/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and
   -ffixed-r1, causing breakage otherwise. */
#define	TLB_SAVED_R0	12*8
#define	TLB_SAVED_R1	13*8

#define CLI()				\
	getcon	SR, r6;			\
	ori	r6, 0xf0, r6;		\
	putcon	r6, SR;

#define STI()				\
	getcon	SR, r6;			\
	andi	r6, ~0xf0, r6;		\
	putcon	r6, SR;

#ifdef CONFIG_PREEMPT
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif

	.section	.data, "aw"

#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)

/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill	FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0

/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
reg_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0

/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 */
	.balign 32,0,32
resvec_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32

/* Jump table of 3rd level handlers */
trap_jtable:
	.long	do_exception_error	/* 0x000 */
	.long	do_exception_error	/* 0x020 */
	.long	tlb_miss_load		/* 0x040 */
	.long	tlb_miss_store		/* 0x060 */
	! ARTIFICIAL pseudo-EXPEVT setting
	.long	do_debug_interrupt	/* 0x080 */
	.long	tlb_miss_load		/* 0x0A0 */
	.long	tlb_miss_store		/* 0x0C0 */
	.long	do_address_error_load	/* 0x0E0 */
	.long	do_address_error_store	/* 0x100 */
#ifdef CONFIG_SH_FPU
	.long	do_fpu_error		/* 0x120 */
#else
	.long	do_exception_error	/* 0x120 */
#endif
	.long	do_exception_error	/* 0x140 */
	.long	system_call		/* 0x160 */
	.long	do_reserved_inst	/* 0x180 */
	.long	do_illegal_slot_inst	/* 0x1A0 */
	.long	do_NMI			/* 0x1C0 */
	.long	do_exception_error	/* 0x1E0 */
	.rept 15
		.long do_IRQ		/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error	/* 0x3E0 */
	.rept 32
		.long do_IRQ		/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA	/* 0x800 */
	.long	fpu_error_or_IRQB	/* 0x820 */
	.long	do_IRQ			/* 0x840 */
	.long	do_IRQ			/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	do_software_break_point	/* 0x940 */
	.long	do_exception_error	/* 0x960 */
	.long	do_single_step		/* 0x980 */
	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ			/* 0xA00 */
	.long	do_IRQ			/* 0xA20 */
	.long	itlb_miss_or_IRQ	/* 0xA40 */
	.long	do_IRQ			/* 0xA60 */
	.long	do_IRQ			/* 0xA80 */
	.long	itlb_miss_or_IRQ	/* 0xAA0 */
	.long	do_exception_error	/* 0xAC0 */
	.long	do_address_error_exec	/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ		/* 0xC00 - 0xE20 */
	.endr

	.section	.text64, "ax"

/*
 * --- Exception/Interrupt/Event Handling Section
 */

/*
 * VBR and RESVEC blocks.
 *
 * First level handler for VBR-based exceptions.
 *
 * To avoid waste of space, align to the maximum text block size.
 * This is assumed to be at most 128 bytes or 32 instructions.
 * DO NOT EXCEED 32 instructions on the first level handlers!
 *
 * Also note that RESVEC is contained within the VBR block
 * where the room left (1KB - TEXT_SIZE) allows placing
 * the RESVEC block (at most 512B + TEXT_SIZE).
 *
 * So this is also the first (and only) level handler for RESVEC-based
 * exceptions.
 *
 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 * and interrupt), register space is very tight until we save onto the
 * stack frame, which is done in handle_exception().
 *
 */

#define	TEXT_SIZE	128
#define	BLOCK_SIZE	1664		/* Dynamic check, 13*128 */

	.balign TEXT_SIZE
LVBR_block:
	.space	256, 0			/* Power-on class handler, */
					/* not required here       */
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO

	.balign 256
	! VBR+0x200
	nop
	.balign 256
	! VBR+0x300
	nop
	.balign 256
	/*
	 * Instead of the natural .balign 1024 place RESVEC here
	 * respecting the final 1KB alignment.
	 */
	.balign TEXT_SIZE
	/*
	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
	 * block making sure the final alignment is correct.
	 */
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0, r0
	st.q	SP, TLB_SAVED_R1, r1
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call
	 * to the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0, r2
	st.q	SP, TLB_SAVED_TR1, r3
	st.q	SP, TLB_SAVED_TR2, r4
	st.q	SP, TLB_SAVED_TR3, r5
	st.q	SP, TLB_SAVED_TR4, r18

	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	   */
	beqi/u	r2, 0, tr1

fast_tlb_miss_restore:
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop	/* for safety, in case the code is run on sh5-101 cut1.x */

fixup_to_invoke_general_handler:

	/* OK, new method. Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
	   r25,tr1-4 and save r6 to get into the right state. */

	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO

/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
   DOES END UP AT VBR+0x600 */
	nop
	nop
	nop
	nop
	nop
	nop

	.balign 256
	/* VBR + 0x600 */

interrupt:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for interrupt class handler */
	getcon	INTEVT, r2
	movi	ret_from_irq, r3
	ori	r3, 1, r3
	movi	EVENT_INTERRUPT, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
	.balign	TEXT_SIZE		/* let's waste the bare minimum */

LVBR_block_end:				/* Marker. Used for total checking */

	.balign 256
LRESVEC_block:
	/* Panic handler. Called with MMU off. Possible causes/actions:
	 * - Reset:		Jump to program start.
	 * - Single Step:	Turn off Single Step & return.
	 * - Others:		Call panic handler, passing PC as arg.
	 *			(this may need to be extended...)
	 */
reset_or_panic:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, DCR
	/* First save r0-1 and tr0, as we need to use these */
	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	tr0, r0
	st.q	SP, 32, r0

	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	movi	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0
	ori	r0, 1, r0
	ptabs	r0, tr0
	beqi	r1, 0, tr0		/* Jump to start address if reset */

	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	pta	single_step_panic, tr0
	beqi	r1, 0, tr0		/* jump if single step */

	/* Now jump to where we save the registers. */
	movi	panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
	ptabs	r1, tr0
	blink	tr0, r63

single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0
	ptabs	r0, tr0
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP
	synco
	rte

	.balign 256
debug_exception:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	movi	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP

	/* With the MMU off, we are bypassing the cache, so purge any
	 * data that will be made stale by the following stores.
	 */
	ocbp	SP, 0
	synco

	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0

	/* Enable MMU, block exceptions, set priv mode, disable single step */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exception_2 when rte is executed */
	movi	debug_exception_2, r0
	ori	r0, 1, r0		/* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exception_2:
	/* Restore saved regs */
	putcon	SP, KCR1
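The comment above the FRAME_* defines requires the save frame to be a multiple of 32 bytes, and the TLB path's highest save slot (TLB_SAVED_R1 at 13*8) has to fit inside the fourteen quadwords reserved at reg_save_area. The snippet below is a minimal stand-alone sanity check of that arithmetic, not part of the kernel source: the FRAME_* and TLB_SAVED_R1 values are copied from the #defines in this file, while REG_SAVE_QUADS is a hypothetical name introduced here for the count of .quad entries.

```c
/* Host-side sanity check of the entry.S frame-layout arithmetic.
 * Values mirror the #defines in arch/sh64/kernel/entry.S; this is
 * only an illustration and is not built as part of the kernel.
 */
#include <assert.h>
#include <stdio.h>

#define FRAME_SBASE	0
#define FRAME_RBASE	(FRAME_SBASE + (3 * 8))		/* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE	(FRAME_RBASE + (63 * 8))	/* r0 - r62 */
#define FRAME_PBASE	(FRAME_TBASE + (8 * 8))		/* tr0 - tr7 */
#define FRAME_SIZE	(FRAME_PBASE + (2 * 8))		/* pad0 - pad1 */

#define TLB_SAVED_R1	(13 * 8)	/* highest offset used by the TLB path */
#define REG_SAVE_QUADS	14		/* hypothetical: .quad 0 entries after reg_save_area */

int main(void)
{
	/* "Arrange the save frame to be a multiple of 32 bytes long" */
	assert(FRAME_SIZE % 32 == 0);

	/* The last quad stored by the TLB path must lie within reg_save_area */
	assert(TLB_SAVED_R1 + 8 <= REG_SAVE_QUADS * 8);

	printf("FRAME_SIZE = %d bytes (%d cache lines of 32 bytes)\n",
	       FRAME_SIZE, FRAME_SIZE / 32);
	return 0;
}
```

With the values as defined, FRAME_SIZE works out to 608 bytes, i.e. 19 full 32-byte cache lines, so both assertions hold.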