entry-armv.S
	stmia	sp, {r0 - lr}		@ Save SVC r0 - lr [lr *should* be intact]
	ldr	r4, .LCabt
	mov	r1, #BAD_DATA
	b	1f
__irq_invalid:
	sub	sp, sp, #S_FRAME_SIZE	@ Allocate space on stack for frame
	stmfd	sp, {r0 - lr}		@ Save r0 - lr
	ldr	r4, .LCirq
	mov	r1, #BAD_IRQ
	b	1f
__und_invalid:
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - lr}
	ldr	r4, .LCund
	mov	r1, #BAD_UNDEFINSTR	@ int reason

1:	zero_fp
	ldmia	r4, {r5 - r7}		@ Get XXX pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}		@ Save XXX pc, cpsr, old_r0
	mov	r0, sp
	and	r2, r6, #31		@ int mode
	b	SYMBOL_NAME(bad_mode)

#ifdef CONFIG_NWFPE
	/* The FPE is always present */
	.equ	fpe_not_present, 0
#else

wfs_mask_data:	.word	0x0e200110	@ WFS/RFS
		.word	0x0fef0fff
		.word	0x0d0d0100	@ LDF [sp]/STF [sp]
		.word	0x0d0b0100	@ LDF [fp]/STF [fp]
		.word	0x0f0f0f00

/* We get here if an undefined instruction happens and the floating
 * point emulator is not present.  If the offending instruction was
 * a WFS, we just perform a normal return as if we had emulated the
 * operation.  This is a hack to allow some basic userland binaries
 * to run so that the emulator module proper can be loaded. --philb
 */
fpe_not_present:
	adr	r10, wfs_mask_data
	ldmia	r10, {r4, r5, r6, r7, r8}
	ldr	r10, [sp, #S_PC]	@ Load PC
	sub	r10, r10, #4
	mask_pc	r10, r10
	ldrt	r10, [r10]		@ get instruction
	and	r5, r10, r5
	teq	r5, r4			@ Is it WFS?
	moveq	pc, r9
	and	r5, r10, r8
	teq	r5, r6			@ Is it LDF/STF on sp or fp?
	teqne	r5, r7
	movne	pc, lr
	tst	r10, #0x00200000	@ Does it have WB
	moveq	pc, r9
	and	r4, r10, #255		@ get offset
	and	r6, r10, #0x000f0000
	tst	r10, #0x00800000	@ +/-
	ldr	r5, [sp, r6, lsr #14]	@ Load reg
	rsbeq	r4, r4, #0
	add	r5, r5, r4, lsl #2
	str	r5, [sp, r6, lsr #14]	@ Save reg
	mov	pc, r9
#endif

/*
 * SVC mode handlers
 */
	.align	5
__dabt_svc:	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r2, .LCabt
	add	r0, sp, #S_FRAME_SIZE
	ldmia	r2, {r2 - r4}		@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr
	stmia	r5, {r0 - r4}		@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
	mrs	r9, cpsr		@ Enable interrupts if they were
	tst	r3, #I_BIT
	biceq	r9, r9, #I_BIT		@ previously
	mov	r0, r2
/*
 * This routine must not corrupt r9
 */
#ifdef MULTI_CPU
	ldr	r2, .LCprocfns
	mov	lr, pc
	ldr	pc, [r2]		@ call processor specific code
#else
	bl	cpu_data_abort
#endif
	msr	cpsr_c, r9
	mov	r2, sp
	bl	SYMBOL_NAME(do_DataAbort)
	mov	r0, #I_BIT | MODE_SVC
	msr	cpsr_c, r0
	ldr	r0, [sp, #S_PSR]
	msr	spsr, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.align	5
__irq_svc:	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r7, .LCirq
	add	r5, sp, #S_FRAME_SIZE
	ldmia	r7, {r7 - r9}
	add	r4, sp, #S_SP
	mov	r6, lr
	stmia	r4, {r5, r6, r7, r8, r9}	@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrsvc	ne, lr, 1b
	bne	do_IRQ
	ldr	r0, [sp, #S_PSR]	@ irqs are already disabled
	msr	spsr, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.align	5
__und_svc:	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r7, .LCund
	mov	r6, lr
	ldmia	r7, {r7 - r9}
	add	r5, sp, #S_FRAME_SIZE
	add	r4, sp, #S_SP
	stmia	r4, {r5 - r9}		@ save sp_SVC, lr_SVC, pc, cpsr, old_r0

	adrsvc	al, r9, 1f		@ r9 = normal FP return
	bl	call_fpe		@ lr = undefined instr return

	mov	r0, r5			@ unsigned long pc
	mov	r1, sp			@ struct pt_regs *regs
	bl	SYMBOL_NAME(do_undefinstr)

1:	mov	r0, #I_BIT | MODE_SVC
	msr	cpsr_c, r0
	ldr	lr, [sp, #S_PSR]	@ Get SVC cpsr
	msr	spsr, lr
	ldmia	sp, {r0 - pc}^		@ Restore SVC registers

	.align	5
__pabt_svc:	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r2, .LCabt
	add	r0, sp, #S_FRAME_SIZE
	ldmia	r2, {r2 - r4}		@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr
	stmia	r5, {r0 - r4}		@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
	mrs	r9, cpsr		@ Enable interrupts if they were
	tst	r3, #I_BIT
	biceq	r9, r9, #I_BIT		@ previously
	msr	cpsr_c, r9
	mov	r0, r2			@ address (pc)
	mov	r1, sp			@ regs
	bl	SYMBOL_NAME(do_PrefetchAbort)	@ call abort handler
	mov	r0, #I_BIT | MODE_SVC
	msr	cpsr_c, r0
	ldr	r0, [sp, #S_PSR]
	msr	spsr, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.align	5
.LCirq:	.word	__temp_irq
.LCund:	.word	__temp_und
.LCabt:	.word	__temp_abt
#ifdef MULTI_CPU
.LCprocfns:	.word	SYMBOL_NAME(processor)
#endif
.LCfp:	.word	SYMBOL_NAME(fp_enter)
#ifdef CONFIG_ALIGNMENT_TRAP
.LCswi:	.word	SYMBOL_NAME(cr_alignment)
#endif

	irq_prio_table

/*
 * User mode handlers
 */
	.align	5
__dabt_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r4, .LCabt
	add	r3, sp, #S_PC
	ldmia	r4, {r0 - r2}		@ Get USR pc, cpsr
	stmia	r3, {r0 - r2}		@ Save USR pc, cpsr, old_r0
	stmdb	r3, {sp, lr}^
	alignment_trap r4, r7, __temp_abt
	zero_fp
#ifdef MULTI_CPU
	ldr	r2, .LCprocfns
	mov	lr, pc
	ldr	pc, [r2]		@ call processor specific code
#else
	bl	cpu_data_abort
#endif
	mov	r2, #MODE_SVC
	msr	cpsr_c, r2		@ Enable interrupts
	mov	r2, sp
	adrsvc	al, lr, ret_from_sys_call
	b	SYMBOL_NAME(do_DataAbort)

	.align	5
__irq_usr:	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}		@ save r0 - r12
	ldr	r4, .LCirq
	add	r8, sp, #S_PC
	ldmia	r4, {r5 - r7}		@ get saved PC, SPSR
	stmia	r8, {r5 - r7}		@ save pc, psr, old_r0
	stmdb	r8, {sp, lr}^
	alignment_trap r4, r7, __temp_irq
	zero_fp
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	adrsvc	ne, lr, 1b
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	bne	do_IRQ
	mov	r4, #0
	get_current_task r5
	b	ret_with_reschedule

	.align	5
__und_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
	stmia	sp, {r0 - r12}		@ Save r0 - r12
	ldr	r4, .LCund
	add	r8, sp, #S_PC
	ldmia	r4, {r5 - r7}
	stmia	r8, {r5 - r7}		@ Save USR pc, cpsr, old_r0
	stmdb	r8, {sp, lr}^		@ Save user sp, lr
	alignment_trap r4, r7, __temp_und
	zero_fp
	adrsvc	al, r9, ret_from_sys_call	@ r9 = normal FP return
	adrsvc	al, lr, fpundefinstr		@ lr = undefined instr return

call_fpe:	get_current_task r10
	mov	r8, #1
	strb	r8, [r10, #TSK_USED_MATH]	@ set current->used_math
	ldr	r4, .LCfp
	add	r10, r10, #TSS_FPESAVE	@ r10 = workspace
	ldr	pc, [r4]		@ Call FP module USR entry point

fpundefinstr:	mov	r0, #MODE_SVC
	msr	cpsr_c, r0		@ Enable interrupts
	mov	r0, lr
	mov	r1, sp
	adrsvc	al, lr, ret_from_sys_call
	b	SYMBOL_NAME(do_undefinstr)

	.align	5
__pabt_usr:	sub	sp, sp, #S_FRAME_SIZE	@ Allocate frame size in one go
	stmia	sp, {r0 - r12}		@ Save r0 - r12
	ldr	r4, .LCabt
	add	r8, sp, #S_PC
	ldmia	r4, {r5 - r7}		@ Get USR pc, cpsr
	stmia	r8, {r5 - r7}		@ Save USR pc, cpsr, old_r0
	stmdb	r8, {sp, lr}^		@ Save sp_usr lr_usr
	alignment_trap r4, r7, __temp_abt
	zero_fp
	mov	r0, #MODE_SVC
	msr	cpsr_c, r0		@ Enable interrupts
	mov	r0, r5			@ address (pc)
	mov	r1, sp			@ regs
	bl	SYMBOL_NAME(do_PrefetchAbort)	@ call abort handler
	teq	r0, #0			@ Does this still apply???
	bne	ret_from_sys_call	@ Return from exception
#ifdef DEBUG_UNDEF
	adr	r0, t
	bl	SYMBOL_NAME(printk)
#endif
	mov	r0, r5
	mov	r1, sp
	and	r2, r6, #31
	bl	SYMBOL_NAME(do_undefinstr)
	ldr	lr, [sp, #S_PSR]	@ Get USR cpsr
	msr	spsr, lr
	ldmia	sp, {r0 - pc}^		@ Restore USR registers

#ifdef DEBUG_UNDEF
t:	.ascii	"Prefetch -> undefined instruction\n\0"
	.align
#endif

#include "entry-common.S"

	.text
#ifndef __ARM_ARCH_4__
.Larm700bug:
	ldr	r0, [sp, #S_PSR]	@ Get calling cpsr
	str	lr, [r8]
	msr	spsr, r0
	ldmia	sp, {r0 - lr}^		@ Get calling r0 - lr
	mov	r0, r0
	ldr	lr, [sp, #S_PC]		@ Get PC
	add	sp, sp, #S_FRAME_SIZE
	movs	pc, lr
#endif

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous, r1 = next, return previous.
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	stmfd	sp!, {r4 - sl, fp, lr}	@ Store most regs on stack
	mrs	ip, cpsr
	str	ip, [sp, #-4]!		@ Save cpsr_SVC
	str	sp, [r0, #TSS_SAVE]	@ Save sp_SVC
	ldr	sp, [r1, #TSS_SAVE]	@ Get saved sp_SVC
	ldr	r2, [r1, #TSS_DOMAIN]
	ldr	ip, [sp], #4
	mcr	p15, 0, r2, c3, c0	@ Set domain register
	msr	spsr, ip		@ Save tasks CPSR into SPSR for this return
	ldmfd	sp!, {r4 - sl, fp, pc}^	@ Load all regs saved previously

	.section ".text.init",#alloc,#execinstr
/*
 * Vector stubs.  NOTE that we only align 'vector_IRQ' to a cache line boundary,
 * and we rely on each stub being exactly 48 bytes (1.5 cache lines) in size.
 * This means that we only ever load two cache lines for this code, or one if
 * we're lucky.  We also copy this code to 0x200 so that we can use branches in
 * the vectors, rather than ldr's.
 */
	.align	5
__stubs_start:
/*
 * Interrupt dispatcher
 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
vector_IRQ:
	@
	@ save mode specific registers
	@
	ldr	r13, .LCsirq
	sub	lr, lr, #4
	str	lr, [r13]		@ save lr_IRQ
	mrs	lr, spsr
	str	lr, [r13, #4]		@ save spsr_IRQ
	@
	@ now branch to the relevant MODE handling routine
	@
	mov	r13, #I_BIT | MODE_SVC
	msr	spsr_c, r13		@ switch to SVC_32 mode
	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ Changes mode and branches

.LCtab_irq:	.word	__irq_usr	@  0  (USR_26 / USR_32)
		.word	__irq_invalid	@  1  (FIQ_26 / FIQ_32)
		.word	__irq_invalid	@  2  (IRQ_26 / IRQ_32)
		.word	__irq_svc	@  3  (SVC_26 / SVC_32)
		.word	__irq_invalid	@  4
		.word	__irq_invalid	@  5
		.word	__irq_invalid	@  6
		.word	__irq_invalid	@  7
		.word	__irq_invalid	@  8
		.word	__irq_invalid	@  9
		.word	__irq_invalid	@  a
		.word	__irq_invalid	@  b
		.word	__irq_invalid	@  c
		.word	__irq_invalid	@  d
		.word	__irq_invalid	@  e
		.word	__irq_invalid	@  f

	.align	5
/*
 * Data abort dispatcher - dispatches it to the correct handler for the processor mode
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
vector_data:
	@
	@ save mode specific registers
	@
	ldr	r13, .LCsabt
	sub	lr, lr, #8
	str	lr, [r13]
	mrs	lr, spsr
	str	lr, [r13, #4]
	@
	@ now branch to the relevant MODE handling routine
	@
	mov	r13, #I_BIT | MODE_SVC
	msr	spsr_c, r13		@ switch to SVC_32 mode
	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ Changes mode and branches

.LCtab_dabt:	.word	__dabt_usr	@  0  (USR_26 / USR_32)
		.word	__dabt_invalid	@  1  (FIQ_26 / FIQ_32)
		.word	__dabt_invalid	@  2  (IRQ_26 / IRQ_32)
		.word	__dabt_svc	@  3  (SVC_26 / SVC_32)
		.word	__dabt_invalid	@  4
		.word	__dabt_invalid	@  5
		.word	__dabt_invalid	@  6
		.word	__dabt_invalid	@  7
		.word	__dabt_invalid	@  8
		.word	__dabt_invalid	@  9
		.word	__dabt_invalid	@  a
		.word	__dabt_invalid	@  b
		.word	__dabt_invalid	@  c
		.word	__dabt_invalid	@  d
		.word	__dabt_invalid	@  e
		.word	__dabt_invalid	@  f

	.align	5
/*
 * Prefetch abort dispatcher - dispatches it to the correct handler for the processor mode
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
vector_prefetch:
	@
	@ save mode specific registers
	@
	ldr	r13, .LCsabt
	sub	lr, lr, #4
	str	lr, [r13]		@ save lr_ABT
	mrs	lr, spsr
	str	lr, [r13, #4]		@ save spsr_ABT
	@
	@ now branch to the relevant MODE handling routine
	@
	mov	r13, #I_BIT | MODE_SVC
	msr	spsr_c, r13		@ switch to SVC_32 mode
	ands	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr

.LCtab_pabt:	.word	__pabt_usr	@  0  (USR_26 / USR_32)
		.word	__pabt_invalid	@  1  (FIQ_26 / FIQ_32)
		.word	__pabt_invalid	@  2  (IRQ_26 / IRQ_32)
		.word	__pabt_svc	@  3  (SVC_26 / SVC_32)
		.word	__pabt_invalid	@  4
		.word	__pabt_invalid	@  5
		.word	__pabt_invalid	@  6
		.word	__pabt_invalid	@  7
		.word	__pabt_invalid	@  8
		.word	__pabt_invalid	@  9
		.word	__pabt_invalid	@  a
		.word	__pabt_invalid	@  b
		.word	__pabt_invalid	@  c
		.word	__pabt_invalid	@  d
		.word	__pabt_invalid	@  e
		.word	__pabt_invalid	@  f

	.align	5
/*
 * Undef instr entry dispatcher - dispatches it to the correct handler for the processor mode
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
vector_undefinstr:
	@
	@ save mode specific registers
	@
	ldr	r13, .LCsund
	str	lr, [r13]		@ save lr_UND
	mrs	lr, spsr
	str	lr, [r13, #4]		@ save spsr_UND
	@
	@ now branch to the relevant MODE handling routine
	@
	mov	r13, #I_BIT | MODE_SVC
	msr	spsr_c, r13		@ switch to SVC_32 mode
	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ Changes mode and branches

.LCtab_und:	.word	__und_usr	@  0  (USR_26 / USR_32)
		.word	__und_invalid	@  1  (FIQ_26 / FIQ_32)
		.word	__und_invalid	@  2  (IRQ_26 / IRQ_32)
		.word	__und_svc	@  3  (SVC_26 / SVC_32)
		.word	__und_invalid	@  4
		.word	__und_invalid	@  5
		.word	__und_invalid	@  6
		.word	__und_invalid	@  7
		.word	__und_invalid	@  8
		.word	__und_invalid	@  9
		.word	__und_invalid	@  a
		.word	__und_invalid	@  b
		.word	__und_invalid	@  c
		.word	__und_invalid	@  d
		.word	__und_invalid	@  e
		.word	__und_invalid	@  f

	.align	5
/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_FIQ:	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:	.word	vector_swi
.LCsirq:	.word	__temp_irq
.LCsund:	.word	__temp_und
.LCsabt:	.word	__temp_abt

__stubs_end:

	.equ	__real_stubs_start, .LCvectors + 0x200

.LCvectors:	swi	SYS_ERROR0
	b	__real_stubs_start + (vector_undefinstr - __stubs_start)
	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
	b	__real_stubs_start + (vector_prefetch - __stubs_start)
	b	__real_stubs_start + (vector_data - __stubs_start)
	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
	b	__real_stubs_start + (vector_IRQ - __stubs_start)
	b	__real_stubs_start + (vector_FIQ - __stubs_start)

ENTRY(__trap_init)
	stmfd	sp!, {r4 - r6, lr}

	adr	r1, .LCvectors		@ set up the vectors
	mov	r0, #0
	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}

	add	r2, r0, #0x200
	adr	r0, __stubs_start	@ copy stubs to 0x200
	adr	r1, __stubs_end
1:	ldr	r3, [r0], #4
	str	r3, [r2], #4
	cmp	r0, r1
	blt	1b

	LOADREGS(fd, sp!, {r4 - r6, pc})

	.data
/*
 * Do not reorder these, and do not insert extra data between...
 */
__temp_irq:	.word	0		@ saved lr_irq
		.word	0		@ saved spsr_irq
		.word	-1		@ old_r0
__temp_und:	.word	0		@ Saved lr_und
		.word	0		@ Saved spsr_und
		.word	-1		@ old_r0
__temp_abt:	.word	0		@ Saved lr_abt
		.word	0		@ Saved spsr_abt
		.word	-1		@ old_r0

	.globl	SYMBOL_NAME(cr_alignment)
	.globl	SYMBOL_NAME(cr_no_alignment)
SYMBOL_NAME(cr_alignment):
	.space	4
SYMBOL_NAME(cr_no_alignment):
	.space	4
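
For readers tracing the dispatch path: every vector_* stub above masks the saved SPSR with 15 and uses the result to index a sixteen-entry table (.LCtab_irq, .LCtab_dabt, .LCtab_pabt, .LCtab_und), so entry 0 handles traps taken from user mode (USR_26 / USR_32), entry 3 handles traps taken from SVC mode, and everything else falls through to an __*_invalid handler. The C program below is a minimal user-space sketch of that table lookup, not kernel code; the names handler_t, irq_table and dispatch_irq are illustrative only, and the 0x10 / 0x13 test values are simply the ARM USR_32 and SVC_32 mode numbers, whose low four bits are 0 and 3.

/* Minimal sketch of the vector-stub mode dispatch; names are illustrative. */
#include <stdio.h>
#include <stdint.h>

typedef void (*handler_t)(void);

static void irq_usr(void)     { puts("__irq_usr");     }  /* entry 0 */
static void irq_svc(void)     { puts("__irq_svc");     }  /* entry 3 */
static void irq_invalid(void) { puts("__irq_invalid"); }  /* all other modes */

/* Mirrors .LCtab_irq: only user mode and SVC mode get real handlers. */
static handler_t irq_table[16] = {
	irq_usr,     irq_invalid, irq_invalid, irq_svc,
	irq_invalid, irq_invalid, irq_invalid, irq_invalid,
	irq_invalid, irq_invalid, irq_invalid, irq_invalid,
	irq_invalid, irq_invalid, irq_invalid, irq_invalid,
};

/* C equivalent of "and lr, lr, #15; ldr lr, [pc, lr, lsl #2]; movs pc, lr". */
static void dispatch_irq(uint32_t spsr)
{
	irq_table[spsr & 15]();
}

int main(void)
{
	dispatch_irq(0x10);	/* USR_32: IRQ taken from user code   -> __irq_usr */
	dispatch_irq(0x13);	/* SVC_32: IRQ taken from kernel code -> __irq_svc */
	return 0;
}

The data abort, prefetch abort and undefined-instruction vectors use the same pattern; only the table contents differ.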