entry-armv.s

来自「h内核」· S 代码 · 共 741 行 · 第 1/2 页

S
741
字号
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values...  Be aware!
 */
#include <linux/config.h>
#include <linux/init.h>
#include <asm/thread_info.h>
#include <asm/glue.h>
#include <asm/ptrace.h>
#include <asm/vfpmacros.h>

#include "entry-header.S"

/*
 * Invalid mode handlers
 *
 * inv_entry: common entry for an exception taken while in a mode we
 * cannot handle.  Allocates a pt_regs frame on the stack, saves
 * r0-lr into it, loads r4 with the address of the per-exception
 * temporary save area (.LCirq/.LCund/.LCabt) and r1 with the
 * BAD_* reason code; the shared tail at label 1 below finishes the
 * frame and branches to bad_mode.
 */
	.macro	inv_entry, sym, reason
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
	ldr	r4, .LC\sym			@ r4 = &__temp_<sym> save area
	mov	r1, #\reason			@ r1 = BAD_* reason code
	.endm

__pabt_invalid:
	inv_entry abt, BAD_PREFETCH
	b	1f

__dabt_invalid:
	inv_entry abt, BAD_DATA
	b	1f

__irq_invalid:
	inv_entry irq, BAD_IRQ
	b	1f

__und_invalid:
	inv_entry und, BAD_UNDEFINSTR

	@ Common tail: copy pc/cpsr/old_r0 from the temp save area into
	@ the pt_regs frame, then report via bad_mode(regs, ?, mode).
1:	zero_fp
	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
	mov	r0, sp				@ r0 = pt_regs
	and	r2, r6, #31			@ int mode (low 5 bits of cpsr)
	b	bad_mode

/*
 * SVC mode handlers
 *
 * svc_entry: build a full pt_regs frame on the SVC stack for an
 * exception taken while in SVC mode.  On exit: r2 = exception pc,
 * r3 = exception cpsr (spsr), r4 = orig_r0.
 */
	.macro	svc_entry, sym
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r2, .LC\sym			@ r2 = &__temp_<sym> save area
	add	r0, sp, #S_FRAME_SIZE		@ r0 = sp_svc before this frame
	ldmia	r2, {r2 - r4}			@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	@ Data abort taken while in SVC mode (calls do_DataAbort).
	.align	5
__dabt_svc:
	svc_entry abt

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT			@ were IRQs enabled at abort time?
	biceq	r9, r9, #PSR_I_BIT		@ if so, clear I bit in saved copy

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc				@ indirect call through the
	ldr	pc, [r4]			@ processor struct (first word is
						@ presumably the abort entry —
						@ verify against struct processor)
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp				@ r2 = pt_regs
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	@ IRQ taken while in SVC mode: loop dispatching asm_do_IRQ for
	@ each pending interrupt, with optional kernel preemption.
	.align	5
__irq_svc:
	svc_entry irq
#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b				@ return to 1b to poll again
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
	teq	r0, r7				@ must still equal our count
	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
	@ Called from __irq_svc when TIF_NEED_RESCHED is set.  Bails out
	@ unless the preempt count was zero and no irq/bh is in progress.
svc_preempt:
	teq	r9, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr				@ busy in irq/bh: don't preempt
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif

	@ Undefined instruction taken while in SVC mode.
	.align	5
__und_svc:
	svc_entry und

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]			@ fetch the faulting instruction
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	@ Prefetch abort taken while in SVC mode (calls do_PrefetchAbort).
	.align	5
__pabt_svc:
	svc_entry abt

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	@
	@ Literal pool: addresses of the per-exception temporary save
	@ areas and other data referenced above via ldr rN, .LCxxx.
	@
	.align	5
.LCirq:
	.word	__temp_irq
.LCund:
	.word	__temp_und
.LCabt:
	.word	__temp_abt
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 *
 * usr_entry: build a full pt_regs frame on the SVC stack for an
 * exception taken while in user mode.  On exit: r2 = exception pc,
 * r3 = exception cpsr (spsr), r4 = orig_r0.
 */
	.macro	usr_entry, sym
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r7, .LC\sym			@ r7 = &__temp_<sym> save area
	add	r5, sp, #S_PC
	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r5, {r2 - r4}
	stmdb	r5, {sp, lr}^			@ ^ = user-mode banked sp/lr
	.endm

	@ Data abort taken while in user mode.
	.align	5
__dabt_usr:
	usr_entry abt
	alignment_trap r7, r0, __temp_abt
	zero_fp

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc				@ indirect call through the
	ldr	pc, [r4]			@ processor struct (see __dabt_svc)
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp				@ r2 = pt_regs
	adr	lr, ret_from_exception		@ tail-call: handler returns there
	b	do_DataAbort

	@ IRQ taken while in user mode: dispatch pending interrupts,
	@ then return to user space via ret_to_user.
	.align	5
__irq_usr:
	usr_entry irq
	alignment_trap r7, r0, __temp_irq
	zero_fp
#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	adrne	lr, 1b				@ return to 1b to poll again
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_PREEMPT]		@ sanity-check preempt count
	teq	r0, r7
	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
	strne	r0, [r0, -r0]			@ bug() if it changed
	mov	tsk, r8
#else
	get_thread_info tsk
#endif
	mov	why, #0
	b	ret_to_user

	.ltorg

	@ Undefined instruction taken while in user mode: try FP/coproc
	@ emulation first, else deliver as a real undefined instruction.
	.align	5
__und_usr:
	usr_entry und
	alignment_trap r7, r0, __temp_und
	zero_fp
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4			@ r4 = address of faulting insn

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]			@ user-mode load of the insn
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?