entry-armv.s

来自「h内核」· S 代码 · 共 741 行 · 第 1/2 页

S
741
字号
 * The out of line fixup for the ldrt above.
 */
	/* Fault fixup: label 1 (the ldrt, earlier in this file and not
	 * visible in this chunk) is paired with label 2 in __ex_table;
	 * if the ldrt faults we resume at 2:, returning via r9 which
	 * holds the pre-arranged "give up" address. */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this threads thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr				@ not a coprocessor insn: return
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number (still shifted left 8)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	enable_irq r7
	@ Dispatch on the CP number: r8 = CP# << 8, so "r8, lsr #6" is
	@ CP# * 4, a byte offset into the 16-entry jump table below.
	@ The following "mov r0, r0" is padding for the PC pipeline
	@ offset (pc reads as the address of the add plus 8).
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp			@ NOTE(review): .LCfp not visible in this chunk; presumably .word fp_enter — confirm
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers
 * set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	/* Hook word for the FP emulator/module: it initially points at
	 * fpundefinstr below, so until an FP module installs itself an
	 * FP instruction is treated as a plain undefined instruction. */
	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp				@ r0 = saved register frame
	adr	lr, ret_from_exception		@ return path after the C handler
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry abt
	alignment_trap r7, r0, __temp_abt
	zero_fp
	enable_irq r0				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	@ The writeback (!) leaves r2 pointing at the next thread's
	@ cpu_domain word; the final ldmib below loads the registers
	@ saved immediately after it.
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia   ip, {r4, r5}
#endif
	mov	r4, #0xffff0fff
	str	r3, [r4, #-3]			@ Set TLS ptr (word at 0xffff0ffc)
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	.macro	vector_stub, name, sym, correction=0
	.align	5
vector_\name:
	ldr	r13, .LCs\sym			@ r13 -> __temp_\sym save area
	.if \correction
	sub	lr, lr, #\correction	@ back up lr by the mode's PC offset
	.endif
	str	lr, [r13]			@ save lr_IRQ
	mrs	lr, spsr
	str	lr, [r13, #4]			@ save spsr_IRQ

	@
	@ now branch to the relevant MODE handling routine
	@
	mrs	r13, cpsr
	bic	r13, r13, #MODE_MASK
	orr	r13, r13, #MODE_SVC
	msr	spsr_cxsf, r13			@ switch to SVC_32 mode

	@ Index the 16-entry table that follows each use of this macro
	@ by the interrupted mode (low 4 bits of the saved spsr).
	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr				@ Changes mode and branches
	.endm

__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, irq, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, abt, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, abt, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, und

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
vector_addrexcptn:
	b	vector_addrexcptn		@ deliberate infinite loop: spin here

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi
.LCsirq:
	.word	__temp_irq
.LCsund:
	.word	__temp_und
.LCsabt:
	.word	__temp_abt
__stubs_end:

	@ The stubs are copied to 0x200 above the vector page by
	@ __trap_init below, so a stub's run-time address is its
	@ offset from __stubs_start plus .LCvectors + 0x200.
	.equ	__real_stubs_start, .LCvectors + 0x200

.LCvectors:
	swi	SYS_ERROR0
	b	__real_stubs_start + (vector_und - __stubs_start)
	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
	b	__real_stubs_start + (vector_pabt - __stubs_start)
	b	__real_stubs_start + (vector_dabt - __stubs_start)
	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
	b	__real_stubs_start + (vector_irq - __stubs_start)
	b	__real_stubs_start + (vector_fiq - __stubs_start)

/*
 * __trap_init: install the 8-word vector table at the high-vectors
 * base (0xffff0000), then copy the stub code (__stubs_start ..
 * __stubs_end) to 0x200 above it so the vector branches reach it.
 */
ENTRY(__trap_init)
	stmfd	sp!, {r4 - r6, lr}

	mov	r0, #0xff000000
	orr	r0, r0, #0x00ff0000		@ high vectors position
	adr	r1, .LCvectors			@ set up the vectors
	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}

	add	r2, r0, #0x200
	adr	r0, __stubs_start		@ copy stubs to 0x200
	adr	r1, __stubs_end
1:	ldr	r3, [r0], #4
	str	r3, [r2], #4
	cmp	r0, r1
	blt	1b
	LOADREGS(fd, sp!, {r4 - r6, pc})

	.data

/*
 * Do not reorder these, and do not insert extra data between...
 * (the vector stubs store lr at [base] and spsr at [base, #4],
 * so each __temp_* block's layout is fixed.)
 */
__temp_irq:
	.word	0				@ saved lr_irq
	.word	0				@ saved spsr_irq
	.word	-1				@ old_r0
__temp_und:
	.word	0				@ Saved lr_und
	.word	0				@ Saved spsr_und
	.word	-1				@ old_r0
__temp_abt:
	.word	0				@ Saved lr_abt
	.word	0				@ Saved spsr_abt
	.word	-1				@ old_r0

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?