
entry-armv.s
linux-2.6.15.6
	.align	5
__pabt_usr:
	usr_entry

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia   ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "lr","cc" )
 */
__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr
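/*
 * Editorial note -- not part of the original source.  A minimal user-space
 * sketch of reaching this helper through its fixed address, using the
 * typedef documented above (the wrapper name user_dmb is illustrative only):
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 *	static inline void user_dmb(void)
 *	{
 *		__kernel_dmb();		// ordinary indirect call into the vector page
 *	}
 *
 * Calling through the function pointer lets the compiler set up lr itself,
 * which is equivalent to the hand-written asm sequence shown above.
 */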
	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Note: this routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	swi	#0x9ffff0
	mov	pc, lr

#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval. If ever an exception
	 * occurs we cannot be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
	mov	pc, lr

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	rsbs	r0, r3, #0
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

#endif
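/*
 * Editorial note -- not part of the original source.  Because the helper
 * returns zero only when the exchange happened, user space normally wraps it
 * in a retry loop.  A minimal C sketch (the function name user_atomic_inc is
 * illustrative only):
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 *	static int user_atomic_inc(int *counter)
 *	{
 *		int old;
 *		do {
 *			old = *counter;
 *		} while (__kernel_cmpxchg(old, old + 1, counter) != 0);
 *		return old + 1;		// the value we installed
 *	}
 */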
	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */
__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
	mov	pc, lr

#else

	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	mov	pc, lr

#endif
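/*
 * Editorial note -- not part of the original source.  A minimal C sketch of
 * reading the thread pointer through the fixed entry point documented above
 * (the wrapper name user_read_tp is illustrative only):
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 *	static void *user_read_tp(void)
 *	{
 *		return (void *)__kernel_get_tls();
 *	}
 *
 * On CONFIG_HAS_TLS_REG kernels this reads the CP15 TLS register; otherwise
 * it returns the word that __switch_to stored at 0xffff0ff0 (see above).
 */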
	.rep	5
	.word	0			@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
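/*
 * Editorial note -- not part of the original source.  Helpers are numbered
 * downwards from the top of the vector page (get_tls is #1, cmpxchg is #2,
 * memory_barrier is #3), so the word emitted above works out to 3 for this
 * file.  A minimal sketch of a user-space guard before relying on
 * __kernel_cmpxchg:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version < 2) {
 *		// cmpxchg helper not provided by this kernel; use a fallback
 *	}
 */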
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
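/*
 * Editorial note -- not part of the original source.  How the stubs_offset
 * arithmetic in the vector table above works: the vector page setup code
 * (early_trap_init() in traps.c) copies __vectors_start..__vectors_end to
 * 0xffff0000 and __stubs_start..__stubs_end to 0xffff0200.  A branch such as
 *
 *	b	vector_und + stubs_offset
 *
 * is PC-relative, so the assembler encodes the displacement
 *
 *	(vector_und + __vectors_start + 0x200 - __stubs_start) - (pc + 8)
 *
 * Once both blocks have been copied, the instruction executes at
 * 0xffff0000 + (pc - __vectors_start), and that same relative displacement
 * now resolves to 0xffff0200 + (vector_und - __stubs_start), which is
 * exactly where vector_und lands in the copied stubs block.
 */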
