entry-armv.S

Linux kernel source code
 */
	.data
ENTRY(fp_enter)
	.word	no_fp
	.text

no_fp:	mov	pc, lr

__und_usr_unknown:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	mov	r7, #0xff00		@ 0xfff0 into r7 for EABI
	orr	r7, r7, #0xf0
	swi	#0x9ffff0
	ldmfd	sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
	b	__kuser_memory_barrier
#else
	usr_ret	lr
#endif

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
#endif
	usr_ret	lr

	.rep	5
	.word	0			@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
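
The kuser helpers above are reachable only through the fixed addresses listed in their comment blocks (0xffff0fa0 for the memory barrier, 0xffff0fc0 for cmpxchg, 0xffff0fe0 for get_tls, 0xffff0ffc for the version word). Below is a minimal user-space binding sketch: the typedefs and addresses are taken directly from the comments in this file, while the wrapper code and the version thresholds are assumptions based on the rule that new helpers are added in front of existing ones (so get_tls is the oldest helper and the memory barrier the newest).

/*
 * Minimal user-space binding sketch for the kuser helpers above.
 * Typedefs and addresses come from the comment blocks in this file;
 * the version thresholds assume get_tls = helper 1, cmpxchg = 2,
 * memory barrier = 3 (new helpers are added in front).
 */
#include <stdio.h>

typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb		(*(__kernel_dmb_t *)0xffff0fa0)
#define __kernel_helper_version	(*(unsigned int *)0xffff0ffc)

int main(void)
{
	unsigned int version = __kernel_helper_version;

	printf("kuser helper version: %u\n", version);

	if (version >= 3)
		__kernel_dmb();	/* full memory barrier via the kernel helper */
	else
		printf("memory barrier helper not available\n");

	return 0;
}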
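
The comment above __kuser_cmpxchg already shows a hand-written assembly atomic_add. A plainer (and slower) C variant under the same documented contract is sketched below: call the helper through a function pointer at 0xffff0fc0 and retry until it reports that *ptr was updated (return value zero). The function name atomic_add_kuser is illustrative only, not a kernel interface.

/*
 * C retry loop built on the documented __kernel_cmpxchg contract:
 * it returns zero (and sets the C flag) only if *ptr was updated.
 * atomic_add_kuser is an illustrative name for this sketch.
 */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

static int atomic_add_kuser(int *ptr, int val)
{
	int old;

	do {
		old = *ptr;	/* snapshot the current value */
		/* retry while another thread won the race */
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);

	return old + val;	/* the value this call installed */
}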
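
Similarly, __kernel_get_tls at 0xffff0fe0 can be called through a function pointer instead of the inline-asm wrapper shown in its comment; it returns whatever was last set with the __ARM_NR_set_tls syscall (or the hardware TLS register on CPUs that have one). The wrapper name get_thread_pointer below is illustrative.

/*
 * Function-pointer form of the __kernel_get_tls example above.
 * get_thread_pointer is an illustrative name for this sketch.
 */
typedef int (__kernel_get_tls_t)(void);
#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)

static void *get_thread_pointer(void)
{
	/* TLS value as last set via the __ARM_NR_set_tls syscall */
	return (void *)__kernel_get_tls();
}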
