/*
 * entry.S -- SH-5 (sh64) kernel entry support: real-mode poke helper,
 * user-access (uaccess) primitives, default signal restorers, the
 * __ex_table fault-fixup table, and trap_init.
 *
 * Extracted from the linux-2.4.29 source tree (page 1 of 4 of a
 * 2,106-line file); this chunk begins mid-file, so the first two
 * instructions below are the tail of a routine defined earlier.
 */
	/* NOTE(review): this ptabs/blink pair is the return sequence (RTS idiom
	   used throughout this file) of the routine that precedes this chunk;
	   its body is outside the visible page. */
	ptabs	r18, tr0
	blink	tr0, r63

	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.
	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).
	   This code is not performance critical
	*/
	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* r1 = SR block bit (bit 28) */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* r36 = SR MMU bit (bit 31) */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
	putcon	r1, ssr
	_loada	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	_loada	1f, r37		/* virtual mode return addr */
	putcon	r36, spc
	synco
	rte			/* enter .poke0 in real mode via ssr/spc */
	nop
.poke0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr		/* restore the original SR for the return rte */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop
1:	ptabs	r18, tr0
	blink	tr0, r63

/*
 * --- User Access Handling Section
 */

/*
 * User Access support. It all moved to non inlined Assembler
 * functions in here.
 *
 * __kernel_size_t __copy_user(void *__to, const void *__from,
 *			       __kernel_size_t __n)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  source address
 * (r4)  size in bytes
 *
 * Outputs:
 * (*r2) target data
 * (r2)  non-copied bytes
 *
 * If a fault occurs on the user pointer, bail out early and return the
 * number of bytes not copied in r2.
 * Strategy : for large blocks, call a real memcpy function which can
 * move >1 byte at a time using unaligned ld/st instructions, and can
 * manipulate the cache using prefetch + alloco to improve the speed
 * further.  If a fault occurs in that function, just revert to the
 * byte-by-byte approach used for small blocks; this is rare so the
 * performance hit for that case does not matter.
 *
 * For small blocks it's not worth the overhead of setting up and calling
 * the memcpy routine; do the copy a byte at a time.
 *
 */
	.global	__copy_user
__copy_user:
	_ptar	__copy_user_byte_by_byte, t1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, t1	/* small copy (r4 <= 16): skip the memcpy path */
	_ptar copy_user_memcpy, t0
	/* r15 serves as the stack pointer here: open a 32-byte frame */
	addi	r15, -32, r15
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	r15, 0, r2
	st.q	r15, 8, r3
	st.q	r15, 16, r4
	st.q	r15, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld r15,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	t0, LINK	/* call copy_user_memcpy */

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	r15, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	r15, 32, r15	/* pop the frame */
	blink	tr0, r63 ! RTS

	/* Entered from the page-fault fixup path when copy_user_memcpy
	   faulted; unwinds the frame set up above, then falls through to
	   the byte-by-byte copy below. */
	.global __copy_user_fixup
__copy_user_fixup:
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	r15, 24, r35
	ld.q	r15, 16, r4
	ld.q	r15,  8, r3
	ld.q	r15,  0, r2
	addi	r15, 32, r15
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated.
 */
/* Byte-at-a-time copy.  The ___copy_user1/2 fault labels below are
   registered in the __ex_table section so a user-space fault lands on
   ___copy_user_exit with r4 = bytes remaining. */
__copy_user_byte_by_byte:
	_ptar	___copy_user_exit, t1
	_ptar	___copy_user1, t0
	beq/u	r4, r63, t1	/* early exit for zero length copy */
	sub	r2, r3, r0	/* r0 = dest - src - 1: store offset for stx.b */
	addi	r0, -1, r0

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne     r4, ZERO, t0

___copy_user_exit:
	or	r4, ZERO, r2		/* return bytes NOT copied */
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  size in bytes
 *
 * Outputs:
 * (*r2) zero-ed target data
 * (r2)  non-zero-ed bytes
 */
	.global	__clear_user
__clear_user:
	_ptar	___clear_user_exit, t1
	_ptar	___clear_user1, t0
	beq/u	r3, r63, t1		/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne     r3, ZERO, t0

___clear_user_exit:
	or	r3, ZERO, r2		/* return bytes NOT zeroed */
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
 *			   int __count)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  source address
 * (r4)  maximum size in bytes
 *
 * Outputs:
 * (*r2) copied data
 * (r2)  -EFAULT (in case of faulting)
 *       copied data (otherwise)
 */
	.global	__strncpy_from_user
__strncpy_from_user:
	_ptar	___strncpy_from_user1, t0
	_ptar	___strncpy_from_user_done, t1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, t1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7		/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, t1		/* stop at the NUL terminator */
	addi	r4, -1, r4		/* return real number of copied bytes */
	bne/l	ZERO, r4, t0

___strncpy_from_user_done:
	sub	r5, r4, r6		/* If done, return copied */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * extern long __strnlen_user(const char *__s, long __n)
 *
 * Inputs:
 * (r2)  source address
 * (r3)  source size in bytes
 *
 * Outputs:
 * (r2)  -EFAULT (in case of faulting)
 *       string length (otherwise)
 */
	.global	__strnlen_user
__strnlen_user:
	_ptar	___strnlen_user_set_reply, t0
	_ptar	___strnlen_user1, t1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, t0

___strnlen_user1:
	ldx.b	r2, r5, r7		/* Fault address: only in reading */
	addi	r3, -1, r3		/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, t0		/* scanned __n bytes without a NUL */
	bne	r7, ZERO, t1		/* keep scanning until NUL */
! The line below used to be active.  This meant led to a junk byte lying between each pair
! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
!	addi	r5, 1, r5		/* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6		/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * extern long __get_user_asm_?(void *val, long addr)
 *
 * Inputs:
 * (r2)  dest address
 * (r3)  source address (in User Space)
 *
 * Outputs:
 * (r2)  -EFAULT (faulting)
 *       0 	 (not faulting)
 *
 * Each variant preloads r2 with -EFAULT; the ___get_user_asm_?1 load is
 * the faulting instruction (fixed up to the _exit label by __ex_table),
 * so r2 is only cleared to 0 after the load succeeded.
 */
	.global	__get_user_asm_b
__get_user_asm_b:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_b_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__get_user_asm_w
__get_user_asm_w:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_w_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__get_user_asm_l
__get_user_asm_l:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_l_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__get_user_asm_q
__get_user_asm_q:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_q_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * extern long __put_user_asm_?(void *pval, long addr)
 *
 * Inputs:
 * (r2)  kernel pointer to value
 * (r3)  dest address (in User Space)
 *
 * Outputs:
 * (r2)  -EFAULT (faulting)
 *       0 	 (not faulting)
 *
 * Mirror image of __get_user_asm_?: here the ___put_user_asm_?1 store is
 * the faulting instruction covered by the __ex_table fixup.
 */
	.global	__put_user_asm_b
__put_user_asm_b:
	ld.b	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_b_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__put_user_asm_w
__put_user_asm_w:
	ld.w	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_w_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__put_user_asm_l
__put_user_asm_l:
	ld.l	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_l_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

	.global	__put_user_asm_q
__put_user_asm_q:
	ld.q	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_q_exit:
	ptabs	LINK, t0
	blink	t0, ZERO

/*
 * --- Signal Handling Section
 */

/*
 * extern long long _sa_default_rt_restorer
 * extern long long _sa_default_restorer
 *
 *		 or, better,
 *
 * extern void _sa_default_rt_restorer(void)
 * extern void _sa_default_restorer(void)
 *
 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
 * from user space. Copied into user space by signal management.
 * Both must be quad aligned and 2 quad long (4 instructions).
 *
 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* r9 = (0x10 << 16) | __NR_rt_sigreturn via movi+shori, then trap.
	   NOTE(review): the 0x10 high bits look like the trapa convention used
	   by this port for syscalls — confirm against the sh64 entry code. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop

	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop

/*
 * --- __ex_table Section
 */

/*
 * User Access Exception Table.
 * Pairs of (faulting address, fixup address): a fault at the first label
 * resumes execution at the second, which returns the error/remainder.
 */
	.section	__ex_table,  "a"

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
	.long	___strncpy_from_user1, ___strncpy_from_user_exit
	.long	___strnlen_user1, ___strnlen_user_exit
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:

/*
 * --- .text.init Section
 */
	.section	.text.init, "ax"

/*
 * void trap_init (void)
 *
 * Install the exception vector bases (VBR, RESVEC), sanity-check that the
 * vector block is exactly BLOCK_SIZE bytes (looping forever if not), then
 * unblock exceptions in SR.
 */
	.global	trap_init
trap_init:
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	_loada	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	_loada	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	_loada	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19			/* r19 = expected end of block */

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	_ptar	trap_init_loop, t1
	gettr	t1, r28				/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, t1

	/* Now that exception vectors are set up reset SR.BL */
	getcon 	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	addi	SP, 24, SP
	ptabs	LINK, t0
	blink	t0, ZERO

/* (end of extracted chunk -- code-viewer UI text removed) */