/*
 * head_e500.s — low-level exception/head code for the Freescale e500
 * (PowerPC Book E) core, from the Linux 2.6.8 kernel sources shipped
 * with the YouLong 2410 board support package.
 *
 * NOTE(review): captured from a web code viewer that labeled this
 * "page 1 of 3, 1,332 lines total"; the excerpt begins mid-way through
 * the exception vector table, so earlier parts of the file are not shown.
 */
/*
 * NOTE(review): the code below was re-flowed from a web capture that had
 * collapsed many source lines onto a few physical lines.  Instruction
 * tokens and ordering are unchanged; only the original line breaks were
 * restored and a few reviewer comments added.
 */
	EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
#else
	EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE)

	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, UnknownException, EXC_XFER_EE)

/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_SRR1, which will still have
 * the MSR_DE bit set.
 */
	/* Debug Interrupt */
	START_EXCEPTION(Debug)
	CRITICAL_EXCEPTION_PROLOG

	/*
	 * If this is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */
	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r10,r10,(DBSR_IC|DBSR_BT)@h
	beq+	1f
	andi.	r0,r9,MSR_PR		/* check supervisor */
	beq	2f			/* branch if we need to fix it up... */

	/* continue normal handling for a critical exception... */
1:	mfspr	r4,SPRN_DBSR
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)

	/* here it looks like we got an inappropriate debug exception. */
2:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r10		/* clear the IC/BT debug intr status */

	/* restore state and get out
	 * (r9/r11/r12 presumably hold CSRR1/stack frame/CSRR0 as set up by
	 * CRITICAL_EXCEPTION_PROLOG — the macro body is not in this excerpt) */
	lwz	r10,_CCR(r11)
	lwz	r0,GPR0(r11)
	lwz	r1,GPR1(r11)
	mtcrf	0x80,r10
	mtspr	CSRR0,r12
	mtspr	CSRR1,r9
	lwz	r9,GPR9(r11)

	mtspr	SPRG2,r8;		/* SPRG2 only used in criticals */
	lis	r8,crit_save@ha;
	lwz	r10,crit_r10@l(r8)
	lwz	r11,crit_r11@l(r8)
	mfspr	r8,SPRG2

	rfci
	b	.			/* should never be reached after rfci */

/*
 * Local functions
 */

	/*
	 * Data TLB exceptions will bail out to this point
	 * if they can't resolve the lightweight TLB fault.
	 */
data_access:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h	/* cache-locking faults go their own way */
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 * 	r10 - EA of fault
 * 	r11 - TLB (info from Linux PTE)
 * 	r12, r13 - available to use
 * 	CR5 - results of addr < TASK_SIZE
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
	mfspr	r12, SPRN_MAS2
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
	mtspr	SPRN_MAS2, r12

	bge	5, 1f	/* addr > TASK_SIZE */

	/* user address: derive U* permission bits from the Linux PTE */
	li	r10, (MAS3_UX | MAS3_UW | MAS3_UR)
	andi.	r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
	andi.	r12, r11, _PAGE_USER	/* Test for _PAGE_USER */
	iseleq	r12, 0, r10
	and	r10, r12, r13
	srwi	r12, r10, 1
	or	r12, r12, r10	/* Copy user perms into supervisor */
	b	2f

	/* addr <= TASK_SIZE */
1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
	ori	r12, r12, (MAS3_SX | MAS3_SR)

2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
	tlbwe

	/* Done...restore registers and get out of here.  */
	mfspr	r11, SPRG7R
	mtcr	r11
	mfspr	r13, SPRG5R
	mfspr	r12, SPRG4R
	mfspr	r11, SPRG1
	mfspr	r10, SPRG0
	rfi					/* Force context change */

#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_spe:
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
	SAVE_32EVR(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10	/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h		/* r9 presumably holds the saved SRR1 — set by code outside this excerpt */
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5		/* reload accumulator via evmra */
	evmra	evr4,evr4
	REST_32EVR(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* CONFIG_SMP */

	/* restore registers and return */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SRR1,r9
	mtspr	SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	rfi

/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h	/* r3 = format string (arg1 of printk) */
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
	.align	4,0

#endif /* CONFIG_SPE */

/*
 * Global functions
 */

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry in to the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
	lis	r4,TLBCAM@ha
	addi	r4,r4,TLBCAM@l
	mulli	r5,r3,20	/* each TLBCAM entry is 20 bytes (5 words; MAS0-MAS3 used here) */
	add	r3,r5,r4
	lwz	r4,0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,4(r3)
	mtspr	SPRN_MAS1,r4
	lwz	r4,8(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,12(r3)
	mtspr	SPRN_MAS3,r4
	tlbwe
	isync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	SYNC
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0			/* CR0 consumed by the beq after the saves below */
	SAVE_32EVR(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f			/* no pt_regs: skip the MSR fixup */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The e500 core does not have an FPU.
 */
_GLOBAL(giveup_fpu)
	blr

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	/* NOTE(review): no blr — the DBCR0_RST_CHIP write above presumably
	 * resets the chip, so execution is not expected to fall through
	 * into set_context. */

_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
_GLOBAL(sdata)
_GLOBAL(empty_zero_page)
	.space	4096
_GLOBAL(swapper_pg_dir)
	.space	4096

	.section .bss
/* Stack for handling critical exceptions from kernel mode */
critical_stack_bottom:
	.space 4096
critical_stack_top:
	.previous

/* Stack for handling machine check exceptions from kernel mode */
mcheck_stack_bottom:
	.space 4096
mcheck_stack_top:
	.previous

/*
 * This area is used for temporarily saving registers during the
 * critical and machine check exception prologs. It must always
 * follow the page aligned allocations, so it starts on a page
 * boundary, ensuring that all crit_save areas are in a single
 * page.
 */

/* crit_save */
_GLOBAL(crit_save)
	.space  4
_GLOBAL(crit_r10)
	.space	4
_GLOBAL(crit_r11)
	.space	4
_GLOBAL(crit_sprg0)
	.space	4
_GLOBAL(crit_sprg1)
	.space	4
_GLOBAL(crit_sprg4)
	.space	4
_GLOBAL(crit_sprg5)
	.space	4
_GLOBAL(crit_sprg7)
	.space	4
_GLOBAL(crit_pid)
	.space	4
_GLOBAL(crit_srr0)
	.space	4
_GLOBAL(crit_srr1)
	.space	4

/* mcheck_save */
_GLOBAL(mcheck_save)
	.space  4
_GLOBAL(mcheck_r10)
	.space	4
_GLOBAL(mcheck_r11)
	.space	4
_GLOBAL(mcheck_sprg0)
	.space	4
_GLOBAL(mcheck_sprg1)
	.space	4
_GLOBAL(mcheck_sprg4)
	.space	4
_GLOBAL(mcheck_sprg5)
	.space	4
_GLOBAL(mcheck_sprg7)
	.space	4
_GLOBAL(mcheck_pid)
	.space	4
_GLOBAL(mcheck_srr0)
	.space	4
_GLOBAL(mcheck_srr1)
	.space	4
_GLOBAL(mcheck_csrr0)
	.space	4
_GLOBAL(mcheck_csrr1)
	.space	4

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
_GLOBAL(cmd_line)
	.space	512

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

/*
 * (End of excerpt.  The remaining captured lines were web-viewer chrome —
 * keyboard-shortcut help for copy/search/fullscreen/zoom — not source code.)
 */