⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 head.s

📁 内核linux2.4.20,可跟rtlinux3.2打补丁 组成实时linux系统,编译内核
💻 S
📖 第 1 页 / 共 4 页
字号:
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	stw	r3,ORIG_GPR3(r21)
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR */
	bl	transfer_to_handler
	.long	DoSyscall
	.long	ret_from_except

/* Single step - not used on 601 */
	STD_EXCEPTION(0xd00, SingleStep, SingleStepException)

	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 */
	. = 0xf00
	b	Trap_0f
trap_0f_cont:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	bl	transfer_to_handler
	.long	UnknownException
	.long	ret_from_except

	. = 0xf20
#ifdef CONFIG_ALTIVEC
	b	AltiVecUnavailable
#endif
Trap_0f:
	EXCEPTION_PROLOG
	b	trap_0f_cont

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 *
 * Walks the two-level Linux page table in real mode, and on a hit
 * converts the Linux PTE to the hardware PTE format expected in RPA
 * before reloading the ITLB with tlbli.  On any failure it falls
 * through to InstructionAddressInvalid, which fakes up SRR1/DSISR/DAR
 * and branches to the normal InstructionAccess fault path.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,IMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	RPA,r1
	mfspr	r3,IMISS
	tlbli	r3			/* load the ITLB entry for the missed address */
	mfspr	r3,SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	DSISR,r1	/* (shouldn't be needed) */
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SRR1,r2
	mfspr	r1,IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	DAR,r1		/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16	/* leave the temporary-GPR set used by the miss handler */
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 *
 * Same page-table walk as InstructionTLBMiss above, but keyed off
 * DMISS and reloading the DTLB with tlbld; failures go to
 * DataAddressInvalid and then the normal DataAccess fault path.
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3			/* load the DTLB entry for the missed address */
	mfspr	r3,SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	DSISR,r1
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SRR1,r2
	mfspr	r1,DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	DAR,r1		/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16	/* leave the temporary-GPR set used by the miss handler */
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 *
 * As DataLoadTLBMiss, but the access check also requires _PAGE_RW,
 * and a hit sets both _PAGE_ACCESSED and _PAGE_DIRTY in the PTE.
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3			/* load the DTLB entry for the missed address */
	mfspr	r3,SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi

/* Remaining architected/reserved vectors: default to UnknownException
 * via STD_EXCEPTION, except where a specific handler exists. */
	STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
	STD_EXCEPTION(0x1400, SMI, SMIException)
	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, TAUException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, RunMode, RunModeException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)

	.	/* NOTE(review): scrape artifact — this '.' begins the
		 * '. = 0x3000' origin directive whose '= 0x3000' half
		 * is on the next physical line of the capture. */
= 0x3000	/* NOTE(review): scrape artifact — continuation of the
		 * '. = 0x3000' origin directive begun on the previous
		 * physical line of the capture. */
#ifdef CONFIG_ALTIVEC
/* AltiVec-unavailable trap, remapped here from 0xf20 (see above).
 * From user mode, load the AltiVec state; from kernel mode, trap. */
AltiVecUnavailable:
	EXCEPTION_PROLOG
	bne	load_up_altivec		/* if from user, just load it up */
	li	r20,MSR_KERNEL
	bl	transfer_to_handler	/* if from kernel, take a trap */
	.long	KernelAltiVec
	.long	ret_from_except
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC64BRIDGE
/* On PPC64BRIDGE these access/segment exceptions save state here and
 * continue at the *Cont handlers (defined elsewhere in this file). */
DataAccess:
	EXCEPTION_PROLOG
	b	DataAccessCont
InstructionAccess:
	EXCEPTION_PROLOG
	b	InstructionAccessCont
DataSegment:
	EXCEPTION_PROLOG
	b	DataSegmentCont
InstructionSegment:
	EXCEPTION_PROLOG
	b	InstructionSegmentCont
#endif /* CONFIG_PPC64BRIDGE */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 *
 * On entry (set up by EXCEPTION_PROLOG and the vector stubs):
 *   r20 = MSR value for the handler, r21 = exception frame pointer,
 *   r22 = SRR0 (NIP), r23 = SRR1 (MSR); the two .long words after the
 *   caller's bl give the handler address and the return path.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	stw	r23,_MSR(r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	andi.	r23,r23,MSR_PR
	mfspr	r23,SPRG3		/* if from user, fix up THREAD.regs */
	beq	2f
	addi	r24,r1,STACK_FRAME_OVERHEAD
	stw	r24,PT_REGS(r23)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	mfspr	r22,SPRN_VRSAVE		/* if G4, save vrsave register value */
	stw	r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
2:	addi	r2,r23,-THREAD		/* set r2 to current */
	tovirt(r2,r2)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */
	addi	r24,r2,TASK_STRUCT_SIZE	/* check for kernel stack overflow */
	cmplw	0,r1,r2
	cmplw	1,r1,r24
	crand	1,1,4
	bgt-	stack_ovf		/* if r2 < r1 < r2+TASK_STRUCT_SIZE */
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	FIX_SRR1(r20,r22)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = regs, argument to StackOverflow */
	lis	r1,init_task_union@ha
	addi	r1,r1,init_task_union@l
	addi	r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	lis	r24,StackOverflow@ha
	addi	r24,r24,StackOverflow@l
	li	r20,MSR_KERNEL
	FIX_SRR1(r20,r22)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	SYNC
	RFI

/*
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch.  -- Cort
 */
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r6,0                    /* get __pa constant */
	tophys(r6,r6)
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f			/* no previous FPU owner: skip the save */
	add	r4,r4,r6
	addi	r4,r4,THREAD	        /* want THREAD of last_task_used_math */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ori	r23,r23,MSR_FP|MSR_FE0|MSR_FE1
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD		/* record current as the new FPU owner */
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h		/* r3 = format string for printk */
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/* NOTE(review): this definition is truncated by the page capture;
 * its body continues beyond this chunk.
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -