
📄 head.s

📁 An embedded Linux kernel, version 2.4.21
📖 Page 1 of 3
	xor	r1,r1,r2
	mtspr	DAR,r1		/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	DSISR,r1
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SRR1,r2
	mfspr	r1,DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	DAR,r1		/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi

	STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
	STD_EXCEPTION(0x1400, SMI, SMIException)
	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, TAUException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, RunMode, RunModeException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)

	. = 0x3000

#ifdef CONFIG_ALTIVEC
AltiVecUnavailable:
	EXCEPTION_PROLOG
	bne	load_up_altivec		/* if from user, just load it up */
	li	r20,MSR_KERNEL
	bl	transfer_to_handler	/* if from kernel, take a trap */
	.long	KernelAltiVec
	.long	ret_from_except
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC64BRIDGE
DataAccess:
	EXCEPTION_PROLOG
	b	DataAccessCont

InstructionAccess:
	EXCEPTION_PROLOG
	b	InstructionAccessCont

DataSegment:
	EXCEPTION_PROLOG
	b	DataSegmentCont

InstructionSegment:
	EXCEPTION_PROLOG
	b	InstructionSegmentCont
#endif /* CONFIG_PPC64BRIDGE */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	stw	r23,_MSR(r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	andi.	r23,r23,MSR_PR
	mfspr	r23,SPRG3		/* if from user, fix up THREAD.regs */
	addi	r2,r23,-THREAD		/* set r2 to current */
	beq	2f
	addi	r24,r1,STACK_FRAME_OVERHEAD
	stw	r24,PT_REGS(r23)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	mfspr	r22,SPRN_VRSAVE		/* if G4, save vrsave register value */
	stw	r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifndef CONFIG_6xx
2:
#endif
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	tovirt(r2,r2)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */
	addi	r24,r2,TASK_STRUCT_SIZE	/* check for kernel stack overflow */
	cmplw	0,r1,r2
	cmplw	1,r1,r24
	crand	1,1,4
	bgt-	stack_ovf		/* if r2 < r1 < r2+TASK_STRUCT_SIZE */
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	FIX_SRR1(r20,r22)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
2:	/* Out of line case when returning to kernel,
	 * check return from power_save_6xx
	 */
	mfspr	r24,SPRN_HID0
	mtcr	r24
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	b	transfer_to_handler_cont
#endif /* CONFIG_6xx */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_task_union@ha
	addi	r1,r1,init_task_union@l
	addi	r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	lis	r24,StackOverflow@ha
	addi	r24,r24,StackOverflow@l
	li	r20,MSR_KERNEL
	FIX_SRR1(r20,r22)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	SYNC
	RFI

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r23,r23,MSR_FP		/* enable FP for current */
	or	r23,r23,r4
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsr	r5			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
#ifndef CONFIG_APUS
	lis	r6,-KERNELBASE@h
#else
	lis	r6,CYBERBASEp@h
	lwz	r6,0(r6)
#endif
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r23,r23,MSR_VEC@h
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r20,THREAD_VSCR
	LVX(vr0,r20,r5)
	MTVSCR(vr0)
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
	.align	4

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	mtmsr	r5			/* enable use of AltiVec now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	mtmsr	r5			/* enable use of fpu now */
	SYNC_601
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_LINE_SIZE/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
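
Both DataLoadTLBMiss and DataStoreTLBMiss above do the same software page-table walk: the top 10 bits of the faulting address select the pmd entry (rlwimi r2,r3,12,20,29), and the next 10 bits select the PTE within the PTE page (rlwimi r2,r3,22,20,29). A rough C equivalent of that walk is sketched below, assuming the classic 32-bit layout of 1024-entry tables and 4 KB pages; the helper name and types are illustrative, and the tophys (physical-address) handling is left out.

#include <stddef.h>
#include <stdint.h>

typedef uint32_t pte_t;

/* Sketch of the walk done by the two TLB-miss handlers above, with the
 * MMU-off/physical-address details omitted.  A zero pmd entry means
 * "no mapping" and corresponds to the DataAddressInvalid exit. */
static pte_t *walk_pte(uint32_t *pgdir, uint32_t addr)
{
	uint32_t pmd = pgdir[addr >> 22];			/* top 10 bits -> pmd entry */
	uint32_t *pte_page = (uint32_t *)(pmd & 0xfffff000u);	/* rlwinm. r2,r2,0,0,19 */

	if (pte_page == NULL)
		return NULL;					/* no mapping: take the real fault */
	return &pte_page[(addr >> 12) & 0x3ff];			/* next 10 bits -> pte */
}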
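
The one-line comment `PP = user? (rw&dirty? 2: 3): 0` in DataLoadTLBMiss is the heart of the Linux-PTE to hardware-PTE conversion: kernel pages get PP = 0 (no user access), user pages get PP = 2 (read/write) only when they are both writable and already dirty, and PP = 3 (read-only) otherwise. That is why the first store to a clean page faults into DataStoreTLBMiss, which then sets _PAGE_ACCESSED|_PAGE_DIRTY. A minimal C restatement of the decision (the helper is purely illustrative; the handler computes it with rlwinm/rlwimi/andc on the PTE word):

/* PP values as the handlers above use them (roughly):
 * 0 = supervisor only, 2 = user read/write, 3 = user read-only. */
static unsigned int pp_bits(int user, int rw, int dirty)
{
	if (!user)
		return 0;			/* kernel mapping: no user access */
	return (rw && dirty) ? 2 : 3;		/* grant write only once the page is dirty */
}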
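
transfer_to_handler also guards against kernel stack overflow: two unsigned compares plus a crand branch to stack_ovf when the kernel stack pointer has grown down into the task_struct at the bottom of the task union (the `if r2 < r1 < r2+TASK_STRUCT_SIZE` comment). The test reduces to the following C, with illustrative names:

/* 'task' stands for r2 (current's task_struct, at the bottom of the task
 * union) and 'sp' for r1 (kernel stack pointer, which grows downward). */
static int kernel_stack_overflowed(unsigned long task, unsigned long sp,
				   unsigned long task_struct_size)
{
	return sp > task && sp < task + task_struct_size;
}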
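
load_up_fpu and giveup_fpu implement the lazy FPU switching the comments describe: on a uniprocessor the FP registers stay loaded across context switches, and only when some other task traps with "FP unavailable" is the previous owner's state saved and the new owner's state loaded. Below is a self-contained C sketch of that policy; the structs, the fpu variable and every name in it are illustrative stand-ins for thread_struct and the real FPRs (the actual code runs with translation off and flips MSR_FP in the saved register frame).

/* Sketch of lazy FPU switching on UP, following the comments around
 * load_up_fpu/giveup_fpu.  All names are illustrative. */
struct fp_state { double fpr[32]; unsigned int fpscr; };

struct task {
	struct fp_state fp;	/* per-task FP save area (thread_struct) */
	int msr_fp;		/* stand-in for MSR_FP in the task's saved MSR */
};

static struct task *last_task_used_math;	/* UP only: which task owns the FPU */
static struct fp_state fpu;			/* stand-in for the hardware FPRs */

/* Called on the "FP unavailable" trap, like load_up_fpu. */
void fp_unavailable(struct task *current_task)
{
	if (last_task_used_math && last_task_used_math != current_task) {
		last_task_used_math->fp = fpu;	/* SAVE_32FPRS: spill the old owner */
		last_task_used_math->msr_fp = 0; /* it traps again if it touches FP */
	}
	fpu = current_task->fp;			/* REST_32FPRS: load the new owner */
	current_task->msr_fp = 1;		/* return with FP enabled */
	last_task_used_math = current_task;
}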
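
Finally, copy_and_flush copies the kernel image one cache line at a time and, after each line, pushes it to memory with dcbst and invalidates the matching instruction-cache line with icbi, so the relocated code can be fetched and executed right away. A hedged C sketch of the same loop, assuming a 32-byte line size; it only makes sense when compiled for 32-bit PowerPC, and the function name and the constant are assumptions.

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE 32	/* assumption: L1_CACHE_LINE_SIZE on 603/604-class parts */

/* Copy [src, src+limit) to dst one cache line at a time, flushing each
 * line to memory and invalidating it in the icache, as copy_and_flush
 * does.  limit is assumed to be a multiple of CACHE_LINE. */
static void copy_and_flush_c(uint8_t *dst, const uint8_t *src, size_t limit)
{
	for (size_t off = 0; off < limit; off += CACHE_LINE) {
		for (size_t i = 0; i < CACHE_LINE; i += 4)	/* copy one line, a word at a time */
			*(uint32_t *)(dst + off + i) =
				*(const uint32_t *)(src + off + i);

		__asm__ __volatile__("dcbst 0,%0" : : "r"(dst + off) : "memory");	/* push line to memory */
		__asm__ __volatile__("sync" : : : "memory");				/* wait for the store */
		__asm__ __volatile__("icbi 0,%0" : : "r"(dst + off) : "memory");	/* invalidate icache line */
	}
	__asm__ __volatile__("sync; isync" : : : "memory");	/* final sync/isync, as the original notes for G4 */
}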
