
head.s

Low-level PowerPC assembly from the Linux kernel's early initialization path: exception vectors, 603/603e software TLB-miss handling, lazy FPU/AltiVec context switching, and relocation of the kernel image.
Page 1 of 3
 */
	. = 0x1100
DataLoadTLBMiss:
	MOL_HOOK_TLBMISS( 15 )
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r1,0(r2)		/* get linux-style pte */
	/* setup access flags in r3 */
	mfmsr	r3
	rlwinm	r3,r3,32-13,30,30	/* MSR_PR -> _PAGE_USER */
	ori	r3,r3,1			/* set _PAGE_PRESENT bit in access */
	/* save r2 and use it as scratch for the andc. */
	andc.	r3,r3,r1		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r1,r1,0x100		/* set _PAGE_ACCESSED in pte */
	stw	r1,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	/* this computation could be done better -- Cort */
	rlwinm	r3,r1,32-9,31,31	/* _PAGE_HWWRITE -> PP lsb */
	rlwimi	r1,r1,32-1,31,31	/* _PAGE_USER -> PP (both bits now) */
	ori	r3,r3,0xe04		/* clear out reserved bits */
	andc	r1,r1,r3		/* PP=2 or 0, when _PAGE_HWWRITE */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1			/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SRR1
	rlwinm	r1,r3,9,6,6		/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	DSISR,r1
	mtctr	r0			/* Restore CTR */
	andi.	r2,r3,0xFFFF		/* Clear upper bits of SRR1 */
	mtspr	SRR1,r2
	mfspr	r1,DMISS		/* Get failing address */
	rlwinm.	r2,r2,0,31,31		/* Check for little endian access */
	beq	20f			/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	DAR,r1			/* Set fault address */
	mfmsr	r0			/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3			/* Restore CR0 */
	sync				/* Some chip revs have problems here... */
	mtmsr	r0
	b	DataAccess
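The handler above (and the store-miss handler that follows) resolves the faulting address by walking the two-level software page table by hand. As a reading aid, here is a minimal C sketch of that walk; it is not part of head.s, and the 4 KB page size, 1024-entry tables, and the name find_linux_pte are assumptions made for illustration.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical illustration of the walk done by the 603 TLB-miss handlers:
 * the top 10 bits of the faulting address index the page directory, the
 * next 10 bits index the PTE page, and a zero entry means "no mapping". */
static uint32_t *find_linux_pte(uint32_t *pgdir, uint32_t ea)
{
	uint32_t pmd = pgdir[ea >> 22];		/* rlwimi r2,r3,12,20,29 ; lwz r2,0(r2) */
	uint32_t pte_page = pmd & ~0xfffu;	/* rlwinm. r2,r2,0,0,19                 */

	if (pte_page == 0)
		return NULL;			/* beq- DataAddressInvalid              */
	/* rlwimi r2,r3,22,20,29 ; lwz r1,0(r2) */
	return (uint32_t *)(uintptr_t)pte_page + ((ea >> 12) & 0x3ffu);
}

The access check then builds a required-permission mask (_PAGE_PRESENT, plus _PAGE_RW for stores, plus _PAGE_USER when MSR_PR is set) and faults if "required & ~pte" is non-zero, which is exactly what the andc. above computes.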
/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
	MOL_HOOK_TLBMISS( 16 )
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	tophys(r2,r2)
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r1,0(r2)		/* get linux-style pte */
	/* setup access flags in r3 */
	mfmsr	r3
	rlwinm	r3,r3,32-13,30,30	/* MSR_PR -> _PAGE_USER */
	ori	r3,r3,0x5		/* _PAGE_PRESENT|_PAGE_RW */
	/* save r2 and use it as scratch for the andc. */
	andc.	r3,r3,r1		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r1,r1,0x384		/* set _PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_RW|_PAGE_HWWRITE in pte */
	stw	r1,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	/* this computation could be done better -- Cort */
	rlwinm	r3,r1,32-9,31,31	/* _PAGE_HWWRITE -> PP lsb */
	rlwimi	r1,r1,32-1,31,31	/* _PAGE_USER -> PP (both bits now) */
	ori	r3,r3,0xe04		/* clear out reserved bits */
	andc	r1,r1,r3		/* PP=2 or 0, when _PAGE_HWWRITE */
	mtspr	RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1			/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
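Both miss handlers finish by converting the Linux PTE into the low word of a hardware PTE before writing it to the RPA register. The C sketch below mirrors that bit manipulation; it is not taken from this file, and the _PAGE_* bit values (0x002 _PAGE_USER, 0x200 _PAGE_HWWRITE, and so on) are inferred from the ori immediates used above and should be read as assumptions.

#include <stdint.h>

/* Sketch of "Convert linux-style PTE to low word of PPC-style PTE":
 * PP ends up 0b10 (user read/write) when _PAGE_USER and _PAGE_HWWRITE are
 * both set, 0b11 (user read-only) for user pages without _PAGE_HWWRITE,
 * and 0b00 (supervisor access only) for kernel pages. */
static uint32_t linux_pte_to_ppc_low(uint32_t pte)
{
	uint32_t hwwrite = (pte >> 9) & 1;	/* rlwinm r3,r1,32-9,31,31: _PAGE_HWWRITE -> PP lsb */
	pte = (pte & ~1u) | ((pte >> 1) & 1);	/* rlwimi r1,r1,32-1,31,31: copy _PAGE_USER into PP lsb */
	return pte & ~(hwwrite | 0xe04u);	/* ori r3,r3,0xe04 ; andc r1,r1,r3 */
}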
	STD_MOL_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, 11)
	STD_EXCEPTION(0x1400, SMI, SMIException)
	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, TAUException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_MOL_EXCEPTION(0x2000, RunMode, RunModeException, 5)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)

	. = 0x3000

#ifdef CONFIG_ALTIVEC
AltiVecUnavailable:
	EXCEPTION_PROLOG
	MOL_HOOK_RESTORE(12)
	bne	load_up_altivec		/* if from user, just load it up */
	li	r20,MSR_KERNEL
	bl	transfer_to_handler	/* if from kernel, take a trap */
	.long	KernelAltiVec
	.long	ret_from_except
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC64BRIDGE
DataAccess:
	EXCEPTION_PROLOG
	b	DataAccessCont
InstructionAccess:
	EXCEPTION_PROLOG
	b	InstructionAccessCont
DataSegment:
	EXCEPTION_PROLOG
	b	DataSegmentCont
InstructionSegment:
	EXCEPTION_PROLOG
	b	InstructionSegmentCont
#endif /* CONFIG_PPC64BRIDGE */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	stw	r23,_MSR(r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	andi.	r23,r23,MSR_PR
	mfspr	r23,SPRG3		/* if from user, fix up THREAD.regs */
	beq	2f
	addi	r24,r1,STACK_FRAME_OVERHEAD
	stw	r24,PT_REGS(r23)
#ifdef CONFIG_ALTIVEC
	mfpvr	r24			/* check if we are on a G4 */
	srwi	r24,r24,16
	cmpwi	r24,PVR_7400@h
	bne	2f
	mfspr	r22,SPRN_VRSAVE		/* if so, save vrsave register value */
	stw	r22,THREAD_VRSAVE(r23)
#endif /* CONFIG_ALTIVEC */
2:	addi	r2,r23,-THREAD		/* set r2 to current */
	tovirt(r2,r2)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,RESULT
	stwcx.	r22,r22,r21		/* to clear the reservation */
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */
	addi	r24,r2,TASK_STRUCT_SIZE	/* check for kernel stack overflow */
	cmplw	0,r1,r2
	cmplw	1,r1,r24
	crand	1,1,4
	bgt-	stack_ovf		/* if r2 < r1 < r2+TASK_STRUCT_SIZE */
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	FIX_SRR1(r20,r22)
	MOL_HOOK(6)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_task_union@ha
	addi	r1,r1,init_task_union@l
	addi	r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	lis	r24,StackOverflow@ha
	addi	r24,r24,StackOverflow@l
	li	r20,MSR_KERNEL
	FIX_SRR1(r20,r22)
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	SYNC
	RFI

/*
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch.  -- Cort
 */
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r6,0			/* get __pa constant */
	tophys(r6,r6)
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_math */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ori	r23,r23,MSR_FP|MSR_FE0|MSR_FE1
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4
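load_up_fpu above implements lazy FPU switching for the uniprocessor case: the FPU register file stays live in the hardware until some other task faults on an FP instruction. The C sketch below restates the idea only; struct task and the helper functions are placeholders invented for the example, not kernel APIs.

#include <stddef.h>

struct task;					/* placeholder for the kernel's task/thread structures */
extern void save_fp_state(struct task *t);	/* stand-in for SAVE_32FPRS + mffs/stfd                */
extern void restore_fp_state(struct task *t);	/* stand-in for lfd/mtfsf + REST_32FPRS                */
extern void clear_msr_fp(struct task *t);	/* stand-in for clearing MSR_FP in the saved regs      */

static struct task *last_task_used_math;	/* mirrors the variable used by load_up_fpu */

/* Called on an "FP unavailable" trap: hand the FPU to 'current'. */
void lazy_fpu_switch(struct task *current)
{
	if (last_task_used_math && last_task_used_math != current) {
		save_fp_state(last_task_used_math);	/* dump the old owner's FP regs into its thread_struct */
		clear_msr_fp(last_task_used_math);	/* it will trap again if it touches FP later           */
	}
	restore_fp_state(current);			/* load the current task's FP context                  */
	last_task_used_math = current;			/* current now owns the FPU                            */
}

On SMP, as the comment in the code says, this lazy scheme is abandoned and giveup_fpu is called on every context switch instead.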
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	mtmsr	r5			/* enable use of AltiVec now */
	SYNC
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
#ifndef CONFIG_APUS
	lis	r6,-KERNELBASE@h
#else
	lis	r6,CYBERBASEp@h
	lwz	r6,0(r6)
#endif
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20		/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r23,r23,MSR_VEC@h
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r20,THREAD_VSCR
	LVX(vr0,r20,r5)
	MTVSCR(vr0)
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)		/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
	.align	4

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
	.globl	giveup_altivec
giveup_altivec:
#ifdef CONFIG_MOL
	mflr	r4
	MOL_HOOK_MMU(13, r5)
	mtlr	r4
#endif
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	mtmsr	r5			/* enable use of AltiVec now */
	SYNC
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
	.globl	giveup_fpu
giveup_fpu:
#ifdef CONFIG_MOL
	mflr	r4
	MOL_HOOK_MMU(7, r5)
	mtlr	r4
#endif
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC
	mtmsr	r5			/* enable use of fpu now */
	SYNC
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,CACHELINE_WORDS
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
	neg	r11,r10			/* phys_to_virt constant */
	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	/* half of vp const in low half */
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b
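For reference, copy_and_flush above copies the kernel one cache line at a time and then issues dcbst/sync/icbi so the relocated image is visible to instruction fetch. Below is a hedged C rendering of the same loop using GCC inline assembly; the 32-byte cache-line size and the function name are assumptions for 603/604-class CPUs (the assembly uses the CACHELINE_WORDS constant instead), not values taken from this file.

#include <string.h>

#define CACHELINE_BYTES 32	/* assumed line size; head.s counts CACHELINE_WORDS words per line */

/* Copy len bytes (a multiple of the cache line size) and make the copy
 * safe to execute, as copy_and_flush does for the relocated kernel:
 * push each destination line out of the data cache and invalidate the
 * corresponding (stale) instruction-cache line. */
static void copy_and_flush_c(char *dst, const char *src, unsigned long len)
{
	for (unsigned long off = 0; off < len; off += CACHELINE_BYTES) {
		memcpy(dst + off, src + off, CACHELINE_BYTES);
		__asm__ volatile("dcbst 0,%0" : : "r"(dst + off) : "memory");	/* write the d-cache line to memory */
		__asm__ volatile("sync" : : : "memory");			/* wait for the store to complete   */
		__asm__ volatile("icbi 0,%0" : : "r"(dst + off) : "memory");	/* invalidate the stale i-cache line */
	}
	__asm__ volatile("isync" : : : "memory");				/* discard any prefetched instructions */
}

The listing is truncated here; pages 2 and 3 continue with the remainder of fix_mem_constants and the rest of head.s.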
