head.s

From the Linux 2.4.29 operating-system source tree · assembly (.S) file · 1,807 lines total · page 1 of 4

S
1,807
字号
/*
 * NOTE(review): this page begins in the MIDDLE of load_up_fpu — the
 * routine's label and its MSR[FP]-enable preamble are on the previous
 * page of this file and are not visible here.
 *
 * Register conventions visible in this chunk (established by the code
 * below, from the standard 2.4 PPC exception prologue):
 *   r21 = physical pointer to the saved-register exception frame
 *         (used with _CCR/_LINK/GPRxx offsets),
 *   r22 = return PC to load into SRR0,
 *   r23 = return MSR to load into SRR1,
 *   r6  = __pa conversion constant (UP lazy-switch paths only).
 * The MMU may be off on these paths, hence the tophys()/phys-pointer
 * arithmetic throughout.
 */
#ifndef CONFIG_SMP
	/* UP lazy FPU switch: if another task still owns the FPU, save its
	 * FP state into its thread_struct and clear MSR_FP in its saved MSR
	 * so its next FP use re-faults into this handler. */
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f			/* no previous owner — nothing to save */
	add	r4,r4,r6		/* convert task pointer to physical */
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)		/* dump all 32 FPRs into thread_struct */
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)	/* save FPSCR (low word of the double) */
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6		/* phys address of previous task's regs */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r23,r23,MSR_FP		/* enable FP for current */
	or	r23,r23,r4		/* plus the task's FP exception mode bits */
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0		/* restore the task's FPSCR */
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* record current as the new lazy owner (store the VIRTUAL pointer) */
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23		/* return MSR (now with MSR_FP set) */
	mtspr	SRR0,r22		/* return PC: retry the faulting FP insn */
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)		/* r21 last: it held the frame pointer */
	SYNC
	RFI

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	/* printk(fmt, current, nip) — args in r3/r4/r5 per the PPC ABI */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsr	r5			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
#ifndef CONFIG_APUS
	lis	r6,-KERNELBASE@h	/* r6 = __pa constant */
#else
	lis	r6,CYBERBASEp@h		/* APUS: kernel phys base known at runtime */
	lwz	r6,0(r6)
#endif
	/* UP lazy AltiVec switch: save the previous owner's vector state */
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f			/* no previous owner — nothing to save */
	add	r4,r4,r6		/* convert task pointer to physical */
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)		/* dump all 32 vector registers */
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)		/* save VSCR via vr0 */
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r23,r23,MSR_VEC@h
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r20,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)	/* flag that this task has used vectors */
	LVX(vr0,r20,r5)
	MTVSCR(vr0)			/* restore VSCR */
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	/* record current as the new lazy owner (virtual pointer) */
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return — same frame-restore tail as the
	 * FP path above; r21/r22/r23 conventions are identical */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
	.align	4

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * In:  r3 = task_struct pointer (virtual), may be NULL.
 * Clobbers r3-r5, vr0; called with the MMU on.
 */
	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	mtmsr	r5			/* enable use of AltiVec now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0			/* cr0 tested at 'beq 1f' below; the
					 * vector saves in between leave CR alone */
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f			/* kernel thread: no pt_regs to update */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)	/* drop lazy ownership */
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * In:  r3 = task_struct pointer (virtual), may be NULL.
 * Clobbers r3-r5, fr0.
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601			/* 601-only sync macros (defined elsewhere
					 * in this file) — no-ops on other CPUs */
	ISYNC_601
	mtmsr	r5			/* enable use of fpu now */
	SYNC_601
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD	        /* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0			/* cr0 tested at 'beq 1f' below; the FP
					 * stores in between leave CR alone */
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)	/* save FPSCR */
	beq	1f			/* kernel thread: no pt_regs to update */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)	/* drop lazy ownership */
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 * Runs with relocation off; r26 appears to hold a base for
 * addressing klimit (set by the caller — NOTE(review): confirm
 * against the startup code on an earlier page).
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h	/* r25 = phys size of kernel image */
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
	/* now executing from the copy at physical 0 */
4:	mr	r5,r25			/* copy limit = whole image */
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 * Copies one cache line per outer iteration, then dcbst+icbi so the
 * copied code is coherent in the instruction cache.
 */
copy_and_flush:
	addi	r5,r5,-4		/* pre-bias limit/offset: the inner */
	addi	r6,r6,-4		/* loop pre-increments r6 by 4 */
4:	li	r0,L1_CACHE_LINE_SIZE/4	/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4			/* undo the pre-bias */
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
	neg	r11,r10	                 /* phys_to_virt constant */

	/* Pass 1: patch every instruction listed in the __vtop table,
	 * inserting the new virt_to_phys constant into its low 16 bits. */
	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10	         /* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10	         /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)               /* virt address of instruction */
	add     r14,r14,r10              /* phys address of instruction */
	lwz     r15,0(r14)               /* instruction, now insert top */
	rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
	stw	r15,0(r14)               /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne     1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	DBAT3U,r8
	mtspr	DBAT3L,r8

	/* Pass 2: same patch loop for the __ptov table, inserting the
	 * phys_to_virt constant instead. */
	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10	         /* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10	         /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)               /* virt address of instruction */
	add     r14,r14,r10              /* phys address of instruction */
	lwz     r15,0(r14)               /* instruction, now insert top */
	rlwimi  r15,r11,16,16,31         /* half of pv const in low half */
	stw	r15,0(r14)               /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne     1b
	sync				/* additional sync needed on g4 */
	isync				/* No speculative loading until now */
	blr

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff0000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */

#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	/* Gemini boards: flash-invalidate and disable the icache before
	 * entering the common secondary-CPU path. */
	.globl	__secondary_start_gemini
__secondary_start_gemini:
        mfspr   r4,HID0
        ori     r4,r4,HID0_ICFI
        li      r3,0
        ori     r3,r3,HID0_ICE
        andc    r4,r4,r3
        mtspr   HID0,r4
        sync
        b       __secondary_start
#endif /* CONFIG_GEMINI */

	/* PowerSurge secondary-CPU entry stubs: one per CPU number,
	 * each just loads its CPU id into r24 and joins the common path. */
	.globl	__secondary_start_psurge
__secondary_start_psurge:
	li	r24,1			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge2
__secondary_start_psurge2:
	li	r24,2			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge3
__secondary_start_psurge3:
	li	r24,3			/* cpu # */
	b	__secondary_start_psurge99
__secondary_start_psurge99:
	/* we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	/*
	 * Common secondary-CPU startup: runs with translation off,
	 * r24 = this CPU's number.  Sets up per-CPU state, loads the
	 * MMU, then rfi's to start_secondary with translation on.
	 */
	.globl	__secondary_start
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	SYNC
	MTMSRD(r0)
	isync
#endif
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	identify_cpu
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */

	/* get current */
	lis	r2,current_set@h
	ori	r2,r2,current_set@l
	tophys(r2,r2)
	slwi	r24,r24,2		/* get current_set[cpu#] */
	lwzx	r2,r2,r24		/* r2 = this CPU's idle task */

	/* stack: top of this task's task_union, minus one frame */
	addi	r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)		/* zero back-chain word */

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRG3,r4
	li	r3,0
	mtspr	SPRG2,r3	/* 0 => r1 has kernel sp */
	stw	r3,PT_REGS(r4)	/* set thread.regs to 0 for kernel thread */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	SYNC
	RFI				/* turns translation on, enters C code */
#endif /* CONFIG_SMP */

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
_GLOBAL(__setup_cpu_power3)
	blr

⌨️ Keyboard shortcuts (code-viewer chrome, not part of the source file)

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?