
📄 head.s

📁 Linux 2.4.20 kernel source; it can be patched with RTLinux 3.2 to build a real-time Linux system, after which the kernel is compiled.
📖 Page 1 of 4
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsr	r5			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
#ifndef CONFIG_APUS
	lis	r6,-KERNELBASE@h
#else
	lis	r6,CYBERBASEp@h
	lwz	r6,0(r6)
#endif
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r23,r23,MSR_VEC@h
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r20,THREAD_VSCR
	LVX(vr0,r20,r5)
	MTVSCR(vr0)
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	mtcrf	0xff,r3
	mtlr	r4
	REST_GPR(1, r21)
	REST_4GPRS(3, r21)
	/* we haven't used ctr or xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	REST_GPR(20, r21)
	REST_2GPRS(22, r21)
	lwz	r21,GPR21(r21)
	SYNC
	RFI

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
	.align	4

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	mtmsr	r5			/* enable use of AltiVec now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC
	mtmsr	r5			/* enable use of fpu now */
	SYNC
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_LINE_SIZE/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	 /* virt_to_phys constant */
	neg	r11,r10			 /* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		 /* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		 /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		 /* virt address of instruction */
	add	r14,r14,r10		 /* phys address of instruction */
	lwz	r15,0(r14)		 /* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	 /* half of vp const in low half */
	stw	r15,0(r14)		 /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne	1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	DBAT3U,r8
	mtspr	DBAT3L,r8

	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10		 /* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10		 /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		 /* virt address of instruction */
	add	r14,r14,r10		 /* phys address of instruction */
	lwz	r15,0(r14)		 /* instruction, now insert top */
	rlwimi	r15,r11,16,16,31	 /* half of pv const in low half */
	stw	r15,0(r14)		 /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne	1b
	sync				/* additional sync needed on g4 */
	isync				/* No speculative loading until now */
	blr

apus_interrupt_entry:
	/* This is horrible, but there's no way around it. Enable the
	 * data cache so the IRQ hardware register can be accessed
	 * without cache intervention. Then disable interrupts and get
	 * the current emulated m68k IPL value.
	 */
	mfmsr	r20
	xori	r20,r20,MSR_DR
	SYNC
	mtmsr	r20
	isync

	lis	r4,APUS_IPL_EMU@h
	li	r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
	stb	r20,APUS_IPL_EMU@l(r4)
	eieio
	lbz	r3,APUS_IPL_EMU@l(r4)
	li	r2,IPLEMU_IPLMASK
	rlwinm.	r20,r3,32-3,29,31
	bne	2f
	mr	r20,r2		/* lvl7! Need to reset state machine. */
	b	3f
2:	cmp	0,r20,r2
	beq	1f
3:	eieio
	stb	r2,APUS_IPL_EMU@l(r4)
	ori	r20,r20,IPLEMU_SETRESET
	eieio
	stb	r20,APUS_IPL_EMU@l(r4)
1:	eieio
	li	r20,IPLEMU_DISABLEINT
	stb	r20,APUS_IPL_EMU@l(r4)

	/* At this point we could do some magic to avoid the overhead
	 * of calling the C interrupt handler in case of a spurious
	 * interrupt. Could not get a simple hack to work though.
	 */
	mfmsr	r20
	xori	r20,r20,MSR_DR
	SYNC
	mtmsr	r20
	isync

	stw	r3,(_CCR+4)(r21);
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	li	r20,MSR_KERNEL;
	bl	transfer_to_handler;
	.long	do_IRQ;
	.long	ret_from_except

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff0000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */

#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	mfspr	r4,HID0
	ori	r4,r4,HID0_ICFI
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3
	mtspr	HID0,r4
	sync
	bl	prom_init
	b	__secondary_start
#endif /* CONFIG_GEMINI */

	.globl	__secondary_start_psurge
__secondary_start_psurge:
	li	r24,1			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge2
__secondary_start_psurge2:
	li	r24,2			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge3
__secondary_start_psurge3:
	li	r24,3			/* cpu # */
	b	__secondary_start_psurge99
__secondary_start_psurge99:
	/* we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	SYNC
	MTMSRD(r0)
	isync
#endif
	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	identify_cpu
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */

	/* get current */
	lis	r2,current_set@h
	ori	r2,r2,current_set@l
	tophys(r2,r2)
	slwi	r24,r24,2		/* get current_set[cpu#] */
	lwzx	r2,r2,r24

	/* stack */
	addi	r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRG3,r4
	li	r3,0
	mtspr	SPRG2,r3	/* 0 => r1 has kernel sp */
	stw	r3,PT_REGS(r4)	/* set thread.regs to 0 for kernel thread */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SRR0,r3
	mtspr	SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

/*
 * Enable caches and 604-specific features if necessary.
 */
_GLOBAL(__setup_cpu_601)
	blr
_GLOBAL(__setup_cpu_603)
	b	setup_common_caches
_GLOBAL(__setup_cpu_604)
	mflr	r4
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r4
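For orientation, here is a rough C-level sketch (editorial, not part of head.S) of what the copy_and_flush routine above does: copy the image one cache line at a time, write each data-cache line back to memory, and invalidate the matching instruction-cache line. The 32-byte line size and the helper names flush_dcache_line/invalidate_icache_line are assumptions standing in for the dcbst, sync and icbi instructions, which have no portable C equivalent.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define L1_CACHE_LINE_SIZE 32	/* assumption: 32-byte L1 line on 603/604/750-class CPUs */

/* Stand-ins for dcbst/sync and icbi; real code must issue the CPU instructions. */
static void flush_dcache_line(const void *line)      { (void)line; }
static void invalidate_icache_line(const void *line) { (void)line; }

/* Copy 'limit' bytes from src to dest a cache line at a time, flushing the
 * data cache and invalidating the instruction cache for each line copied,
 * which is what copy_and_flush does while relocating the kernel image. */
static void copy_and_flush_c(uint8_t *dest, const uint8_t *src, size_t limit)
{
	size_t off;
	for (off = 0; off < limit; off += L1_CACHE_LINE_SIZE) {
		memcpy(dest + off, src + off, L1_CACHE_LINE_SIZE);
		flush_dcache_line(dest + off);
		invalidate_icache_line(dest + off);
	}
}

int main(void)
{
	static uint8_t src[4 * L1_CACHE_LINE_SIZE] = { 0x48, 0x00, 0x00, 0x00 };	/* arbitrary test bytes */
	static uint8_t dst[sizeof src];

	copy_and_flush_c(dst, src, sizeof src);
	return dst[0] == src[0] ? 0 : 1;
}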
