📄 head.s
#else
	rldimi	r22,r20,15,48		/* Insert desired EE value */
#endif
	mtmsrd	r22
	blr

/*
 * Kernel profiling with soft disable on iSeries
 */
do_profile:
	ld	r22,8(r21)		/* Get SRR1 */
	andi.	r22,r22,MSR_PR		/* Test if in kernel */
	bnelr				/* return if not in kernel */
	ld	r22,0(r21)		/* Get SRR0 */
	ld	r25,PACAPROFSTEXT(r20)	/* _stext */
	subf	r22,r25,r22		/* offset into kernel */
	lwz	r25,PACAPROFSHIFT(r20)
	srd	r22,r22,r25
	lwz	r25,PACAPROFLEN(r20)	/* length of profile table (-1) */
	cmp	0,r22,r25		/* off end? */
	ble	1f
	mr	r22,r25			/* force into last entry */
1:	sldi	r22,r22,2		/* convert to offset into buffer */
	ld	r25,PACAPROFBUFFER(r20)	/* profile buffer */
	add	r25,r25,r22
2:	lwarx	r22,0,r25		/* atomically increment */
	addi	r22,r22,1
	stwcx.	r22,0,r25
	bne-	2b
	blr
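
/*
 * Illustrative note, not part of the original file: in C terms the profile
 * hit recorded above is roughly
 *
 *	idx = (srr0 - prof_stext) >> prof_shift;
 *	if (idx > prof_len)
 *		idx = prof_len;		// clamp to the last bucket
 *	prof_buffer[idx]++;		// done atomically
 *
 * where prof_stext, prof_shift, prof_len and prof_buffer stand in for the
 * PACA fields addressed via PACAPROFSTEXT/PACAPROFSHIFT/PACAPROFLEN/
 * PACAPROFBUFFER (these C names are illustrative, not the real field names).
 * The lwarx/stwcx. loop is the usual PowerPC load-reserve/store-conditional
 * pattern for an atomic 32-bit increment: if another CPU writes the word
 * between the load and the store, the stwcx. fails and the loop retries.
 */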

/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (in Linux terms, not hardware).
 */
_GLOBAL(pseries_secondary_smp_init)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode
	isync

	/* Set up a paca value for this processor. */
	LOADADDR(r24, paca)		/* Get base vaddr of Paca array	 */
	mulli	r13,r3,PACA_SIZE	/* Calculate vaddr of right Paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRG3,r13		/* Save vaddr of Paca in SPRG3	 */
	mr	r24,r3			/* __secondary_start needs cpu#	 */

1:
	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	sync
	/* Create a temp kernel stack for use before relocation is on. */
	mr	r1,r13
	addi	r1,r1,PACAGUARD
	addi	r1,r1,0x1000
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpi	0,r23,0
#ifdef CONFIG_SMP
#ifdef SECONDARY_PROCESSORS
	bne	.__secondary_start
#endif
#endif
	b	1b			/* Loop until told to go	 */

_GLOBAL(__start_initialization_iSeries)
	LOADADDR(r1,init_task_union)
	addi	r1,r1,TASK_UNION_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	LOADADDR(r3,cpu_specs)
	LOADADDR(r4,cur_cpu_spec)
	li	r5,0
	bl	.identify_cpu

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	LOADADDR(r9,systemcfg)
	SET_REG_TO_CONST(r4, KERNELBASE+0x5000)
	std	r4,0(r9)		/* set the systemcfg pointer */

	LOADADDR(r9,naca)
	SET_REG_TO_CONST(r4, KERNELBASE+0x4000)
	std	r4,0(r9)		/* set the naca pointer */

#if 1 /* DRENG:PPPBBB:TIA This looks like dead code to me -Peter */
	/* Get the pointer to the segment table */
	ld	r6,PACA(r4)		/* Get the base paca pointer	*/
	ld	r4,PACASTABVIRT(r6)
#endif
	bl	.iSeries_fixup_klimit

	b	.start_here_common

_GLOBAL(__start_initialization_pSeries)
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	mr	r26,r8			/* YABOOT: debug_print() routine */
	mr	r25,r9			/* YABOOT: debug_delay() routine */
	mr	r24,r10			/* YABOOT: debug_prom() routine */

	bl	.enable_64b_mode

	/* put a relocation offset into r3 */
	bl	.reloc_offset

	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000

	/* Relocate the TOC from a virt addr to a real addr */
	sub	r2,r2,r3

	/* setup the systemcfg pointer which is needed by prom_init */
	LOADADDR(r9,systemcfg)
	sub	r9,r9,r3		/* addr of the variable systemcfg */
	SET_REG_TO_CONST(r4, KERNELBASE+0x5000)
	sub	r4,r4,r3
	std	r4,0(r9)		/* set the value of systemcfg	  */

	/* setup the naca pointer which is needed by prom_init */
	LOADADDR(r9,naca)
	sub	r9,r9,r3		/* addr of the variable naca	  */
	SET_REG_TO_CONST(r4, KERNELBASE+0x4000)
	sub	r4,r4,r3
	std	r4,0(r9)		/* set the value of naca	  */

	/* DRENG / PPPBBB Fix the following comment!!! -Peter */
	/* The following copies the first 0x100 bytes of code from the	  */
	/* load addr to physical addr 0x0.  This code causes secondary	  */
	/* processors to spin until a flag in the PACA is set.  This	  */
	/* is done at this time rather than with the entire kernel	  */
	/* relocation which is done below because we need to cause the	  */
	/* processors to spin on code that is not going to move while	  */
	/* OF is still alive.  Although the spin code is not actually	  */
	/* run on a uniprocessor, we always do this copy.		  */
	SET_REG_TO_CONST(r4, KERNELBASE)/* Src addr			  */
	sub	r4,r4,r3		/* current address of __start	  */
					/*   the source addr		  */
	li	r3,0			/* Dest addr			  */
	li	r5,0x100		/* # bytes of memory to copy	  */
	li	r6,0			/* Destination offset		  */
	bl	.copy_and_flush		/* copy the first 0x100 bytes	  */

	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	mr	r8,r26
	mr	r9,r25
	mr	r10,r24

	bl	.prom_init
	li	r24,0			/* cpu # */

/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
	bl	.reloc_offset
	mr	r26,r3
	SET_REG_TO_CONST(r27,KERNELBASE)

	li	r3,0			/* target addr */

	sub	r4,r27,r26		/* source addr			 */
					/* current address of _start	 */
					/*   i.e. where we are running	 */
					/*	  the source addr	 */

	LOADADDR(r5,copy_to_here)	/* # bytes of memory to copy	 */
	sub	r5,r5,r27

	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */

	LOADADDR(r0, 4f)		/* Jump to the copy of this code */
	mtctr	r0			/* that we just made/relocated	 */
	bctr

4:	LOADADDR(r5,klimit)
	sub	r5,r5,r26
	ld	r5,0(r5)		/* get the value of klimit */
	sub	r5,r5,r27
	bl	.copy_and_flush		/* copy the rest */
	b	.start_here_pSeries

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 *   r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 *   on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_STATIC(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,16			/* Use the least common		*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/
	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync

	addi	r5,r5,8
	addi	r6,r6,8
	blr
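
/*
 * Illustrative note, not part of the original file: copy_and_flush is,
 * roughly, the loop below, moving 16 doublewords (128 bytes) per pass and
 * then pushing that chunk out of the data cache and invalidating it from
 * the instruction cache, since the bytes being copied are code that will
 * be executed at the destination:
 *
 *	do {
 *		copy 16 doublewords from src+off to dest+off;
 *		dcbst(dest+off);	// write the new line back to memory
 *		sync();			// ensure the flush completes first
 *		icbi(dest+off);		// discard any stale icache copy
 *		off += 128;
 *	} while (off < limit);
 *
 * The hard-coded 128-byte step is a conservative ("least common
 * denominator") cache-line size; the real size cannot be read from the
 * NACA here because the NACA itself is being moved.  On hardware with
 * larger lines this only issues redundant flushes, which is harmless, as
 * the comment inside the routine notes.
 */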

	.align 8
copy_to_here:

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	LOADBASE(r3,last_task_used_math)
	ld	r4,last_task_used_math@l(r3)
	cmpi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r20,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r20
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r20,THREAD_FPEXC_MODE(r5)
	ori	r23,r23,MSR_FP
	or	r23,r23,r20
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	std	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
#if 0 /* PPPBBB: enable code below if we run with FE0/1 = 0,0 as default */
	clrrdi	r4,r13,60		/* r4 <- 0xC000000000000000 */
	lfd	fr0,__zero@l(r4)
	mtfsf	0xff,fr0
#endif
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOADBASE(r4,last_task_used_math)
	std	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr
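
/*
 * Illustrative note, not part of the original file: the FP handling above
 * amounts to the following C-like pseudocode.
 *
 *	load_up_fpu():				// FP-unavailable exception
 *		#ifndef CONFIG_SMP		// lazy switch, UP only
 *		if (last_task_used_math) {
 *			save FPRs + FPSCR into last_task_used_math->thread;
 *			clear MSR_FP|MSR_FE0|MSR_FE1 in its saved regs->msr;
 *		}
 *		#endif
 *		restore current's FPSCR + FPRs from its thread_struct;
 *		set MSR_FP (plus the thread's fpexc_mode bits) in the MSR
 *			that will be restored on exception return;
 *		last_task_used_math = current;	// UP only
 *
 *	giveup_fpu(tsk):			// called from switch_to() on SMP
 *		save tsk's FPRs + FPSCR into tsk->thread;
 *		clear MSR_FP|MSR_FE0|MSR_FE1 in tsk's saved regs->msr (if any);
 *		last_task_used_math = NULL;	// UP only
 *
 * On SMP the lazy path is compiled out and switch_to() always calls
 * giveup_fpu, so FPU state never has to chase a task across CPUs; on UP the
 * save is deferred until some other task actually touches the FPU.
 */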

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable AltiVec for the task which used AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec unit is free, since we give it up every
 * switch (ie, no lazy save of the AltiVec registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	LOADBASE(r3,last_task_used_altivec)
	ld	r4,last_task_used_altivec@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r20,r4)
	MFVSCR(vr0)
	li	r20,THREAD_VSCR
	STVX(vr0,r20,r4)
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r20,MSR_VEC@h
	andc	r4,r4,r20		/* disable altivec for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD
	oris	r23,r23,MSR_VEC@h
	li	r20,THREAD_VSCR
	LVX(vr0,r20,r5)
	MTVSCR(vr0)
	REST_32VR(0,r20,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	std	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of AltiVec now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32VR(0, r4, r3)
	MFVSCR(vr0)
	li	r4,THREAD_VSCR
	STVX(vr0, r4, r3)
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOADBASE(r4,last_task_used_altivec)
	std	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SMP
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point: