📄 start.s
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

	.globl	dc_read
dc_read:
	blr

	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr

	.globl	get_svr
get_svr:
	mfspr	r3, SVR
	blr

/*
 * Function:	in8
 * Description:	Input 8 bits
 */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*
 * Function:	out8
 * Description:	Output 8 bits
 */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	blr

/*
 * Function:	out16
 * Description:	Output 16 bits
 */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	blr

/*
 * Function:	out16r
 * Description:	Byte reverse and output 16 bits
 */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	blr

/*
 * Function:	out32
 * Description:	Output 32 bits
 */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	blr

/*
 * Function:	out32r
 * Description:	Byte reverse and output 32 bits
 */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3
	blr

/*
 * Function:	in16
 * Description:	Input 16 bits
 */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr

/*
 * Function:	in16r
 * Description:	Input 16 bits and byte reverse
 */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr

/*
 * Function:	in32
 * Description:	Input 32 bits
 */
	.globl	in32
in32:
	lwz	3,0x0000(3)
	blr

/*
 * Function:	in32r
 * Description:	Input 32 bits and byte reverse
 */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr

/*
 * Function:	ppcDcbf
 * Description:	Data Cache block flush
 * Input:	r3 = effective address
 * Output:	none.
 */
	.globl	ppcDcbf
ppcDcbf:
	dcbf	r0,r3
	blr

/*
 * Function:	ppcDcbi
 * Description:	Data Cache block Invalidate
 * Input:	r3 = effective address
 * Output:	none.
 */
	.globl	ppcDcbi
ppcDcbi:
	dcbi	r0,r3
	blr

/*
 * Function:	ppcDcbz
 * Description:	Data Cache block zero.
 * Input:	r3 = effective address
 * Output:	none.
 */
	.globl	ppcDcbz
ppcDcbz:
	dcbz	r0,r3
	blr

/*
 * Function:	ppcSync
 * Description:	Processor Synchronize
 * Input:	none.
 * Output:	none.
 */
	.globl	ppcSync
ppcSync:
	sync
	blr
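/*
 * Seen from C, the accessors above behave roughly like the prototypes
 * below (a sketch only; the exact types are whatever the U-Boot headers
 * declare, not something defined in this file).  Per the register usage
 * above, r3 carries the first argument and the return value, r4 the
 * second argument:
 *
 *	unsigned char	in8   (unsigned int port);	// lbz
 *	unsigned short	in16  (unsigned int port);	// lhz
 *	unsigned short	in16r (unsigned int port);	// lhbrx, byte-reversed
 *	unsigned long	in32  (unsigned int port);	// lwz
 *	unsigned long	in32r (unsigned int port);	// lwbrx, byte-reversed
 *	void	out8   (unsigned int port, unsigned char  val);	// stb
 *	void	out16  (unsigned int port, unsigned short val);	// sth
 *	void	out16r (unsigned int port, unsigned short val);	// sthbrx
 *	void	out32  (unsigned int port, unsigned long  val);	// stw
 *	void	out32r (unsigned int port, unsigned long  val);	// stwbrx
 *	void	ppcDcbf(unsigned long ea);	// flush one data cache block
 *	void	ppcDcbi(unsigned long ea);	// invalidate one block
 *	void	ppcDcbz(unsigned long ea);	// zero one block
 *	void	ppcSync(void);			// sync
 */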
/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer		*/
	mr	r9,  r4		/* Save copy of Global Data pointer	*/
	mr	r29, r9		/* Save for DECLARE_GLOBAL_DATA_PTR	*/
	mr	r10, r5		/* Save copy of Destination Address	*/

	mr	r3,  r5				/* Destination Address	*/
	lis	r4, CFG_MONITOR_BASE@h		/* Source      Address	*/
	ori	r4, r4, CFG_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4
	li	r6, CFG_CACHELINE_SIZE		/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r14, r14, r15
	/* then the one used by the C code */
	add	r30, r30, r15

	/*
	 * Now relocate code
	 */
#ifdef CONFIG_ECC
	bl	board_relocate_rom
	sync
	mr	r3, r10				/* Destination Address	*/
	lis	r4, CFG_MONITOR_BASE@h		/* Source      Address	*/
	ori	r4, r4, CFG_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4
	li	r6, CFG_CACHELINE_SIZE		/* Cache Line Size	*/
#else
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b
#endif

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr

in_ram:
#ifdef CONFIG_ECC
	bl	board_init_ecc
#endif
	/*
	 * Relocation Function, r14 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	add	r0,r0,r11
	stw	r0,0(r3)
	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	add	r0,r0,r11
	stw	r10,0(r3)
	stw	r0,0(r4)
	bdnz	3b
4:
/* clear_bss: */
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(_end)

	cmplw	0, r3, r4
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b

6:
	mr	r3, r9		/* Init Data pointer		*/
	mr	r4, r10		/* Destination Address		*/
	bl	board_init_r

	/* not reached - end relocate_code */
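/*
 * For orientation, the word copy and cache flush performed by
 * relocate_code above are roughly equivalent to the C sketch below
 * (the function, parameter names and types here are illustrative
 * assumptions, not part of this file; the real code is assembly because
 * it must run while copying itself and must fix up the GOT and stack
 * explicitly):
 *
 *	static void copy_and_flush(unsigned *dst, const unsigned *src,
 *				   unsigned long nbytes, unsigned long cls)
 *	{
 *		unsigned long i, nwords = (nbytes + 3) / 4;
 *
 *		if (dst < src) {			// copy forwards ...
 *			for (i = 0; i < nwords; i++)
 *				dst[i] = src[i];
 *		} else if (dst > src) {			// ... or backwards
 *			for (i = nwords; i-- > 0; )	//     when regions overlap
 *				dst[i] = src[i];
 *		}
 *		if (cls == 0)
 *			return;
 *		for (i = 0; i < nbytes; i += cls)	// write data back to memory
 *			__asm__ volatile ("dcbst 0,%0" : : "r" ((char *)dst + i));
 *		__asm__ volatile ("sync");
 *		for (i = 0; i < nbytes; i += cls)	// discard stale instructions
 *			__asm__ volatile ("icbi 0,%0" : : "r" ((char *)dst + i));
 *		__asm__ volatile ("sync; isync");
 *	}
 *
 * The assembly additionally rounds the flush start address down to a
 * cache-line boundary so that no line is missed.
 */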
/*-----------------------------------------------------------------------*/

/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 */
	.globl	trap_init
trap_init:
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector always at 0x100 */

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */

	mflr	r4			/* save link register		*/
1:
	lwz	r0, 0(r7)
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	4b

	/* enable exceptions from RAM vectors */
	mfmsr	r7
	li	r8,MSR_IP
	andc	r7,r7,r8
	mtmsr	r7

	mtlr	r4			/* restore link register	*/
	blr

/*
 * Function: relocate entries for one exception vector
 */
trap_reloc:
	lwz	r0, 0(r7)		/* hdlr ...		*/
	add	r0, r0, r3		/*  ... += dest_addr	*/
	stw	r0, 0(r7)

	lwz	r0, 4(r7)		/* int_return ...	*/
	add	r0, r0, r3		/*  ... += dest_addr	*/
	stw	r0, 4(r7)

	sync
	isync
	blr

.globl enable_ext_addr
enable_ext_addr:
	mfspr	r0, HID0
	lis	r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@h
	ori	r0, r0, (HID0_HIGH_BAT_EN | HID0_XBSEN | HID0_XAEN)@l
	mtspr	HID0, r0
	sync
	isync
	blr

#if (CFG_CCSRBAR_DEFAULT != CFG_CCSRBAR)
.globl setup_ccsrbar
setup_ccsrbar:
	/* Special sequence needed to update CCSRBAR itself */
	lis	r4, CFG_CCSRBAR_DEFAULT@h
	ori	r4, r4, CFG_CCSRBAR_DEFAULT@l

	lis	r5, CFG_CCSRBAR@h
	ori	r5, r5, CFG_CCSRBAR@l
	srwi	r6,r5,12
	stw	r6, 0(r4)
	isync

	lis	r5, 0xffff
	ori	r5,r5,0xf000
	lwz	r5, 0(r5)
	isync

	lis	r3, CFG_CCSRBAR@h
	lwz	r5, CFG_CCSRBAR@l(r3)
	isync

	blr
#endif

#ifdef CFG_INIT_RAM_LOCK
lock_ram_in_cache:
	/* Allocate Initial RAM in data cache. */
	lis	r3, (CFG_INIT_RAM_ADDR & ~31)@h
	ori	r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
	li	r2, ((CFG_INIT_RAM_END & ~31) + \
		     (CFG_INIT_RAM_ADDR & 31) + 31) / 32
	mtctr	r2
1:
	dcbz	r0, r3
	addi	r3, r3, 32
	bdnz	1b
#if 1
	/* Lock the data cache */
	mfspr	r0, HID0
	ori	r0, r0, 0x1000
	sync
	mtspr	HID0, r0
	sync
	blr
#endif
#if 0
	/* Lock the first way of the data cache */
	mfspr	r0, LDSTCR
	ori	r0, r0, 0x0080
#if defined(CONFIG_ALTIVEC)
	dssall
#endif
	sync
	mtspr	LDSTCR, r0
	sync
	isync
	blr
#endif

.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3, (CFG_INIT_RAM_ADDR & ~31)@h
	ori	r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
	li	r2, ((CFG_INIT_RAM_END & ~31) + \
		     (CFG_INIT_RAM_ADDR & 31) + 31) / 32
	mtctr	r2
1:	icbi	r0, r3
	addi	r3, r3, 32
	bdnz	1b
	sync			/* Wait for all icbi to complete on bus	*/
	isync
#if 1
	/* Unlock the data cache and invalidate it */
	mfspr	r0, HID0
	li	r3,0x1000
	andc	r0,r0,r3
	li	r3,0x0400
	or	r0,r0,r3
	sync
	mtspr	HID0, r0
	sync
	blr
#endif
#if 0
	/* Unlock the first way of the data cache */
	mfspr	r0, LDSTCR
	li	r3,0x0080
	andc	r0,r0,r3
#ifdef CONFIG_ALTIVEC
	dssall
#endif
	sync
	mtspr	LDSTCR, r0
	sync
	isync
	li	r3,0x0400
	or	r0,r0,r3
	sync
	mtspr	HID0, r0
	sync
	blr
#endif
#endif

/* If this is a multi-cpu system then we need to handle the
 * 2nd cpu.  The assumption is that the 2nd cpu is being
 * held in boot holdoff mode until the 1st cpu unlocks it
 * from Linux.  We'll do some basic cpu init and then pass
 * it to the Linux Reset Vector.
 * Sri: Much of this initialization is not required.  Linux
 * rewrites the BATs and the SPRs and also enables the L1 cache.
 */
#if (CONFIG_NUM_CPUS > 1)
.globl secondary_cpu_setup
secondary_cpu_setup:
	/* Do only core setup on all cores except cpu0 */
	bl	invalidate_bats
	sync
	bl	enable_ext_addr

#ifdef CFG_L2
	/* init the L2 cache */
	addis	r3, r0, L2_INIT@h
	ori	r3, r3, L2_INIT@l
	sync
	mtspr	l2cr, r3
#ifdef CONFIG_ALTIVEC
	dssall
#endif
	/* invalidate the L2 cache */
	bl	l2cache_invalidate
	sync
#endif

	/* enable and invalidate the data cache */
	bl	dcache_enable
	sync

	/* enable and invalidate the instruction cache */
	bl	icache_enable
	sync

	/* TBEN in HID0 */
	mfspr	r4, HID0
	oris	r4, r4, 0x0400
	mtspr	HID0, r4
	sync
	isync

	/* SYNCBE|ABE in HID1 */
	mfspr	r4, HID1
	ori	r4, r4, 0x0C00
	mtspr	HID1, r4
	sync
	isync

	lis	r3, CONFIG_LINUX_RESET_VEC@h
	ori	r3, r3, CONFIG_LINUX_RESET_VEC@l
	mtlr	r3
	blr	/* Never Returns, Running in Linux Now */
#endif
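/*
 * Note on the loop count used by lock_ram_in_cache/unlock_ram_in_cache
 * above:
 *
 *	((CFG_INIT_RAM_END & ~31) + (CFG_INIT_RAM_ADDR & 31) + 31) / 32
 *
 * is the number of 32-byte cache lines covering the initial-RAM region.
 * As an illustration only (the values are assumptions, not taken from
 * any particular board config): with a cache-line-aligned
 * CFG_INIT_RAM_ADDR and CFG_INIT_RAM_END = 0x4000, the count is
 * (0x4000 + 0 + 31) / 32 = 512 lines, i.e. 16 KiB touched by the dcbz
 * and icbi loops.
 */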