head.S
/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#ifdef CONFIG_CPU_V6
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#else
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include <asm/arch/debug-macro.S>

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp, rb
		addruart \rb
		.endm
#endif
#endif
#endif

		.macro	kputc, val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex, val, len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
start:
		.type	start,#function
		.rept	8
		mov	r0, r0
		.endr

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
		swi	0x123456		@ angel_SWI_ARM
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}
		subs	r0, r0, r1		@ calculate the delta offset

						@ if delta is zero, we are
		beq	not_relocated		@ running at the address we
						@ were linked at.
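		/*
		 * Worked example (illustrative addresses, not taken from
		 * this file): if the image was linked to run at 0x00008000
		 * but the loader placed it at 0x10008000, the adr/ldmia
		 * pair above yields r0 = 0x10008000 - 0x00008000 =
		 * 0x10000000, and every link-time address loaded from LC0
		 * (and, below, every GOT entry) must have that delta added
		 * before it can be used.
		 */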
		/*
		 * We're running at a different address.  We need to fix
		 * up various pointers:
		 *   r5 - zImage base address
		 *   r6 - GOT start
		 *   ip - GOT end
		 */
		add	r5, r5, r0
		add	r6, r6, r0
		add	ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 *   r2 - BSS start
		 *   r3 - BSS end
		 *   sp - stack pointer
		 */
		add	r2, r2, r0
		add	r3, r3, r0
		add	sp, sp, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#else
		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * The C runtime environment should now be setup
		 * sufficiently.  Turn the cache on, set up some
		 * pointers, and start decompressing.
		 */
		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
		cmp	r4, r2
		bhs	wont_overwrite
		sub	r3, sp, r5		@ > compressed kernel size
		add	r0, r4, r3, lsl #2	@ allow for 4x expansion
		cmp	r0, r5
		bls	wont_overwrite
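		/*
		 * Worked example (assumed, typical values): a bootloader
		 * often places the zImage at zreladdr itself, so
		 * r4 = r5 = 0x00008000, say.  Then r4 < r2 (r2 being the
		 * end of this image plus 64k of malloc space), and
		 * r4 + 4 * compressed size > r5, so neither branch above
		 * is taken and we fall through: decompress above the
		 * malloc space, then relocate the kernel into place.
		 */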
		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel

		add	r0, r0, #127 + 128	@ alignment + stack
		bic	r0, r0, #127		@ align the kernel length
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r14 = corrupted
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
1:		ldmia	r2!, {r9 - r14}		@ copy relocation code
		stmia	r1!, {r9 - r14}
		ldmia	r2!, {r9 - r14}
		stmia	r1!, {r9 - r14}
		cmp	r2, r3
		blo	1b
		add	sp, r1, #128		@ relocate the stack

		bl	cache_clean_flush
		add	pc, r5, r0		@ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel

		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	zreladdr		@ r4
		.word	_start			@ r5
		.word	_got_start		@ r6
		.word	_got_end		@ ip
		.word	user_stack+4096		@ sp
LC1:		.word	reloc_end - reloc_start
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =params_phys
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = atags pointer
 *  r9 = run-time address of "start"  (???)
 * On exit,
 *  r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
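/*
 * Sketch of the descriptor built above (assuming the pre-ARMv6
 * short-descriptor section format): bits [31:20] hold the section
 * base address, AP[11:10] = 0b11 (read/write), C = bit 3, B = bit 2,
 * and bits [1:0] = 0b10 mark a 1MB section; bit 4 is written as one
 * because these older cores expect it set.  So 0x12 | 3 << 10 = 0xc12
 * is an uncached, unbuffered section, and oring in 0x0c inside the
 * RAM window gives, e.g., 0xc0000c1e for the section at 0xc0000000.
 */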
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr

__armv4_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #-1
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
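/*
 * Note for readers of this excerpt: __common_mmu_cache_on is defined
 * later in the full file.  In head.S of this era it loads the page
 * table pointer from r3 into CP15 c2 (TTB), sets the domain access
 * control register (CP15 c3) to manager for all domains, and writes
 * the control register value assembled in r0 to enable the MMU and
 * caches.
 */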