📄 head.s
字号:
/* Evict any mappings the PROM left in the TLBs for the virtual range the
 * kernel image will occupy.  On entry %g5 is the kernel TTE being built;
 * %l2/%l3 are presumably tag/paddr masks set up before this view -- TODO
 * confirm against the preceding (unseen) part of the file.
 */
	andn	%g1, %l3, %g1	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
				 * NOTE: the PROM cif code into the TLB.
				 */
	or	%g5, %g1, %g5	/* Or it into TAG being built. */

	/* Walk all 64 ITLB entries; clear every one whose vaddr lies in
	 * [KERNBASE, KERNBASE<<1), i.e. the 4MB window the kernel maps.
	 */
	clr	%l0			/* TLB entry walker. */
	sethi	%hi(KERNBASE), %g3	/* 4M lower limit */
	sethi	%hi(KERNBASE<<1), %g7	/* 8M upper limit */
	mov	TLB_TAG_ACCESS, %l7
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_ITLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f		/* Below the kernel window: keep it. */
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f		/* At/above the window: keep it. */
	 nop
	stxa	%g0, [%l7] ASI_IMMU	/* Zero TAG_ACCESS, then the entry. */
	stxa	%g0, [%l0] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)		/* 64 entries, 8 bytes apart. */
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop

	/* Now the identical walk over the DTLB. */
	clr	%l0			/* TLB entry walker. */
1:
	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
	ldxa	[%l0] ASI_DTLB_TAG_READ, %g1
	nop
	nop
	nop
	andn	%g1, %l2, %g1		/* Get vaddr */
	cmp	%g1, %g3
	blu,pn	%xcc, 2f		/* Below the kernel window: keep it. */
	 cmp	%g1, %g7
	bgeu,pn	%xcc, 2f		/* At/above the window: keep it. */
	 nop
	stxa	%g0, [%l7] ASI_DMMU
	stxa	%g0, [%l0] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:
	cmp	%l0, (63 << 3)
	blu,pt	%xcc, 1b
	 add	%l0, (1 << 3), %l0

	nop; nop; nop

	/* PROM never puts any TLB entries into the MMU with the lock bit
	 * set.  So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
*/
	/* Install the kernel's 4MB TTE at ITLB/DTLB entry 63 for KERNBASE. */
	sethi	%hi(KERNBASE), %g3
	mov	(63 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU		/* KERNBASE into TLB TAG */
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS	/* TTE into TLB DATA */
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU		/* KERNBASE into TLB TAG */
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS	/* TTE into TLB DATA */
	membar	#Sync
	flush	%g3
	membar	#Sync

	sethi	%hi(_end), %g3		/* Check for bigkernel case */
	or	%g3, %lo(_end), %g3
	srl	%g3, 23, %g3		/* Check if _end > 8M */
	brz,pt	%g3, 2f
	 sethi	%hi(KERNBASE), %g3	/* Restore for fixup code below */
	/* Image exceeds 8MB: map a second 4MB page via TLB entry 62. */
	sethi	%hi(0x400000), %g3
	or	%g3, %lo(0x400000), %g3
	add	%g5, %g3, %g5		/* New tte data */
	andn	%g5, (_PAGE_G), %g5
	sethi	%hi(KERNBASE+0x400000), %g3
	or	%g3, %lo(KERNBASE+0x400000), %g3
	mov	(62 << 3), %g7
	stxa	%g3, [%l7] ASI_DMMU
	stxa	%g5, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync
	stxa	%g3, [%l7] ASI_IMMU
	stxa	%g5, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync
	flush	%g3
	membar	#Sync
	sethi	%hi(KERNBASE), %g3	/* Restore for fixup code below */
2:
	ba,pt	%xcc, 1f
	 nop
1:
	set	sun4u_init, %g2
	jmpl	%g2 + %g0, %g0		/* Enter sun4u_init through the new mapping. */
	 nop

sun4u_init:
	/* Set ctx 0 */
	mov	PRIMARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	mov	SECONDARY_CONTEXT, %g7
	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync

	/* We are now safely (we hope) in Nucleus context (0), rewrite
	 * the KERNBASE TTE's so they no longer have the global bit set.
	 * Don't forget to setup TAG_ACCESS first 8-)
	 */
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g3, [%g2] ASI_IMMU
	stxa	%g3, [%g2] ASI_DMMU
	membar	#Sync

	BRANCH_IF_ANY_CHEETAH(g1,g5,cheetah_tlb_fixup)
	ba,pt	%xcc, spitfire_tlb_fixup
	 nop

cheetah_tlb_fixup:
	/* NOTE(review): (0 << 16) | (15 << 3) presumably selects the locked
	 * KERNBASE entry in Cheetah's TLB index encoding -- confirm against
	 * the Cheetah MMU documentation.
	 */
	set	(0 << 16) | (15 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1	/* Drop the global bit. */
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g0
	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1	/* Drop the global bit. */
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	mov	2, %g2		/* Set TLB type to cheetah+.
*/
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g5,g7,1f)
	mov	1, %g2		/* Set TLB type to cheetah. */
1:
	/* Record the detected TLB type for the rest of the kernel. */
	sethi	%hi(tlb_type), %g5
	stw	%g2, [%g5 + %lo(tlb_type)]

	/* Patch copy/page operations to cheetah optimized versions. */
	call	cheetah_patch_copyops
	 nop
	call	cheetah_patch_cachetlbops
	 nop
	ba,pt	%xcc, tlb_fixup_done
	 nop

spitfire_tlb_fixup:
	/* Clear the global bit on the locked KERNBASE TTEs (entry 63). */
	mov	(63 << 3), %g7
	ldxa	[%g7] ASI_ITLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	ldxa	[%g7] ASI_DTLB_DATA_ACCESS, %g1
	andn	%g1, (_PAGE_G), %g1
	stxa	%g1, [%g7] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Kill instruction prefetch queues. */
	flush	%g3
	membar	#Sync

	/* Set TLB type to spitfire. */
	mov	0, %g2
	sethi	%hi(tlb_type), %g5
	stw	%g2, [%g5 + %lo(tlb_type)]

tlb_fixup_done:
	/* %g6 = init_thread_union, %g4 = initial task pointer. */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6
	ldx	[%g6 + TI_TASK], %g4
	mov	%sp, %l6		/* Stash OpenPROM stack... */
	mov	%o4, %l7		/* ...and cif handler for prom_init below. */

#if 0
	/* We don't do it like this anymore, but for historical hack value
	 * I leave this snippet here to show how crazy we can be sometimes. 8-)
	 */

	/* Setup "Linux Current Register", thanks Sun 8-) */
	wr	%g0, 0x1, %pcr

	/* Blackbird errata workaround.  See commentary in
	 * smp.c:smp_percpu_timer_interrupt() for more
	 * information.
	 */
	ba,pt	%xcc, 99f
	 nop
	.align	64
99:	wr	%g6, %g0, %pic
	rd	%pic, %g0
#endif

	wr	%g0, ASI_P, %asi

	/* Build the initial kernel stack inside init_thread_union. */
	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp

	wrpr	%g0, 0, %wstate
	wrpr	%g0, 0x0, %tl

	/* Clear the bss */
	sethi	%hi(__bss_start), %o0
	or	%o0, %lo(__bss_start), %o0
	sethi	%hi(_end), %o1
	or	%o1, %lo(_end), %o1
	call	__bzero
	 sub	%o1, %o0, %o1

	mov	%l6, %o1		! OpenPROM stack
	call	prom_init
	 mov	%l7, %o0		! OpenPROM cif handler

	/* Off we go.... */
	call	start_kernel
	 nop
	/* Not reached... */

/* IMPORTANT NOTE: Whenever making changes here, check
 * trampoline.S as well.
 * -jj */
	.globl	setup_tba
setup_tba:	/* i0 = is_starfire */
	save	%sp, -160, %sp

	/* Save the PROM's trap table base in prom_tba for later use. */
	rdpr	%tba, %g7
	sethi	%hi(prom_tba), %o1
	or	%o1, %lo(prom_tba), %o1
	stx	%g7, [%o1]

	/* Setup "Linux" globals 8-) */
	rdpr	%pstate, %o1
	mov	%g6, %o2
	wrpr	%o1, (PSTATE_AG|PSTATE_IE), %pstate
	sethi	%hi(sparc64_ttable_tl0), %g5
	wrpr	%g5, %tba		/* Install Linux's trap table. */
	mov	%o2, %g6

	/* Set up MMU globals */
	wrpr	%o1, (PSTATE_MG|PSTATE_IE), %pstate

	/* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)

	mov	TSB_REG, %g1
	stxa	%g0, [%g1] ASI_DMMU
	membar	#Sync
	mov	TLB_SFSR, %g1
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2

	/* %g3 = per-CPU-type VPTE base address. */
	BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
	ba,pt	%xcc, spitfire_vpte_base
	 nop

cheetah_vpte_base:
	sethi	%uhi(VPTE_BASE_CHEETAH), %g3
	or	%g3, %ulo(VPTE_BASE_CHEETAH), %g3
	ba,pt	%xcc, 2f
	 sllx	%g3, 32, %g3

spitfire_vpte_base:
	sethi	%uhi(VPTE_BASE_SPITFIRE), %g3
	or	%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
	sllx	%g3, 32, %g3

2:
	clr	%g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	/* Kill PROM timer */
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, 0, %tick_cmpr

	BRANCH_IF_ANY_CHEETAH(o2,o3,1f)

	ba,pt	%xcc, 2f
	 nop

	/* Disable STICK_INT interrupts. */
1:
	sethi	%hi(0x80000000), %o2
	sllx	%o2, 32, %o2
	wr	%o2, %asr25

	/* Ok, we're done setting up all the state our trap mechanisms need,
	 * now get back into normal globals and let the PROM know what is up.
	 */
2:
	wrpr	%g0, %g0, %wstate
	wrpr	%o1, PSTATE_IE, %pstate

	call	init_irqwork_curcpu
	 nop

	/* Hand Linux's trap table to the PROM as well. */
	sethi	%hi(sparc64_ttable_tl0), %g5
	call	prom_set_trap_table
	 mov	%g5, %o0

	/* Re-enable interrupts and return to caller. */
	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	ret
	 restore

/*
 * The following skips make sure the trap table in ttable.S is aligned
 * on a 32K boundary as required by the v9 specs for TBA register.
 */
sparc64_boot_end:
	.skip	0x2000 + _start - sparc64_boot_end
bootup_user_stack_end:
	.skip	0x2000

#ifdef CONFIG_SBUS
/* This is just a hack to fool make depend config.h discovering
   strategy: As the .S files below need config.h, but
   make depend does not find it for them, we include config.h
   in head.S */
#endif

! 0x0000000000408000

#include "ttable.S"
#include "systbls.S"

	/* Bootstrap page directory; presumably filled in later by paging
	 * init code -- only the zero placeholder is visible here.
	 */
	.align	1024
	.globl	swapper_pg_dir
swapper_pg_dir:
	.word	0

#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"

	/* This is just anal retentiveness on my part... */
	.align	16384

	.data
	.align	8
	.globl	prom_tba, tlb_type
prom_tba:	.xword	0	/* Saved PROM trap table base (see setup_tba). */
tlb_type:	.word	0	/* Must NOT end up in BSS */

	/* Exception-fixup stub: jumped to on faulting user accesses,
	 * returns -EFAULT to the caller in %o0.
	 */
	.section	".fixup",#alloc,#execinstr
	.globl	__ret_efault
__ret_efault:
	ret
	 restore %g0, -EFAULT, %o0
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -