/* $Id: entry.S,v 1.120 2000/09/08 13:58:12 jj Exp $
 * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/errno.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/visasm.h>

/* #define SYSCALL_TRACING */

#define curptr      g6

#define NR_SYSCALLS 256      /* Each OS is different... */

	.text
	.align	32

	.globl	sparc64_vpte_patchme1
	.globl	sparc64_vpte_patchme2
sparc64_vpte_nucleus:
sparc64_vpte_patchme1:
	sethi	%hi(0), %g5			! This has to be patched
sparc64_vpte_patchme2:
	or	%g5, %lo(0), %g5		! This is patched too
	ba,pt	%xcc, sparc64_kpte_continue	! Part of dtlb_backend
	 add	%g1, %g1, %g1			! Finish PMD offset adjustment

	/* This is trivial with the new code... */
	.globl	do_fpdis
do_fpdis:
	ldub	[%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5	! Load	Group
	sethi	%hi(TSTATE_PEF), %g4					! IEU0
	wr	%g0, FPRS_FEF, %fprs					! LSU	Group+4bubbles
	andcc	%g5, FPRS_FEF, %g0					! IEU1	Group
	be,a,pt	%icc, 1f						! CTI
	 clr	%g7							! IEU0
	ldub	[%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7		! Load	Group
1:	andcc	%g5, FPRS_DL, %g0					! IEU1
	bne,pn	%icc, 2f						! CTI
	 fzero	%f0							! FPA
	andcc	%g5, FPRS_DU, %g0					! IEU1  Group
	bne,pn	%icc, 1f						! CTI
	 fzero	%f2							! FPA
	faddd	%f0, %f2, %f4
	fmuld	%f0, %f2, %f6
	faddd	%f0, %f2, %f8
	fmuld	%f0, %f2, %f10
	faddd	%f0, %f2, %f12
	fmuld	%f0, %f2, %f14
	faddd	%f0, %f2, %f16
	fmuld	%f0, %f2, %f18
	faddd	%f0, %f2, %f20
	fmuld	%f0, %f2, %f22
	faddd	%f0, %f2, %f24
	fmuld	%f0, %f2, %f26
	faddd	%f0, %f2, %f28
	fmuld	%f0, %f2, %f30
	faddd	%f0, %f2, %f32
	fmuld	%f0, %f2, %f34
	faddd	%f0, %f2, %f36
	fmuld	%f0, %f2, %f38
	faddd	%f0, %f2, %f40
	fmuld	%f0, %f2, %f42
	faddd	%f0, %f2, %f44
	fmuld	%f0, %f2, %f46
	faddd	%f0, %f2, %f48
	fmuld	%f0, %f2, %f50
	faddd	%f0, %f2, %f52
	fmuld	%f0, %f2, %f54
	faddd	%f0, %f2, %f56
	fmuld	%f0, %f2, %f58
	b,pt	%xcc, fpdis_exit2
	 faddd	%f0, %f2, %f60
1:	mov	SECONDARY_CONTEXT, %g3
	add	%g6, AOFF_task_fpregs + 0x80, %g1
	faddd	%f0, %f2, %f4
	fmuld	%f0, %f2, %f6
	ldxa	[%g3] ASI_DMMU, %g5
	add	%g6, AOFF_task_fpregs + 0xc0, %g2
	stxa	%g0, [%g3] ASI_DMMU
	faddd	%f0, %f2, %f8
	fmuld	%f0, %f2, %f10
	flush	%g6
	membar	#StoreLoad | #LoadLoad
	ldda	[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g2] ASI_BLK_S, %f48
	faddd	%f0, %f2, %f12
	fmuld	%f0, %f2, %f14
	faddd	%f0, %f2, %f16
	fmuld	%f0, %f2, %f18
	faddd	%f0, %f2, %f20
	fmuld	%f0, %f2, %f22
	faddd	%f0, %f2, %f24
	fmuld	%f0, %f2, %f26
	faddd	%f0, %f2, %f28
	fmuld	%f0, %f2, %f30
	b,pt	%xcc, fpdis_exit
	 membar	#Sync
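	/* Annotation: FPRS_DL is set here, so the low half was saved.
	 * If FPRS_DU is also set we branch to 3 and reload the entire
	 * register file; otherwise %f0-%f31 are block-loaded back and
	 * the upper half is synthesized as zeros via the fzero/faddd/
	 * fmuld chains.  As in the path above, storing %g0 into
	 * SECONDARY_CONTEXT temporarily switches the secondary context
	 * to the nucleus so the ASI_BLK_S loads resolve in the kernel
	 * context (hence the "where is ASI_BLK_NUCLEUS" gripe), and
	 * fpdis_exit writes the saved context in %g5 back.
	 */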
2:	andcc	%g5, FPRS_DU, %g0
	bne,pt	%icc, 3f
	 fzero	%f32
	mov	SECONDARY_CONTEXT, %g3
	fzero	%f34
	ldxa	[%g3] ASI_DMMU, %g5
	add	%g6, AOFF_task_fpregs, %g1
	stxa	%g0, [%g3] ASI_DMMU
	add	%g6, AOFF_task_fpregs + 0x40, %g2
	faddd	%f32, %f34, %f36
	fmuld	%f32, %f34, %f38
	flush	%g6
	membar	#StoreLoad | #LoadLoad
	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g2] ASI_BLK_S, %f16
	faddd	%f32, %f34, %f40
	fmuld	%f32, %f34, %f42
	faddd	%f32, %f34, %f44
	fmuld	%f32, %f34, %f46
	faddd	%f32, %f34, %f48
	fmuld	%f32, %f34, %f50
	faddd	%f32, %f34, %f52
	fmuld	%f32, %f34, %f54
	faddd	%f32, %f34, %f56
	fmuld	%f32, %f34, %f58
	faddd	%f32, %f34, %f60
	fmuld	%f32, %f34, %f62
	b,pt	%xcc, fpdis_exit
	 membar	#Sync
3:	mov	SECONDARY_CONTEXT, %g3
	add	%g6, AOFF_task_fpregs, %g1
	ldxa	[%g3] ASI_DMMU, %g5
	mov	0x40, %g2
	stxa	%g0, [%g3] ASI_DMMU
	flush	%g6
	membar	#StoreLoad | #LoadLoad
	ldda	[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
	ldda	[%g1 + %g2] ASI_BLK_S, %f16
	add	%g1, 0x80, %g1
	ldda	[%g1] ASI_BLK_S, %f32
	ldda	[%g1 + %g2] ASI_BLK_S, %f48
	membar	#Sync
fpdis_exit:
	stxa	%g5, [%g3] ASI_DMMU
	flush	%g6
fpdis_exit2:
	wr	%g7, 0, %gsr
	ldx	[%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
	rdpr	%tstate, %g3
	or	%g3, %g4, %g3		! anal...
	wrpr	%g3, %tstate
	wr	%g0, FPRS_FEF, %fprs	! clean DU/DL bits
	retry

	.globl	do_fptrap
	.align	32
do_fptrap:
	ldub	[%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
	stx	%fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
	rd	%fprs, %g1
	or	%g3, %g1, %g3
	stb	%g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
	rd	%gsr, %g3
	stb	%g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
	mov	SECONDARY_CONTEXT, %g3
	add	%g6, AOFF_task_fpregs, %g2
	ldxa	[%g3] ASI_DMMU, %g5
	stxa	%g0, [%g3] ASI_DMMU
	flush	%g6
	membar	#StoreStore | #LoadStore
	andcc	%g1, FPRS_DL, %g0
	be,pn	%icc, 4f
	 mov	0x40, %g3
	stda	%f0, [%g2] ASI_BLK_S
	stda	%f16, [%g2 + %g3] ASI_BLK_S
	andcc	%g1, FPRS_DU, %g0
	be,pn	%icc, 5f
4:	 add	%g2, 128, %g2
	stda	%f32, [%g2] ASI_BLK_S
	stda	%f48, [%g2 + %g3] ASI_BLK_S
5:	mov	SECONDARY_CONTEXT, %g1
	membar	#Sync
	stxa	%g5, [%g1] ASI_DMMU
	flush	%g6
	ba,pt	%xcc, etrap
	 wr	%g0, 0, %fprs

	/* The registers for cross calls will be:
	 *
	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
	 *         [high 32-bits] MMU Context Argument 0, place in %g5
	 * DATA 1: Address Argument 1, place in %g6
	 * DATA 2: Address Argument 2, place in %g7
	 *
	 * With this method we can do most of the cross-call tlb/cache
	 * flushing very quickly.
	 *
	 * Current CPU's IRQ worklist table is locked into %g1,
	 * don't touch.
	 */
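	/* Annotation: do_ivec reads interrupt vector data word 0 from
	 * the UDB at offset 0x40.  A value at or above KERNBASE is a
	 * cross-call: data words 1 and 2 are fetched as well and
	 * control jumps to the function address in the low 32 bits,
	 * with the high 32 bits (the MMU context argument) already in
	 * %g5 from the delay slot.  Anything below KERNBASE is an
	 * interrupt number: it indexes the 32-byte ivector_table
	 * buckets, the bucket is chained onto this CPU's per-PIL
	 * worklist, and the matching softint bit is posted.  Buckets
	 * with no irq_info fall through to do_ivec_spurious.
	 */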
	.text
	.align	32
	.globl	do_ivec
do_ivec:
	mov	0x40, %g3
	ldxa	[%g3 + %g0] ASI_UDB_INTR_R, %g3
	sethi	%hi(KERNBASE), %g4
	cmp	%g3, %g4
	bgeu,pn	%xcc, do_ivec_xcall
	 srlx	%g3, 32, %g5
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync

	sethi	%hi(ivector_table), %g2
	sllx	%g3, 5, %g3
	or	%g2, %lo(ivector_table), %g2
	add	%g2, %g3, %g3
	ldx	[%g3 + 0x08], %g2	/* irq_info */
	ldub	[%g3 + 0x04], %g4	/* pil */
	brz,pn	%g2, do_ivec_spurious
	 mov	1, %g2

	sllx	%g2, %g4, %g2
	sllx	%g4, 2, %g4
	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
	wr	%g2, 0x0, %set_softint
	retry
do_ivec_xcall:
	mov	0x50, %g1
	ldxa	[%g1 + %g0] ASI_UDB_INTR_R, %g1
	srl	%g3, 0, %g3
	mov	0x60, %g7
	ldxa	[%g7 + %g0] ASI_UDB_INTR_R, %g7
	stxa	%g0, [%g0] ASI_INTR_RECEIVE
	membar	#Sync
	jmpl	%g3, %g0
	 nop
do_ivec_spurious:
	stw	%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_IG | PSTATE_AG, %pstate
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	call	catch_disabled_ivec
	 add	%sp, STACK_BIAS + REGWIN_SZ, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6

	.globl	save_alternate_globals
save_alternate_globals: /* %o0 = save_area */
	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE, %o1
	wrpr	%o1, PSTATE_AG, %pstate
	stx	%g0, [%o0 + 0x00]
	stx	%g1, [%o0 + 0x08]
	stx	%g2, [%o0 + 0x10]
	stx	%g3, [%o0 + 0x18]
	stx	%g4, [%o0 + 0x20]
	stx	%g5, [%o0 + 0x28]
	stx	%g6, [%o0 + 0x30]
	stx	%g7, [%o0 + 0x38]
	wrpr	%o1, PSTATE_IG, %pstate
	stx	%g0, [%o0 + 0x40]
	stx	%g1, [%o0 + 0x48]
	stx	%g2, [%o0 + 0x50]
	stx	%g3, [%o0 + 0x58]
	stx	%g4, [%o0 + 0x60]
	stx	%g5, [%o0 + 0x68]
	stx	%g6, [%o0 + 0x70]
	stx	%g7, [%o0 + 0x78]
	wrpr	%o1, PSTATE_MG, %pstate
	stx	%g0, [%o0 + 0x80]
	stx	%g1, [%o0 + 0x88]
	stx	%g2, [%o0 + 0x90]
	stx	%g3, [%o0 + 0x98]
	stx	%g4, [%o0 + 0xa0]
	stx	%g5, [%o0 + 0xa8]
	stx	%g6, [%o0 + 0xb0]
	stx	%g7, [%o0 + 0xb8]
	wrpr	%o5, 0x0, %pstate
	retl
	 nop

	.globl	restore_alternate_globals
restore_alternate_globals: /* %o0 = save_area */
	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE, %o1
	wrpr	%o1, PSTATE_AG, %pstate
	ldx	[%o0 + 0x00], %g0
	ldx	[%o0 + 0x08], %g1
	ldx	[%o0 + 0x10], %g2
	ldx	[%o0 + 0x18], %g3
	ldx	[%o0 + 0x20], %g4
	ldx	[%o0 + 0x28], %g5
	ldx	[%o0 + 0x30], %g6
	ldx	[%o0 + 0x38], %g7
	wrpr	%o1, PSTATE_IG, %pstate
	ldx	[%o0 + 0x40], %g0
	ldx	[%o0 + 0x48], %g1
	ldx	[%o0 + 0x50], %g2
	ldx	[%o0 + 0x58], %g3
	ldx	[%o0 + 0x60], %g4
	ldx	[%o0 + 0x68], %g5
	ldx	[%o0 + 0x70], %g6
	ldx	[%o0 + 0x78], %g7
	wrpr	%o1, PSTATE_MG, %pstate
	ldx	[%o0 + 0x80], %g0
	ldx	[%o0 + 0x88], %g1
	ldx	[%o0 + 0x90], %g2
	ldx	[%o0 + 0x98], %g3
	ldx	[%o0 + 0xa0], %g4
	ldx	[%o0 + 0xa8], %g5
	ldx	[%o0 + 0xb0], %g6
	ldx	[%o0 + 0xb8], %g7
	wrpr	%o5, 0x0, %pstate
	retl
	 nop

	.globl	getcc, setcc
getcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	srlx	%o1, 32, %o1
	and	%o1, 0xf, %o1
	retl
	 stx	%o1, [%o0 + PT_V9_G1]
setcc:
	ldx	[%o0 + PT_V9_TSTATE], %o1
	ldx	[%o0 + PT_V9_G1], %o2
	or	%g0, %ulo(TSTATE_ICC), %o3
	sllx	%o3, 32, %o3
	andn	%o1, %o3, %o1
	sllx	%o2, 32, %o2
	and	%o2, %o3, %o2
	or	%o1, %o2, %o1
	retl
	 stx	%o1, [%o0 + PT_V9_TSTATE]

	.globl	utrap, utrap_ill
utrap:	brz,pn	%g1, etrap
	 nop
	save	%sp, -128, %sp
	rdpr	%tstate, %l6
	rdpr	%cwp, %l7
	andn	%l6, TSTATE_CWP, %l6
	wrpr	%l6, %l7, %tstate
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7
	wrpr	%g1, 0, %tnpc
	done
utrap_ill:
	call	bad_trap
	 add	%sp, STACK_BIAS + REGWIN_SZ, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6

#ifdef CONFIG_BLK_DEV_FD
	.globl	floppy_hardint
floppy_hardint:
	wr	%g0, (1 << 11), %clear_softint
	sethi	%hi(doing_pdma), %g1
	ld	[%g1 + %lo(doing_pdma)], %g2
	brz,pn	%g2, floppy_dosoftint
	 sethi	%hi(fdc_status), %g3
	ldx	[%g3 + %lo(fdc_status)], %g3
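	/* Annotation: pseudo-DMA.  The next_byte loop below polls the
	 * FDC status register through ASI_PHYS_BYPASS_EC_E and moves
	 * one byte per iteration between the controller FIFO and the
	 * buffer at pdma_vaddr, until pdma_size is exhausted or the
	 * controller signals FIFO-emptied or overrun.
	 */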
	sethi	%hi(pdma_vaddr), %g5
	ldx	[%g5 + %lo(pdma_vaddr)], %g4
	sethi	%hi(pdma_size), %g5
	ldx	[%g5 + %lo(pdma_size)], %g5
next_byte:
	lduba	[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	andcc	%g7, 0x80, %g0
	be,pn	%icc, floppy_fifo_emptied
	 andcc	%g7, 0x20, %g0
	be,pn	%icc, floppy_overrun
	 andcc	%g7, 0x40, %g0
	be,pn	%icc, floppy_write
	 sub	%g5, 1, %g5

	inc	%g3
	lduba	[%g3] ASI_PHYS_BYPASS_EC_E, %g7
	dec	%g3
	orcc	%g0, %g5, %g0
	stb	%g7, [%g4]
	bne,pn	%xcc, next_byte
	 add	%g4, 1, %g4

	b,pt	%xcc, floppy_tdone
	 nop

floppy_write:
	ldub	[%g4], %g7
	orcc	%g0, %g5, %g0
	inc	%g3
	stba	%g7, [%g3] ASI_PHYS_BYPASS_EC_E
	dec	%g3
	bne,pn	%xcc, next_byte
	 add	%g4, 1, %g4

floppy_tdone:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(auxio_register), %g1
	ldx	[%g1 + %lo(auxio_register)], %g7
	ldub	[%g7], %g5
	or	%g5, 0xc2, %g5
	stb	%g5, [%g7]
	andn	%g5, 0x02, %g5

	nop; nop; nop; nop; nop; nop;
	nop; nop; nop; nop; nop; nop;

	stb	%g5, [%g7]
	sethi	%hi(doing_pdma), %g1
	b,pt	%xcc, floppy_dosoftint
	 st	%g0, [%g1 + %lo(doing_pdma)]

floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(irq_action), %g1
	or	%g1, %lo(irq_action), %g1
	ldx	[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
	ldx	[%g3 + 0x10], %g4		! action->mask == ino_bucket ptr
	ldx	[%g4 + 0x10], %g4		! bucket->iclr
	stwa	%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
	membar	#Sync				! probably not needed...
	retry

floppy_overrun:
	sethi	%hi(pdma_vaddr), %g1
	stx	%g4, [%g1 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %g1
	stx	%g5, [%g1 + %lo(pdma_size)]
	sethi	%hi(doing_pdma), %g1
	st	%g0, [%g1 + %lo(doing_pdma)]

floppy_dosoftint:
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7

	mov	11, %o0
	mov	0, %o1
	call	sparc_floppy_irq
	 add	%sp, STACK_BIAS + REGWIN_SZ, %o2

	b,pt	%xcc, rtrap
	 clr	%l6
#endif /* CONFIG_BLK_DEV_FD */

	/* XXX Here is stuff we still need to write... -DaveM XXX */
	.globl	netbsd_syscall
netbsd_syscall:
	retl
	 nop

	/* These next few routines must be sure to clear the
	 * SFSR FaultValid bit so that the fast tlb data protection
	 * handler does not flush the wrong context and lock up the
	 * box.
	 */
	.globl	__do_data_access_exception
	.globl	__do_data_access_exception_tl1
__do_data_access_exception_tl1:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	rdpr	%tl, %g3
	cmp	%g3, 1
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	bgu,pn	%icc, winfix_dax
	 rdpr	%tpc, %g3
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etraptl1
	 or	%g7, %lo(109f), %g7	! Merge in below
__do_data_access_exception:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	data_access_exception
	 add	%sp, STACK_BIAS + REGWIN_SZ, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6
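	/* Annotation: the instruction access handlers below follow the
	 * same pattern, snapshot SFSR and SFAR, clear the FaultValid
	 * bit, then trap into C; the tl1 entry funnels through
	 * etraptl1 instead of etrap.
	 */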
	.globl	__do_instruction_access_exception
	.globl	__do_instruction_access_exception_tl1
__do_instruction_access_exception_tl1:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etraptl1
	 or	%g7, %lo(109f), %g7	! Merge in below
__do_instruction_access_exception:
	rdpr	%pstate, %g4
	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
	mov	TLB_SFSR, %g3
	mov	DMMU_SFAR, %g5
	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
	membar	#Sync
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap
109:	 or	%g7, %lo(109b), %g7
	mov	%l4, %o1
	mov	%l5, %o2
	call	instruction_access_exception
	 add	%sp, STACK_BIAS + REGWIN_SZ, %o0
	ba,pt	%xcc, rtrap
	 clr	%l6

	/* This is the trap handler entry point for ECC correctable
	 * errors.  They are corrected, but we listen for the trap
	 * so that the event can be logged.
	 *
	 * Disrupting errors are either:
	 * 1) single-bit ECC errors during UDB reads to system
	 *    memory
	 * 2) data parity errors during write-back events
	 *
	 * As far as I can make out from the manual, the CEE trap
	 * is only for correctable errors during memory read
	 * accesses by the front-end of the processor.
	 *
	 * The code below is only for trap level 1 CEE events,
	 * as it is the only situation where we can safely record
	 * and log.  For trap level >1 we just clear the CE bit
	 * in the AFSR and return.
	 */