📄 mca_asm.s
//
// assembly portion of the IA64 MCA handling
//
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
//
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
//                 kstack, switch modes, jump to C INIT handler
//
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
//                 Before entering virtual mode code:
//                 1. Check for TLB CPU error
//                 2. Restore current thread pointer to kr6
//                 3. Move stack ptr 16 bytes to conform to C calling convention
//
// 04/11/12 Russ Anderson <rja@sgi.com>
//                 Added per cpu MCA/INIT stack save areas.
//
#include <linux/config.h>
#include <linux/threads.h>

#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>

#ifdef XEN
#include <asm/vhpt.h>
#include <public/arch-ia64.h>
#endif

/*
 * When we get a machine check, the kernel stack pointer is no longer
 * valid, so we need to set a new stack pointer.
 */
#define MINSTATE_PHYS   /* Make sure stack access is physical for MINSTATE */

/*
 * Needed for return context to SAL
 */
#define IA64_MCA_SAME_CONTEXT   0
#define IA64_MCA_COLD_BOOT      -2

#include "minstate.h"

/*
 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
 *      1. GR1 = OS GP
 *      2. GR8 = PAL_PROC physical address
 *      3. GR9 = SAL_PROC physical address
 *      4. GR10 = SAL GP (physical)
 *      5. GR11 = Rendez state
 *      6. GR12 = Return address to location within SAL_CHECK
 */
#ifdef XEN
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)                          \
        GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);;      \
        ld8 _tmp=[_tmp];;                                               \
        st8 [_tmp]=r1,0x08;;                                            \
        st8 [_tmp]=r8,0x08;;                                            \
        st8 [_tmp]=r9,0x08;;                                            \
        st8 [_tmp]=r10,0x08;;                                           \
        st8 [_tmp]=r11,0x08;;                                           \
        st8 [_tmp]=r12,0x08;;                                           \
        st8 [_tmp]=r17,0x08;;                                           \
        st8 [_tmp]=r18,0x08
#else
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)                          \
        LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);;        \
        st8 [_tmp]=r1,0x08;;                                            \
        st8 [_tmp]=r8,0x08;;                                            \
        st8 [_tmp]=r9,0x08;;                                            \
        st8 [_tmp]=r10,0x08;;                                           \
        st8 [_tmp]=r11,0x08;;                                           \
        st8 [_tmp]=r12,0x08;;                                           \
        st8 [_tmp]=r17,0x08;;                                           \
        st8 [_tmp]=r18,0x08
#endif /* XEN */
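// The eight st8 stores above lay the handed-off registers out back to back
// in the per-CPU save area, in the same order as the C structure for
// ia64_mca_sal_to_os_state_t in include/asm/mca.h (see the NOTE at the call
// site below).  Illustrative sketch only -- the field names here are
// hypothetical; the header is authoritative:
//
//      struct sal_to_os_handoff {      /* hypothetical names */
//              u64 os_gp;              /* r1  */
//              u64 pal_proc;           /* r8  */
//              u64 sal_proc;           /* r9  */
//              u64 sal_gp;             /* r10 */
//              u64 rendez_state;       /* r11 */
//              u64 sal_check_ra;       /* r12 */
//              u64 r17_val;            /* r17, as handed off by SAL */
//              u64 r18_val;            /* r18, as handed off by SAL */
//      };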
/*
 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
 * (p6) is executed if we never entered virtual mode (TLB error)
 * (p7) is executed if we entered virtual mode as expected (normal case)
 *      1. GR8 = OS_MCA return status
 *      2. GR9 = SAL GP (physical)
 *      3. GR10 = 0/1 returning same/new context
 *      4. GR22 = New min state save area pointer
 *      returns ptr to SAL rtn save loc in _tmp
 */
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)       \
        movl _tmp=ia64_os_to_sal_handoff_state;;        \
        DATA_VA_TO_PA(_tmp);;                           \
        ld8 r8=[_tmp],0x08;;                            \
        ld8 r9=[_tmp],0x08;;                            \
        ld8 r10=[_tmp],0x08;;                           \
        ld8 r22=[_tmp],0x08;;
                // now _tmp is pointing to SAL rtn save location

/*
 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
 *      imots_os_status=IA64_MCA_COLD_BOOT
 *      imots_sal_gp=SAL GP
 *      imots_context=IA64_MCA_SAME_CONTEXT
 *      imots_new_min_state=Min state save area pointer
 *      imots_sal_check_ra=Return address to location within SAL_CHECK
 *
 */
#ifdef XEN
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
        movl tmp=IA64_MCA_COLD_BOOT;                                    \
        GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;         \
        ld8 sal_to_os_handoff=[sal_to_os_handoff];;                     \
        movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;;          \
        dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;;          \
        /*DATA_VA_TO_PA(os_to_sal_handoff);;*/                          \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff],48;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        movl tmp=IA64_MCA_SAME_CONTEXT;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff],-8;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff];;                                   \
        st8 [os_to_sal_handoff]=tmp;;
#else /* XEN */
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
        movl tmp=IA64_MCA_COLD_BOOT;                                    \
        movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);      \
        movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;     \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff],48;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        movl tmp=IA64_MCA_SAME_CONTEXT;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff],-8;;                                \
        st8 [os_to_sal_handoff]=tmp,8;;                                 \
        ld8 tmp=[sal_to_os_handoff];;                                   \
        st8 [os_to_sal_handoff]=tmp;;
#endif /* XEN */

#define GET_IA64_MCA_DATA(reg)                                          \
        GET_THIS_PADDR(reg, ia64_mca_data)                              \
        ;;                                                              \
        ld8 reg=[reg]

        .global ia64_os_mca_dispatch
        .global ia64_os_mca_dispatch_end
#ifndef XEN
        .global ia64_sal_to_os_handoff_state
        .global ia64_os_to_sal_handoff_state
#endif
        .global ia64_do_tlb_purge

        .text
        .align 16

/*
 * Just the TLB purge part is moved to a separate function
 * so we can re-use the code for cpu hotplug code as well
 * Caller should now setup b1, so we can branch once the
 * tlb flush is complete.
 */

ia64_do_tlb_purge:
#define O(member)       IA64_CPUINFO_##member##_OFFSET

        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
        ;;
        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
        ld4 r19=[r2],4                                  // r19=ptce_count[0]
        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
        ;;
        ld4 r20=[r2]                                    // r20=ptce_count[1]
        ld4 r22=[r17]                                   // r22=ptce_stride[1]
        mov r24=0
        ;;
        adds r20=-1,r20
        ;;
#undef O

2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i                  // srlz.i implies srlz.d
        ;;
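        // The 2:/3: loop above walks the purge space described by
        // PAL_PTCE_INFO (base/count/stride read from cpu_info), issuing
        // ptc.e at each step.  Roughly equivalent C, sketch only (this is
        // the same walk the kernel's local_flush_tlb_all() performs):
        //
        //      addr = ptce_base;
        //      for (i = 0; i < ptce_count[0]; i++) {
        //              for (j = 0; j < ptce_count[1]; j++) {
        //                      ia64_ptce(addr);        /* ptc.e */
        //                      addr += ptce_stride[1];
        //              }
        //              addr += ptce_stride[0];
        //      }
        //      ia64_srlz_i();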
        // Now purge addresses formerly mapped by TR registers
        // 1. Purge ITR&DTR for kernel.
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        // 2. Purge DTR for PERCPU data.
        movl r16=PERCPU_ADDR
        mov r18=PERCPU_PAGE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;
        // 3. Purge ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_base)
        ;;
        ld8 r16=[r2]
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;
        // 4. Purge DTR for stack.
#ifdef XEN
        // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
        // to allow the kernel registers themselves to be used by domains.
        GET_THIS_PADDR(r2, cpu_kr);;
        add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
        ;;
        ld8 r16=[r2]
#else
        mov r16=IA64_KR(CURRENT_STACK)
#endif
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;
#ifdef XEN
        // 5. shared_info
        GET_THIS_PADDR(r2, inserted_shared_info);;
        ld8 r16=[r2]
        mov r18=XSI_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;

        // 6. mapped_regs
        GET_THIS_PADDR(r2, inserted_mapped_regs);;
        ld8 r16=[r2]
        mov r18=XMAPPEDREGS_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;

        // 7. VPD
        // The VPD will not be mapped in the case where
        // a VMX domain hasn't been started since boot
        GET_THIS_PADDR(r2, inserted_vpd);;
        ld8 r16=[r2]
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        cmp.eq p7,p0=r16,r0
        ;;
(p7)    br.cond.sptk .vpd_not_mapped
        ;;
        ptr.i r16,r18
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
.vpd_not_mapped:

        // 8. VHPT
        // GET_VA_VCPU_VHPT_MADDR() may not give the
        // value of the VHPT currently pinned into the TLB
        GET_THIS_PADDR(r2, inserted_vhpt);;
        ld8 r2=[r2]
        ;;
        cmp.eq p7,p0=r2,r0
        ;;
(p7)    br.cond.sptk .vhpt_not_mapped
        dep r16=0,r2,0,IA64_GRANULE_SHIFT
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;
.vhpt_not_mapped:
#endif
        // Now branch away to caller.
        br.sptk.many b1
        ;;

ia64_os_mca_dispatch:

        // Serialize all MCA processing
        mov r3=1;;
        LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
ia64_os_mca_spin:
        xchg8 r4=[r2],r3;;
        cmp.ne p6,p0=r4,r0
(p6)    br ia64_os_mca_spin
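        // The xchg8 above is a simple spin lock on ia64_mca_serialize, so
        // only one CPU at a time runs the OS MCA handler; any other CPU
        // loops at ia64_os_mca_spin.  Sketch of the equivalent C:
        //
        //      while (xchg(&ia64_mca_serialize, 1) != 0)
        //              ;       /* another CPU owns MCA processing; spin */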
        // Save the SAL to OS MCA handoff state as defined
        // by SAL SPEC 3.0
        // NOTE : The order in which the state gets saved
        //        is dependent on the way the C-structure
        //        for ia64_mca_sal_to_os_state_t has been
        //        defined in include/asm/mca.h
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;

        // LOG PROCESSOR STATE INFO FROM HERE ON..
begin_os_mca_dump:
        br ia64_os_mca_proc_state_dump;;

ia64_os_mca_done_dump:

#ifdef XEN
        // Set current to ar.k6
        GET_THIS_PADDR(r2,cpu_kr);;
        add r2=IA64_KR_CURRENT_OFFSET,r2;;
        ld8 r2=[r2];;
        mov ar.k6=r2;;

        GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
        ld8 r2=[r2];;
        adds r16=56,r2
#else
        LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
#endif
        ;;
        ld8 r18=[r16]   // Get processor state parameter on existing PALE_CHECK.
        ;;
        tbit.nz p6,p7=r18,60
(p7)    br.spnt done_tlb_purge_and_reload

        // The following code purges TC and TR entries. Then reload all TC entries.
        // Purge percpu data TC entries.
begin_tlb_purge_and_reload:
        movl r18=ia64_reload_tr;;
        LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
        mov b1=r18;;
        br.sptk.many ia64_do_tlb_purge;;

ia64_reload_tr:
        // Finally reload the TR registers.
        // 1. Reload DTR/ITR registers for kernel.
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        movl r17=KERNEL_START
        ;;
        mov cr.itir=r18
        mov cr.ifa=r17
        mov r16=IA64_TR_KERNEL
        mov r19=ip
        movl r18=PAGE_KERNEL
        ;;
        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
        ;;
        or r18=r17,r18
        ;;
        itr.i itr[r16]=r18
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.i
        srlz.d
        ;;
        // 2. Reload DTR register for PERCPU data.
        GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
        ;;
        movl r16=PERCPU_ADDR            // vaddr
        movl r18=PERCPU_PAGE_SHIFT<<2
        ;;
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        ld8 r18=[r2]                    // load per-CPU PTE
        mov r16=IA64_TR_PERCPU_DATA;
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.d
        ;;
#ifndef XEN
        // 3. Reload ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_pte)
        ;;
        ld8 r18=[r2]                    // load PAL PTE
        ;;
        GET_THIS_PADDR(r2, ia64_mca_pal_base)
        ;;
        ld8 r16=[r2]                    // load PAL vaddr
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r16
        mov r20=IA64_TR_PALCODE
        ;;
        itr.i itr[r20]=r18
        ;;
        srlz.i
        ;;
#endif
        // 4. Reload DTR for stack.
#ifdef XEN
        // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
        // to allow the kernel registers themselves to be used by domains.
        GET_THIS_PADDR(r2, cpu_kr);;
        add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
        ;;
        ld8 r16=[r2]
#else
        mov r16=IA64_KR(CURRENT_STACK)
#endif
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r18=r19,r16
        movl r20=PAGE_KERNEL
        ;;
        add r16=r20,r16
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r18
        mov r20=IA64_TR_CURRENT_STACK
        ;;
        itr.d dtr[r20]=r16
        ;;
        srlz.d
        ;;
#ifdef XEN
        // if !VMX_DOMAIN(current)
        //      pin down shared_info and mapped_regs
        // else
        //      pin down VPD
        GET_THIS_PADDR(r2,cpu_kr);;
        add r2=IA64_KR_CURRENT_OFFSET,r2
        ;;
        ld8 r2=[r2]
        ;;
        dep r2=0,r2,60,4
        ;;
        add r2=IA64_VCPU_FLAGS_OFFSET,r2
        ;;
        ld8 r2=[r2]
        ;;
        cmp.eq p6,p7 = r2,r0
(p7)    br.cond.sptk .vmx_domain

        // 5. shared_info
        GET_THIS_PADDR(r2, inserted_shared_info);;
        ld8 r16=[r2]
        mov r18=XSI_SHIFT<<2
        movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
        ;;
        GET_THIS_PADDR(r2, domain_shared_info);;
        ld8 r17=[r2]
        ;;
        dep r17=0,r17,60,4
        ;;
        or r17=r17,r20          // construct PA | page properties
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        mov r16=IA64_TR_SHARED_INFO
        ;;
        itr.d dtr[r16]=r17      // wire in new mapping...
        ;;
        srlz.d
        ;;

        // 6. mapped_regs
        GET_THIS_PADDR(r2, inserted_mapped_regs);;
        ld8 r16=[r2]
        mov r18=XMAPPEDREGS_SHIFT<<2
        ;;
        GET_THIS_PADDR(r2,cpu_kr);;
        add r2=IA64_KR_CURRENT_OFFSET,r2
        ;;
        ld8 r2=[r2]
        ;;
        dep r2=0,r2,60,4
        ;;
        add r2=IA64_VPD_BASE_OFFSET,r2
        ;;
        ld8 r17=[r2]
        ;;
        dep r17=0,r17,60,4
        ;;
        or r17=r17,r20          // construct PA | page properties
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        mov r16=IA64_TR_MAPPED_REGS
        ;;
        itr.d dtr[r16]=r17      // wire in new mapping...
        ;;
        srlz.d
        ;;
        br.sptk.many .reload_vpd_not_mapped;;

.vmx_domain:
        // 7. VPD
        GET_THIS_PADDR(r2, inserted_vpd);;
        ld8 r16=[r2]
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        cmp.eq p7,p0=r16,r0
        ;;
(p7)    br.cond.sptk .reload_vpd_not_mapped
        dep r17=0,r16,60,4
        ;;
        dep r17=0,r17,0,IA64_GRANULE_SHIFT
        ;;
        movl r20=PAGE_KERNEL
        ;;
        or r17=r20,r17          // construct PA | page properties
        ;;
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        mov r16=IA64_TR_VPD
        mov r18=IA64_TR_MAPPED_REGS
        ;;
        itr.i itr[r16]=r17
        ;;
        itr.d dtr[r18]=r17
        ;;
        srlz.i
        ;;
        srlz.d
        ;;

.reload_vpd_not_mapped:
        // 8. VHPT
        GET_THIS_PADDR(r2, inserted_vhpt);;
        ld8 r2=[r2]
        ;;
        cmp.eq p7,p0=r2,r0
        ;;
(p7)    br.cond.sptk .overlap_vhpt      // vhpt isn't mapped.

        // avoid overlapping with stack TR
        dep r16=0,r2,0,IA64_GRANULE_SHIFT
        ;;
        GET_THIS_PADDR(r2,cpu_kr);;
        add r2=IA64_KR_CURRENT_OFFSET,r2
        ;;
        ld8 r2=[r2]
        ;;
        dep r17=0,r2,0,IA64_GRANULE_SHIFT
        ;;
        cmp.eq p7,p0=r16,r17
(p7)    br.cond.sptk .overlap_vhpt

        movl r20=PAGE_KERNEL
        ;;
        mov r18=IA64_TR_VHPT
        dep r17=0,r16,60,4              // physical address of
                                        // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        or r17=r17,r20                  // construct PA | page properties
        mov cr.itir=r19
        mov cr.ifa=r16
        ;;
        itr.d dtr[r18]=r17              // wire in new mapping...
        ;;
        srlz.d
        ;;
.overlap_vhpt:
#endif
        br.sptk.many done_tlb_purge_and_reload
err:
        COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
        br.sptk.many ia64_os_mca_done_restore

done_tlb_purge_and_reload:

        // Setup new stack frame for OS_MCA handling
        GET_IA64_MCA_DATA(r2)
        ;;
        add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
        add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
        ;;
        rse_switch_context(r6,r3,r2);;  // RSC management in this new context