⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 vmx_ivt.s

📁 xen 3.2.2 源码
💻 S
📖 第 1 页 / 共 3 页
字号:
/*
 * arch/ia64/kernel/vmx_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * 05/3/20 Xuefei Xu  (Anthony Xu) (anthony.xu@intel.com)
 *              Supporting Intel virtualization architecture
 *
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 *  For each entry, the comment is as follows:
 *
 *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /          /
 *  entry number ---------/         /                  /          /
 *  size of the entry -------------/                  /          /
 *  vector name -------------------------------------/          /
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
* (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/virt_event.h>
#include <asm/vmx_phy_mode.h>
#include <xen/errno.h>

#if 1
# define PSR_DEFAULT_BITS   psr.ac
#else
# define PSR_DEFAULT_BITS   0
#endif

#ifdef VTI_DEBUG
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
// Log one fault record (iip, ipsr, ifa, vector number) into the per-CPU
// IVT debug ring buffer anchored at r21; the cursor (IVT_CUR_OFS) wraps
// at 0x1000 via the 0xfe0 mask.  Clobbers r16-r20, r22, r23.
#define VMX_DBG_FAULT(i) \
    add r16=IVT_CUR_OFS,r21;    \
    add r17=IVT_DBG_OFS,r21;;   \
    ld8  r18=[r16];;    \
    add r17=r18,r17;   \
    mov r19=cr.iip;     \
    mov r20=cr.ipsr;    \
    mov r22=cr.ifa;     \
    mov r23=i;;          \
    st8 [r17]=r19,8;   \
    add r18=32,r18;;     \
    st8 [r17]=r20,8;    \
    mov r19=0xfe0;;      \
    st8 [r17]=r22,8;   \
    and r18=r19,r18;;   \
    st8 [r17]=r23;      \
    st8 [r16]=r18;;     \
//# define VMX_DBG_FAULT(i)   mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
#else
# define VMX_DBG_FAULT(i)
#endif

#include "vmx_minstate.h"

#define MINSTATE_VIRT	/* needed by minstate.h */
#include "minstate.h"

// Catch-all: record the vector number in r19 and hand off to the common
// C-level fault dispatcher.
#define VMX_FAULT(n)    \
vmx_fault_##n:;          \
    mov r19=n;           \
    br.sptk.many dispatch_to_fault_handler;         \
    ;;                  \

// Reflect fault n to the guest when the CPU was running in VM mode
// (psr.vm set -> vmx_dispatch_reflection); otherwise treat it as a
// hypervisor fault and go to dispatch_to_fault_handler.
#define VMX_REFLECT(n)    \
    mov r31=pr;           \
    mov r19=n;       /* prepare to save predicates */ \
    mov r29=cr.ipsr;      \
    ;;      \
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
(p7)br.sptk.many vmx_dispatch_reflection;        \
    br.sptk.many dispatch_to_fault_handler

// Dead-stop on unrecoverable conditions: spin forever at a known label.
GLOBAL_ENTRY(vmx_panic)
    br.sptk.many vmx_panic
    ;;
END(vmx_panic)

    .section .text.ivt,"ax"

    .align 32768    // align on 32KB boundary
    .global vmx_ia64_ivt
vmx_ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vmx_vhpt_miss)
    VMX_DBG_FAULT(0)
    VMX_FAULT(0)
END(vmx_vhpt_miss)

    .org vmx_ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
//
// Guest instruction-TLB miss: walk the software VTLB collision chain
// (located via thash/ttag on cr.ifa).  On a tag hit, swap the found entry
// with the chain-head entry (pseudo-LRU) and insert it with itc.i, then
// resume the guest through ia64_vmm_entry.  On a miss (end of chain),
// dispatch to the C handler with vector number 1 in r19.
ENTRY(vmx_itlb_miss)
    VMX_DBG_FAULT(1)
    mov r29=cr.ipsr
    mov r31 = pr
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
(p6) br.sptk vmx_alt_itlb_miss_vmm
    mov r16 = cr.ifa
    ;;
    thash r17 = r16
    ttag r20 = r16
    ;;
    mov r18 = r17
    adds r28 = VLE_TITAG_OFFSET,r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r17 = [r19]	// Read chain
    ;;
vmx_itlb_loop:
    cmp.eq p6,p0 = r0, r17 // End of chain ?
(p6)br vmx_itlb_out
    ;;
    adds r16 = VLE_TITAG_OFFSET, r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r24 = [r16] // Read tag
    ld8 r23 = [r19] // Read chain
    ;;
    lfetch [r23]
    cmp.eq  p6,p7 = r20, r24 // does tag match ?
    ;;
(p7)mov r17 = r23; // No: entry = chain
(p7)br.sptk vmx_itlb_loop // again
    ;;
    // Swap the first entry with the entry found in the collision chain
    // to speed up next hardware search (and keep LRU).
    // In comments 1 stands for the first entry and 2 for the found entry.
    ld8 r25 = [r17] // Read value of 2
    ld8 r27 = [r18] // Read value of 1
    ld8 r29 = [r28] // Read tag of 1
    dep r22 = -1,r24,63,1    // set ti=1 of 2 (to disable it during the swap)
    ;;
    st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 2
    st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 1
    extr.u r19 = r27, 56, 4 // Extract collision chain length
    mf
    ;;
    ld8 r29 = [r16] // read itir of 2
    ld8 r22 = [r28] // read itir of 1
    dep r27 = r0, r27, 56, 4 // Clear collision chain length for 2
    dep r25 = r19, r25, 56, 4 // Write collision chain length for 1
    ;;
    st8 [r16] = r22 // Write itir of 2
    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET // write itir of 1
    st8 [r18] = r25 // Write value of 1
    st8 [r17] = r27 // Write value of 2
    ;;
    st8.rel [r28] = r24 // Write tag of 1 (with ti=0)

    // Insert the translation entry
    itc.i r25
    dv_serialize_data
    // Resume
    mov r17=cr.isr
    mov r23=r31
    mov r22=b0
    adds r16=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VPD(VPSR),r18
    movl r20=__vsa_base
    ;;
    ld8 r19=[r19]
    ld8 r20=[r20]
    ;;
    br.sptk ia64_vmm_entry
    ;;
vmx_itlb_out:
    mov r19 = 1
    br.sptk vmx_dispatch_tlb_miss
    VMX_FAULT(1);
END(vmx_itlb_miss)

    .org vmx_ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
//
// Guest data-TLB miss: identical structure to vmx_itlb_miss above
// (VTLB chain walk + head-entry swap), but inserts with itc.d and
// dispatches with vector number 2 on a chain miss.
ENTRY(vmx_dtlb_miss)
    VMX_DBG_FAULT(2)
    mov r29=cr.ipsr
    mov r31 = pr
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
(p6)br.sptk vmx_alt_dtlb_miss_vmm
    mov r16 = cr.ifa
    ;;
    thash r17 = r16
    ttag r20 = r16
    ;;
    mov r18 = r17
    adds r28 = VLE_TITAG_OFFSET,r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r17 = [r19]                     // Read chain
    ;;
vmx_dtlb_loop:
    cmp.eq p6,p0 = r0, r17              // End of chain ?
(p6)br vmx_dtlb_out
    ;;
    adds r16 = VLE_TITAG_OFFSET, r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r24 = [r16]                     // Read tag
    ld8 r23 = [r19]                     // Read chain
    ;;
    lfetch [r23]
    cmp.eq  p6,p7 = r20, r24            // does tag match ?
    ;;
(p7)mov r17 = r23;
(p7)br.sptk vmx_dtlb_loop
    ;;
    // Same head-entry swap as in vmx_itlb_miss (see comments there).
    ld8 r25 = [r17]
    ld8 r27 = [r18]
    ld8 r29 = [r28]
    dep r22 = -1,r24,63,1    //set ti=1
    ;;
    st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
    extr.u r19 = r27, 56, 4
    mf
    ;;
    ld8 r29 = [r16]
    ld8 r22 = [r28]
    dep r27 = r0, r27, 56, 4
    dep r25 = r19, r25, 56, 4
    ;;
    st8 [r16] = r22
    st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
    st8 [r18] = r25
    st8 [r17] = r27
    ;;
    st8.rel [r28] = r24
    itc.d r25                           // insert the data translation
    dv_serialize_data
    mov r17=cr.isr
    mov r23=r31
    mov r22=b0
    adds r16=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VPD(VPSR),r18
    movl r20=__vsa_base
    ;;
    ld8 r19=[r19]
    ld8 r20=[r20]
    ;;
    br.sptk ia64_vmm_entry
    ;;
vmx_dtlb_out:
    mov r19 = 2
    br.sptk vmx_dispatch_tlb_miss
    VMX_FAULT(2);
END(vmx_dtlb_miss)

    .org vmx_ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
//
// Alternate ITLB miss.  VMM context (psr.vm clear): build an identity
// PAGE_KERNEL translation for the faulting address and insert it directly.
// Guest context: if the guest is in metaphysical data mode, dispatch to the
// C TLB-miss handler; otherwise reflect as fault 3.
ENTRY(vmx_alt_itlb_miss)
    VMX_DBG_FAULT(3)
    mov r29=cr.ipsr
    mov r31 = pr
    adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
    ;;
    tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
(p7)br.spnt vmx_alt_itlb_miss_dom
vmx_alt_itlb_miss_vmm:
    mov r16=cr.ifa    // get address that caused the TLB miss
    movl r17=PAGE_KERNEL
    mov r24=cr.ipsr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    ;;
    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
    extr.u r18=r16,XEN_VIRT_UC_BIT, 15    // extract UC bit
    // NOTE(review): the length operand is 15 here but 1 in the otherwise
    // parallel vmx_alt_dtlb_miss path below.  Harmless in practice since
    // the dep below deposits only bit 0 of r18, but worth confirming
    // against upstream whether 1 was intended.
    ;;
    or r19=r17,r19      // insert PTE control bits into r19
    mov r20=IA64_GRANULE_SHIFT<<2
    ;;
    dep r19=r18,r19,4,1	// set bit 4 (uncached) if the access was to UC region
    mov cr.itir=r20
    ;;
    itc.i r19		// insert the TLB entry
    mov pr=r31,-1
    rfi
    ;;
vmx_alt_itlb_miss_dom:
    ld1 r23=[r22]  // Load mmu_mode
    ;;
    cmp.eq p6,p7=VMX_MMU_PHY_D,r23
(p7)br.sptk vmx_fault_3
    ;;
    mov r19=3
    br.sptk vmx_dispatch_tlb_miss
    VMX_FAULT(3);
END(vmx_alt_itlb_miss)

    .org vmx_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
//
// Alternate DTLB miss.  VMM context: identity-map the faulting address with
// PAGE_KERNEL attributes (special-casing the virtual frame table, and
// deferring speculative non-lfetch accesses by setting psr.ed instead of
// inserting).  Guest context: metaphysical data mode goes to the C handler,
// anything else reflects as fault 4.
ENTRY(vmx_alt_dtlb_miss)
    VMX_DBG_FAULT(4)
    mov r29=cr.ipsr
    mov r31=pr
    adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
    ;;
    tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
(p7)br.spnt vmx_alt_dtlb_miss_dom
vmx_alt_dtlb_miss_vmm:
    mov r16=cr.ifa		// get address that caused the TLB miss
    ;;
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
    // Test for the address of virtual frame_table
    shr r22=r16,56;;
    cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
(p8)br.cond.sptk frametable_miss ;;
#endif
    movl r17=PAGE_KERNEL
    mov r20=cr.isr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    mov r24=cr.ipsr
    ;;
    and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
    tbit.nz p6,p7=r20,IA64_ISR_SP_BIT		// is speculation bit on?
    extr.u r18=r16,XEN_VIRT_UC_BIT, 1		// extract UC bit
    and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
    tbit.nz p9,p0=r20,IA64_ISR_NA_BIT		// is non-access bit on?
    ;;
(p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
    dep r24=-1,r24,IA64_PSR_ED_BIT,1
    or r19=r19,r17				// insert PTE control bits into r19
    mov r20=IA64_GRANULE_SHIFT<<2
    ;;
    dep r19=r18,r19,4,1	// set bit 4 (uncached) if the access was to UC region
(p6)mov cr.ipsr=r24    // speculative access: set psr.ed instead of inserting
    mov cr.itir=r20
    ;;
(p7)itc.d r19		// insert the TLB entry
    mov pr=r31,-1
    rfi
    ;;
vmx_alt_dtlb_miss_dom:
    ld1 r23=[r22]  // Load mmu_mode
    ;;
    cmp.eq p6,p7=VMX_MMU_PHY_D,r23
(p7)br.sptk vmx_fault_4
    ;;
    mov r19=4
    br.sptk vmx_dispatch_tlb_miss
    VMX_FAULT(4);
END(vmx_alt_dtlb_miss)

    .org vmx_ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(vmx_nested_dtlb_miss)
    VMX_DBG_FAULT(5)
    VMX_FAULT(5)
END(vmx_nested_dtlb_miss)

    .org vmx_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(vmx_ikey_miss)
    VMX_DBG_FAULT(6)
    VMX_REFLECT(6)
END(vmx_ikey_miss)

    .org vmx_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(vmx_dkey_miss)
    VMX_DBG_FAULT(7)
    VMX_REFLECT(7)
END(vmx_dkey_miss)

    .org vmx_ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(vmx_dirty_bit)
    VMX_DBG_FAULT(8)
    VMX_REFLECT(8)
END(vmx_dirty_bit)

    .org vmx_ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(vmx_iaccess_bit)
    VMX_DBG_FAULT(9)
    VMX_REFLECT(9)
END(vmx_iaccess_bit)

    .org vmx_ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
//
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(vmx_daccess_bit)
    VMX_DBG_FAULT(10)
    VMX_REFLECT(10)
END(vmx_daccess_bit)

    .org vmx_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
//
// break.b fault: fast hypercall path.  Anything that is not a well-formed
// guest hypercall (wrong break immediate, non-zero privilege level, or
// hypercall number out of range) is sent to vmx_dispatch_break_fault.
ENTRY(vmx_break_fault)
    VMX_DBG_FAULT(11)
    mov r31=pr
    mov r19=11
    mov r17=cr.iim
    ;;
#ifdef VTI_DEBUG
    // break 0 is already handled in vmx_ia64_handle_break.
    cmp.eq p6,p7=r17,r0
    (p6) br.sptk vmx_fault_11
    ;;
#endif
    mov r29=cr.ipsr
    adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
    ;;
    ld4 r22=[r22]                       // r22 = vcpu's configured break immediate
    extr.u r24=r29,IA64_PSR_CPL0_BIT,2  // r24 = privilege level of the faulting code
    cmp.ltu p6,p0=NR_hypercalls,r2
    ;;
    cmp.ne.or p6,p0=r22,r17             // not the hypercall break immediate?
    cmp.ne.or p6,p0=r0,r24              // or not privilege level 0?
(p6) br.sptk.many vmx_dispatch_break_fault
    ;;
   /*
    * The streamlined system call entry/exit paths only save/restore the initial part
    * of pt_regs.  This implies that the callers of system-calls must adhere to the
    * normal procedure calling conventions.
    *
    *   Registers to be saved & restored:
    *   CR registers: cr.ipsr, cr.iip, cr.ifs
    *   AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
    *   others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
    *   Registers to be restored only:
    *   r8-r11: output value from the system call.
    * NOTE(review): this chunk is page 1 of 3 of the original file; the rest
    * of vmx_break_fault (and the remaining IVT entries) continues on the
    * following pages and is not visible here.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -