
📄 optvfault.s

📁 Xen virtual machine source code package
💻 Assembly (.s)
📖 Page 1 of 3
    ;;
    .mem.offset 0,0; st8.spill [r2]=r18,16
    .mem.offset 8,0; st8.spill [r3]=r19,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r20,16
    .mem.offset 8,0; st8.spill [r3]=r21,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r22,16
    .mem.offset 8,0; st8.spill [r3]=r23,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r24,16
    .mem.offset 8,0; st8.spill [r3]=r25,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r26,16
    .mem.offset 8,0; st8.spill [r3]=r27,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r28,16
    .mem.offset 8,0; st8.spill [r3]=r29,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r30,16
    .mem.offset 8,0; st8.spill [r3]=r31,16
    ;;
    mov r9=ar.unat
    adds r8=IA64_VPD_VB0NAT_OFFSET, r14
    ;;
    st8 [r8]=r9
    adds r8=IA64_VPD_VB1NAT_OFFSET, r14
    ;;
    ld8 r9=[r8]
    adds r2=IA64_VPD_VB1REG_OFFSET, r14
    adds r3=IA64_VPD_VB1REG_OFFSET+8, r14
    ;;
    mov ar.unat=r9
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],16
    ld8.fill r31=[r3],16
    ;;
    mov ar.unat=r15
    ;;
    bsw.0
    ;;
    br.ret.sptk.many b0
END(vmx_asm_bsw1)

// rfi
ENTRY(vmx_asm_rfi)
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]
    ;;
    adds r26=IA64_VPD_VIFS_OFFSET,r18
    ;;
    ld8 r26=[r26]
    ;;
    tbit.z p6,p0=r26,63
    (p6) br.cond.dptk.few vmx_asm_rfi_1
    ;;
    // if vifs.v=1, discard the current register frame
    alloc r27=ar.pfs,0,0,0,0
    ;;
vmx_asm_rfi_1:
    adds r26=IA64_VPD_VHPI_OFFSET,r18
    ;;
    ld8 r26=[r26]
    ;;
    cmp.ne p6,p0=r26,r0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    VMX_VIRT_SAVE
    ;;
    mov out0=r21
    movl r14=ia64_leave_hypervisor_virt
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vcpu_rfi_fast
END(vmx_asm_rfi)

// mov r1=ar3 (only itc is virtualized)
ENTRY(vmx_asm_mov_from_ar)
    add r18=VCPU_VTM_OFFSET_OFS,r21
    add r16=VCPU_VTM_LAST_ITC_OFS,r21
    extr.u r17=r25,6,7
    ;;
    ld8 r18=[r18]
    mov r19=ar.itc
    mov r24=b0
    ;;
    ld8 r16=[r16]
    add r19=r19,r18
    movl r20=asm_mov_to_reg
    ;;
    adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
    shladd r17=r17,4,r20
    cmp.gtu p6,p0=r16,r19
    ;;
    (p6) mov r19=r16
    mov b0=r17
    br.sptk.few b0
    ;;
END(vmx_asm_mov_from_ar)

// mov r1=rr[r3]
ENTRY(vmx_asm_mov_from_rr)
    extr.u r16=r25,20,7
    extr.u r17=r25,6,7
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20
    mov r24=b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_from_rr_back_1:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
    shr.u r26=r19,61
    ;;
    shladd r17=r17,4,r22
    shladd r27=r26,3,r27
    ;;
    ld8 r19=[r27]
    mov b0=r17
    br.many b0
END(vmx_asm_mov_from_rr)

// mov rr[r3]=r2
ENTRY(vmx_asm_mov_to_rr)
    extr.u r16=r25,20,7         // r3
    extr.u r17=r25,13,7         // r2
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20        // get r3
    ;;
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_to_rr_back_1:
    adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
    shr.u r16=r19,61            // get RR #
    ;;
    // if rr7, go back
    cmp.eq p6,p0=7,r16
    mov b0=r23                  // restore b0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    mov r16=r19
    shladd r17=r17,4,r20        // get r2
    ;;
    mov b0=r17
    br.many b0
vmx_asm_mov_to_rr_back_2:
    mov r17=r19                 // get value
    ;;
    // if invalid value, go back
    adds r26=IA64_VCPU_RID_BITS_OFFSET,r21
    mov r27=r0
    ;;
    ld1 r27=[r26]
    ;;
    shr r19=r19,r27
    ;;
    cmp.ne p6,p0=r19,r0
    mov b0=r23                  // restore b0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    VMX_VIRT_SAVE
    ;;
    mov out0=r21
    mov out1=r16
    mov out2=r17
    movl r14=ia64_leave_hypervisor_virt
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vcpu_set_rr_fast
END(vmx_asm_mov_to_rr)

// rsm 25
ENTRY(vmx_asm_rsm)
    extr.u r26=r25,6,21         // Imm21
    extr.u r27=r25,31,2         // I2d
    ;;
    extr.u r28=r25,36,1         // I
    dep r26=r27,r26,21,2
    ;;
    // r16 is imm24
    dep r16=r28,r26,23,1
    ;;
    VMX_VIRT_SAVE
    ;;
    mov out0=r21
    mov out1=r16
    movl r14=ia64_leave_hypervisor_virt
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vcpu_rsm_fast
END(vmx_asm_rsm)

// ssm 24
ENTRY(vmx_asm_ssm)
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]
    ;;
    adds r26=IA64_VPD_VHPI_OFFSET,r18
    ;;
    ld8 r26=[r26]
    ;;
    cmp.ne p6,p0=r26,r0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    extr.u r26=r25,6,21
    extr.u r27=r25,31,2
    ;;
    extr.u r28=r25,36,1
    dep r26=r27,r26,21,2
    ;;
    // r16 is imm24
    dep r16=r28,r26,23,1
    ;;
    VMX_VIRT_SAVE
    ;;
    mov out0=r21
    mov out1=r16
    movl r14=ia64_leave_hypervisor_virt
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vcpu_ssm_fast
END(vmx_asm_ssm)

// mov psr.l=r2
ENTRY(vmx_asm_mov_to_psr)
    extr.u r26=r25,13,7         // r2
    movl r27=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r27
    shladd r26=r26,4,r27
    ;;
    mov b0=r26
    br.many b0
    ;;
vmx_asm_mov_to_psr_back:
    adds r18=IA64_VPD_BASE_OFFSET,r21
    tbit.nz p6,p0=r19,IA64_PSR_I_BIT
    ;;
    ld8 r18=[r18]
    ;;
    adds r26=IA64_VPD_VHPI_OFFSET,r18
    ;;
    ld8 r26=[r26]
    ;;
    // if enabling interrupts and vhpi has a value, go back
    cmp.ne.and p6,p0=r26,r0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    mov r16=r19
    ;;
    VMX_VIRT_SAVE
    ;;
    mov out0=r21
    mov out1=r16
    movl r14=ia64_leave_hypervisor_virt
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vcpu_mov_to_psr_fast
END(vmx_asm_mov_to_psr)

// thash r1=r3
// TODO: add support when pta.vf = 1
ENTRY(vmx_asm_thash)
    extr.u r17=r25,20,7                 // get r3 from opcode in r25
    extr.u r18=r25,6,7                  // get r1 from opcode in r25
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
    shladd r17=r17,4,r20                // get addr of MOVE_FROM_REG(r17)
    adds r16=IA64_VPD_BASE_OFFSET,r21   // get vcpu.arch.privregs
    mov r24=b0                          // save b0
    ;;
    ld8 r16=[r16]                       // get VPD addr
    mov b0=r17
    br.many b0                          // r19 return value
    ;;
vmx_asm_thash_back1:
    shr.u r23=r19,61                    // get RR number
    adds r28=VCPU_VRR0_OFS,r21          // get vcpu->arch.arch_vmx.vrr[0]'s addr
    adds r16=IA64_VPD_VPTA_OFFSET,r16   // get virtual pta
    ;;
    shladd r27=r23,3,r28                // get vcpu->arch.arch_vmx.vrr[r23]'s addr
    ld8 r17=[r16]                       // get virtual PTA
    mov r26=1
    ;;
    extr.u r29=r17,2,6                  // get pta.size
    ld8 r28=[r27]                       // get vcpu->arch.arch_vmx.vrr[r23]'s value
    ;;
    // Fall back to C if VF (long format) is set
    tbit.nz p6,p0=r17,8
    mov b0=r24
    ;;
    (p6) mov r24=EVENT_THASH
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    extr.u r28=r28,2,6      // get rr.ps
    shl r22=r26,r29         // 1UL << pta.size
    ;;
    shr.u r23=r19,r28       // vaddr >> rr.ps
    adds r26=3,r29          // pta.size + 3
    shl r27=r17,3           // pta << 3
    ;;
    shl r23=r23,3           // (vaddr >> rr.ps) << 3
    shr.u r27=r27,r26       // (pta << 3) >> (pta.size+3)
    movl r16=VRN_MASK
    ;;
    adds r22=-1,r22         // (1UL << pta.size) - 1
    shl r27=r27,r29         // ((pta<<3)>>(pta.size+3)) << pta.size
    and r19=r19,r16         // vaddr & VRN_MASK
    ;;
    and r22=r22,r23         // vhpt_offset
    or r19=r19,r27          // (vaddr&VRN_MASK) | (((pta<<3)>>(pta.size+3)) << pta.size)
    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
    ;;
    or r19=r19,r22          // calc pval
    shladd r17=r18,4,r26
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    ;;
    mov b0=r17
    br.many b0
END(vmx_asm_thash)

#define MOV_TO_REG0     \
{;                      \
    nop.b 0x0;          \
    nop.b 0x0;          \
    nop.b 0x0;          \
    ;;                  \
};

#define MOV_TO_REG(n)   \
{;                      \
    mov r##n##=r19;     \
    mov b0=r30;         \
    br.sptk.many b0;    \
    ;;                  \
};

#define MOV_FROM_REG(n) \
{;                      \
    mov r19=r##n##;     \
    mov b0=r30;         \
    br.sptk.many b0;    \
    ;;                  \
};

#define MOV_TO_BANK0_REG(n)                 \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
{;                                          \
    mov r26=r2;                             \
    mov r2=r19;                             \
    bsw.1;                                  \
    ;;                                      \
};                                          \
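The mov-from-ar path above virtualizes only ar.itc: the guest's ITC is the host ITC plus a per-vcpu offset, clamped so it can never read lower than the last value already handed to the guest. Below is a minimal C sketch of that arithmetic; the function name is made up, and vtm_offset/last_itc are assumed to stand for the VCPU_VTM_OFFSET_OFS and VCPU_VTM_LAST_ITC_OFS fields the assembly loads.

#include <stdint.h>

/* Sketch of the guest ITC read performed by vmx_asm_mov_from_ar (illustrative only). */
static uint64_t guest_read_itc(uint64_t host_itc, uint64_t vtm_offset,
                               uint64_t last_itc)
{
    uint64_t itc = host_itc + vtm_offset;        /* add r19=r19,r18 */
    return (last_itc > itc) ? last_itc : itc;    /* cmp.gtu p6 / (p6) mov r19=r16 */
}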
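vmx_asm_rsm and vmx_asm_ssm rebuild the 24-bit immediate of the guest's rsm/ssm instruction from three opcode fields extracted out of r25 before calling the C fast path. A small C sketch of that reassembly, with the field positions taken directly from the extr.u/dep sequence above (the function name is hypothetical):

#include <stdint.h>

/* Sketch of the imm24 decode done by vmx_asm_rsm / vmx_asm_ssm. */
static uint32_t decode_imm24(uint64_t opcode)
{
    uint32_t imm21 = (opcode >> 6)  & 0x1fffff;  /* imm21: opcode bits 6..26  */
    uint32_t i2d   = (opcode >> 31) & 0x3;       /* i2d:   opcode bits 31..32 */
    uint32_t i     = (opcode >> 36) & 0x1;       /* i:     opcode bit  36     */

    return (i << 23) | (i2d << 21) | imm21;      /* imm24 passed as out1 */
}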
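For a short-format VHPT (pta.vf == 0), vmx_asm_thash computes the guest hash address purely with shifts and masks, as annotated in the comments above. The following C sketch restates the same calculation; it assumes VRN_MASK covers the top three (virtual region number) bits and that pta and rr are the raw register values the assembly loads from the VPD and vrr[] array.

#include <stdint.h>

#define VRN_MASK 0xe000000000000000UL    /* top 3 bits: virtual region number (assumed value) */

/* Sketch of the short-format thash address computed by vmx_asm_thash. */
static uint64_t guest_thash(uint64_t pta, uint64_t rr, uint64_t vadr)
{
    uint64_t rr_ps    = (rr  >> 2) & 0x3f;                               /* rr.ps */
    uint64_t pta_size = (pta >> 2) & 0x3f;                               /* pta.size */
    uint64_t vhpt_off = ((vadr >> rr_ps) << 3) & (((uint64_t)1 << pta_size) - 1);
    uint64_t base     = ((pta << 3) >> (pta_size + 3)) << pta_size;      /* VHPT base */

    return (vadr & VRN_MASK) | base | vhpt_off;                          /* pval */
}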
