📄 optvfault.s
/*
 * arch/ia64/vmx/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <linux/config.h>
#include <asm/config.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm/virt_event.h>
#include <asm-ia64/vmx_mm_def.h>
#include <asm-ia64/vmx_phy_mode.h>
#include "entry.h"

// r21 : current
// r23 : b0
// r31 : pr

#define VMX_VIRT_SAVE							\
	mov r27=ar.rsc;		/* M */					\
	;;								\
	cover;			/* B;; (or nothing) */			\
	;;								\
	/* switch from user to kernel RBS: */				\
	invala;			/* M */					\
	;;								\
	mov ar.rsc=0;		/* set enforced lazy mode */		\
	;;								\
	mov.m r26=ar.rnat;						\
	movl r28=IA64_RBS_OFFSET;	/* compute base of RBS */	\
	;;								\
	mov r22=ar.bspstore;	/* save ar.bspstore */			\
	add r28=r28,r21;						\
	;;								\
	mov ar.bspstore=r28;	/* switch to kernel RBS */		\
	;;								\
	mov r18=ar.bsp;							\
	mov ar.rsc=0x3;		/* set eager mode */			\
	;;								\
	alloc r32=ar.pfs,24,0,3,0	/* save pfs */			\
	;;								\
	sub r18=r18,r28;	/* r18=RSE.ndirty*8 */			\
	;;								\
	shl r33=r18,16;		/* save loadrs */			\
	mov r35=b6;		/* save b6 */				\
	mov r36=b7;		/* save b7 */				\
	mov r37=ar.csd;		/* save ar.csd */			\
	mov r38=ar.ssd;		/* save ar.ssd */			\
	mov r39=r8;		/* save r8 */				\
	mov r40=r9;		/* save r9 */				\
	mov r41=r10;		/* save r10 */				\
	mov r42=r11;		/* save r11 */				\
	mov r43=r27;		/* save ar.rsc */			\
	mov r44=r26;		/* save ar.rnat */			\
	mov r45=r22;		/* save ar.bspstore */			\
	mov r46=r31;		/* save pr */				\
	mov r47=r23;		/* save b0 */				\
	mov r48=r1;		/* save r1 */				\
	mov r49=r12;		/* save r12 */				\
	mov r50=r13;		/* save r13 */				\
	mov r51=r15;		/* save r15 */				\
	mov r52=r14;		/* save r14 */				\
	mov r53=r2;		/* save r2 */				\
	mov r54=r3;		/* save r3 */				\
	mov r34=ar.ccv;		/* save ar.ccv */			\
	;;								\
	movl r1=__gp;							\
	movl r29=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16;			\
	;;								\
	add r12=r29,r21;	/* compute base of memory stack */	\
	mov r13=r21;							\
	;;								\
{ .mii;		/* call vps sync read */				\
	add r25=IA64_VPD_BASE_OFFSET, r21;				\
	nop 0x0;							\
	mov r24=ip;							\
	;;								\
};									\
{ .mmb;									\
	add r24 = 0x20, r24;						\
	ld8 r25=[r25];		/* read vpd base */			\
	br.cond.sptk vmx_vps_sync_read;	/* call the service */		\
	;;								\
};

ENTRY(ia64_leave_hypervisor_virt)
	invala			/* M */
	;;
	mov r21=r13		/* get current */
	mov b6=r35		/* restore b6 */
	mov b7=r36		/* restore b7 */
	mov ar.csd=r37		/* restore ar.csd */
	mov ar.ssd=r38		/* restore ar.ssd */
	mov r8=r39		/* restore r8 */
	mov r9=r40		/* restore r9 */
	mov r10=r41		/* restore r10 */
	mov r11=r42		/* restore r11 */
	mov ar.pfs=r32		/* restore ar.pfs */
	mov r27=r43		/* restore ar.rsc */
	mov r26=r44		/* restore ar.rnat */
	mov r25=r45		/* restore ar.bspstore */
	mov r23=r46		/* restore predicates */
	mov r22=r47		/* restore b0 */
	mov r1=r48		/* restore r1 */
	mov r12=r49		/* restore r12 */
	mov r13=r50		/* restore r13 */
	mov r15=r51		/* restore r15 */
	mov r14=r52		/* restore r14 */
	mov r2=r53		/* restore r2 */
	mov r3=r54		/* restore r3 */
	mov ar.ccv=r34		/* restore ar.ccv */
	mov ar.rsc=r33		/* load ar.rsc to be used for "loadrs" */
	;;
	alloc r16=ar.pfs,0,0,0,0	/* drop current register frame */
	;;
	loadrs
	;;
	mov ar.bspstore=r25
	;;
	mov ar.rnat=r26
	;;
	mov ar.rsc=r27
	adds r18=IA64_VPD_BASE_OFFSET,r21
	;;
	ld8 r25=[r18]		// load vpd
	mov r17=r0
	;;
//vsa_sync_write_start
	;;
	movl r24=ia64_leave_hypervisor_virt_1	// calculate return address
	br.cond.sptk vmx_vps_sync_write		// call the service
	;;
ia64_leave_hypervisor_virt_1:
	mov r24=r22
	mov r31=r23
	br.cond.sptk vmx_resume_to_guest
END(ia64_leave_hypervisor_virt)
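/*
 * Illustrative note (added; not part of the original file or the build):
 * virtualization_fault_table below is a per-cause dispatch table in which
 * each entry is a single bundle that either branches back to the slow path
 * or to an optimized handler.  As a rough C sketch of the same dispatch,
 * with the entry numbers and handler names taken from the table itself and
 * "cause" being the value carried in r24:
 *
 *	switch (cause) {
 *	case  3: vmx_asm_mov_from_ar(); break;
 *	case  6: vmx_asm_mov_to_psr();  break;
 *	case 10: vmx_asm_mov_to_rr();   break;
 *	case 18: vmx_asm_mov_from_rr(); break;
 *	case 24: vmx_asm_ssm();         break;
 *	case 25: vmx_asm_rsm();         break;
 *	case 31: vmx_asm_thash();       break;
 *	case 37: vmx_asm_rfi();         break;
 *	default: vmx_virtualization_fault_back();
 *	}
 */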
// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (= saved pr)

#define BACK_TO_SLOW_PATH				\
{;							\
	nop.m 0x0;					\
	mov b0=r23;					\
	br.many vmx_virtualization_fault_back;		\
};							\

GLOBAL_ENTRY(virtualization_fault_table)
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 3 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_mov_from_ar
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 6 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_mov_to_psr
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 10 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_mov_to_rr
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 18 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_mov_from_rr
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 24 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_ssm
}
{	/* Entry 25 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_rsm
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 31 */
	cmp.eq p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_thash
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
{	/* Entry 37 */
	cmp.ne p2,p0=r0,r0
	mov b0=r23
	br.many vmx_asm_rfi
}
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
	BACK_TO_SLOW_PATH
END(virtualization_fault_table)

ENTRY(vmx_dummy_function)
	br.sptk.many vmx_dummy_function
END(vmx_dummy_function)

/*
 * Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(vmx_vps_sync_read)
	movl r29 = vmx_dummy_function
	;;
	mov b0=r29
	br.sptk.many b0
END(vmx_vps_sync_read)

/*
 * Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 */
GLOBAL_ENTRY(vmx_vps_sync_write)
	movl r29 = vmx_dummy_function
	;;
	mov b0=r29
	br.sptk.many b0
END(vmx_vps_sync_write)

/*
 * Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 */
GLOBAL_ENTRY(vmx_vps_resume_normal)
	movl r29 = vmx_dummy_function
	;;
	mov b0=r29
	mov pr=r23,-2
	br.sptk.many b0
END(vmx_vps_resume_normal)

/*
 * Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *	r17 : isr
 */
GLOBAL_ENTRY(vmx_vps_resume_handler)
	movl r29 = vmx_dummy_function
	;;
	ld8 r26=[r25]
	shr r17=r17,IA64_ISR_IR_BIT
	;;
	dep r26=r17,r26,63,1	// bit 63 of r26 indicates whether CFLE is enabled
	mov b0=r29
	mov pr=r23,-2
	br.sptk.many b0
END(vmx_vps_resume_handler)
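/*
 * Descriptive note (added; not in the original file): vmx_asm_bsw0 and
 * vmx_asm_bsw1 swap the banked registers r16-r31 with the copies kept in
 * the VPD.  Each is called with psr.bn = 0 and r13 = vcpu; it switches to
 * bank 1 (bsw.1), spills the live r16-r31 and their NaT bits into one VPD
 * save area (VB1REG/VB1NAT for bsw0, VB0REG/VB0NAT for bsw1), refills
 * r16-r31 from the other area, restores ar.unat, and returns after bsw.0.
 */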
// r13 -> vcpu
// call with psr.bn = 0
GLOBAL_ENTRY(vmx_asm_bsw0)
	mov r15=ar.unat
	;;
	adds r14=IA64_VPD_BASE_OFFSET,r13
	;;
	ld8 r14=[r14]
	bsw.1
	;;
	adds r2=IA64_VPD_VB1REG_OFFSET, r14
	adds r3=IA64_VPD_VB1REG_OFFSET+8, r14
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,16
	;;
	mov r9=ar.unat
	adds r8=IA64_VPD_VB1NAT_OFFSET, r14
	;;
	st8 [r8]=r9
	adds r8=IA64_VPD_VB0NAT_OFFSET, r14
	;;
	ld8 r9=[r8]
	adds r2=IA64_VPD_VB0REG_OFFSET, r14
	adds r3=IA64_VPD_VB0REG_OFFSET+8, r14
	;;
	mov ar.unat=r9
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],16
	ld8.fill r31=[r3],16
	;;
	mov ar.unat=r15
	;;
	bsw.0
	;;
	br.ret.sptk.many b0
END(vmx_asm_bsw0)

// r13 -> vcpu
// call with psr.bn = 0
GLOBAL_ENTRY(vmx_asm_bsw1)
	mov r15=ar.unat
	;;
	adds r14=IA64_VPD_BASE_OFFSET,r13
	;;
	ld8 r14=[r14]
	bsw.1
	;;
	adds r2=IA64_VPD_VB0REG_OFFSET, r14
	adds r3=IA64_VPD_VB0REG_OFFSET+8, r14
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16