vmx_entry.S
/* -*- Mode:ASM; c-basic-offset:8; tab-width:8; indent-tabs-mode:t -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include "vmx_minstate.h"

GLOBAL_ENTRY(ia64_leave_nested)
        rsm psr.i
        ;;
        adds r21=PT(PR)+16,r12
        ;;
        lfetch [r21],PT(CR_IPSR)-PT(PR)
        adds r2=PT(B6)+16,r12
        adds r3=PT(R16)+16,r12
        ;;
        lfetch [r21]
        ld8 r28=[r2],8                  // load b6
        adds r29=PT(R24)+16,r12

        ld8.fill r16=[r3]
        adds r3=PT(AR_CSD)-PT(R16),r3
        adds r30=PT(AR_CCV)+16,r12
        ;;
        ld8.fill r24=[r29]
        ld8 r15=[r30]                   // load ar.ccv
        ;;
        ld8 r29=[r2],16                 // load b7
        ld8 r30=[r3],16                 // load ar.csd
        ;;
        ld8 r31=[r2],16                 // load ar.ssd
        ld8.fill r8=[r3],16
        ;;
        ld8.fill r9=[r2],16
        ld8.fill r10=[r3],PT(R17)-PT(R10)
        ;;
        ld8.fill r11=[r2],PT(R18)-PT(R11)
        ld8.fill r17=[r3],16
        ;;
        ld8.fill r18=[r2],16
        ld8.fill r19=[r3],16
        ;;
        ld8.fill r20=[r2],16
        ld8.fill r21=[r3],16
        mov ar.csd=r30
        mov ar.ssd=r31
        ;;
        rsm psr.i | psr.ic              // initiate turning off of interrupt and interruption collection
        invala                          // invalidate ALAT
        ;;
        ld8.fill r22=[r2],24
        ld8.fill r23=[r3],24
        mov b6=r28
        ;;
        ld8.fill r25=[r2],16
        ld8.fill r26=[r3],16
        mov b7=r29
        ;;
        ld8.fill r27=[r2],16
        ld8.fill r28=[r3],16
        ;;
        ld8.fill r29=[r2],16
        ld8.fill r30=[r3],24
        ;;
        ld8.fill r31=[r2],PT(F9)-PT(R31)
        adds r3=PT(F10)-PT(F6),r3
        ;;
        ldf.fill f9=[r2],PT(F6)-PT(F9)
        ldf.fill f10=[r3],PT(F8)-PT(F10)
        ;;
        ldf.fill f6=[r2],PT(F7)-PT(F6)
        ;;
        ldf.fill f7=[r2],PT(F11)-PT(F7)
        ldf.fill f8=[r3],32
        ;;
        srlz.i                          // ensure interruption collection is off
        mov ar.ccv=r15
        ;;
        bsw.0                           // switch back to bank 0 (no stop bit required beforehand...)
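        // Back in bank 0: reload the interruption state (cr.ipsr, cr.iip,
        // cr.ifs) and the remaining registers from the pt_regs frame at r12,
        // then resume the interrupted context with rfi.  Note that the XEN
        // build below skips ar.rnat, ar.bspstore and the "loadrs" value:
        // this nested exit path never switches register backing stores.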
        ;;
        ldf.fill f11=[r2]
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
        ;;
        ld8 r29=[r16],16                // load cr.ipsr
        ld8 r28=[r17],16                // load cr.iip
        ;;
        ld8 r30=[r16],16                // load cr.ifs
        ld8 r25=[r17],16                // load ar.unat
        ;;
#ifndef XEN
        ld8 r26=[r16],16                // load ar.pfs
        ld8 r27=[r17],16                // load ar.rsc
        cmp.eq p9,p0=r0,r0              // set p9 to indicate that we should restore cr.ifs
        ;;
        ld8 r24=[r16],16                // load ar.rnat (may be garbage)
        ld8 r23=[r17],16                // load ar.bspstore (may be garbage)
        ;;
        ld8 r31=[r16],16                // load predicates
#else
        ld8 r26=[r16],32                // load ar.pfs
        ld8 r27=[r17],32                // load ar.rsc
        ;;
        ld8 r31=[r16],32                // load predicates
#endif
        ld8 r22=[r17],16                // load b0
        ;;
#ifndef XEN
        ld8 r19=[r16],16                // load ar.rsc value for "loadrs"
#endif
        ld8.fill r1=[r17],16            // load r1
        ;;
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
        ;;
        ld8 r20=[r16],16                // ar.fpsr
        ld8.fill r15=[r17],16
        ;;
        ld8.fill r14=[r16],16
        ld8.fill r2=[r17]
        ;;
        ld8.fill r3=[r16]
#ifndef XEN
        ;;
        mov r16=ar.bsp                  // get existing backing store pointer
        ;;
#endif
        mov b0=r22
        mov ar.pfs=r26
        mov cr.ifs=r30
        mov cr.ipsr=r29
        mov ar.fpsr=r20
        mov cr.iip=r28
        ;;
        mov ar.rsc=r27
        mov ar.unat=r25
        mov pr=r31,-1
        rfi
END(ia64_leave_nested)

GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it
         * returns to user- or fsys-mode, hence we disable interrupts early on:
         */
        adds r2 = PT(R4)+16,r12
        adds r3 = PT(R5)+16,r12
        adds r8 = PT(EML_UNAT)+16,r12
        ;;
        ld8 r8 = [r8]
        ;;
        mov ar.unat=r8
        ;;
        ld8.fill r4=[r2],16             //load r4
        ld8.fill r5=[r3],16             //load r5
        ;;
        ld8.fill r6=[r2]                //load r6
        ld8.fill r7=[r3]                //load r7
        ;;
END(ia64_leave_hypervisor_prepare)
//fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
        PT_REGS_UNWIND_INFO(0)
        rsm psr.i
        ;;
        br.call.sptk.many b0=leave_hypervisor_tail
        ;;
        adds r20=PT(PR)+16,r12
        adds r8=PT(EML_UNAT)+16,r12
        ;;
        ld8 r8=[r8]
        ;;
        mov ar.unat=r8
        ;;
        lfetch [r20],PT(CR_IPSR)-PT(PR)
        adds r2 = PT(B6)+16,r12
        adds r3 = PT(B7)+16,r12
        ;;
        lfetch [r20]
        ;;
        ld8 r24=[r2],16                 /* B6 */
        ld8 r25=[r3],16                 /* B7 */
        ;;
        ld8 r26=[r2],16                 /* ar_csd */
        ld8 r27=[r3],16                 /* ar_ssd */
        mov b6 = r24
        ;;
        ld8.fill r8=[r2],16
        ld8.fill r9=[r3],16
        mov b7 = r25
        ;;
        mov ar.csd = r26
        mov ar.ssd = r27
        ;;
        ld8.fill r10=[r2],PT(R15)-PT(R10)
        ld8.fill r11=[r3],PT(R14)-PT(R11)
        ;;
        ld8.fill r15=[r2],PT(R16)-PT(R15)
        ld8.fill r14=[r3],PT(R17)-PT(R14)
        ;;
        ld8.fill r16=[r2],16
        ld8.fill r17=[r3],16
        ;;
        ld8.fill r18=[r2],16
        ld8.fill r19=[r3],16
        ;;
        ld8.fill r20=[r2],16
        ld8.fill r21=[r3],16
        ;;
        ld8.fill r22=[r2],16
        ld8.fill r23=[r3],16
        ;;
        ld8.fill r24=[r2],16
        ld8.fill r25=[r3],16
        ;;
        ld8.fill r26=[r2],16
        ld8.fill r27=[r3],16
        ;;
        ld8.fill r28=[r2],16
        ld8.fill r29=[r3],16
        ;;
        ld8.fill r30=[r2],PT(F6)-PT(R30)
        ld8.fill r31=[r3],PT(F7)-PT(R31)
        ;;
        rsm psr.i | psr.ic              // initiate turning off of interrupt and interruption collection
        invala                          // invalidate ALAT
        ;;
        ldf.fill f6=[r2],32
        ldf.fill f7=[r3],32
        ;;
        ldf.fill f8=[r2],32
        ldf.fill f9=[r3],32
        ;;
        ldf.fill f10=[r2],32
        ldf.fill f11=[r3],24
        ;;
        srlz.i                          // ensure interruption collection is off
        ;;
        bsw.0
        ;;
        adds r16 = PT(CR_IPSR)+16,r12
        adds r17 = PT(CR_IIP)+16,r12
        mov r21=r13                     // get current
        ;;
        ld8 r31=[r16],16                // load cr.ipsr
        ld8 r30=[r17],16                // load cr.iip
        ;;
        ld8 r29=[r16],16                // load cr.ifs
        ld8 r28=[r17],16                // load ar.unat
        ;;
        ld8 r27=[r16],16                // load ar.pfs
        ld8 r26=[r17],16                // load ar.rsc
        ;;
        ld8 r25=[r16],16                // load ar.rnat
        ld8 r24=[r17],16                // load ar.bspstore
        ;;
        ld8 r23=[r16],16                // load predicates
        ld8 r22=[r17],16                // load b0
        ;;
        ld8 r20=[r16],16                // load ar.rsc value for "loadrs"
        ld8.fill r1=[r17],16            //load r1
        ;;
        ld8.fill r12=[r16],16           //load r12
        ld8.fill r13=[r17],PT(R2)-PT(R13)   //load r13
        ;;
        ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
        ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
        ;;
        ld8.fill r3=[r16]               //load r3
        ld8 r18=[r17]                   //load ar_ccv
        ;;
        mov ar.fpsr=r19
        mov ar.ccv=r18
        shr.u r18=r20,16
        ;;
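/*
 * A hedged C sketch of the backing-store switch below (identifier names
 * are invented for illustration; the real code manipulates the RSE, not
 * ordinary memory).  r20 holds the saved ar.rsc, whose "loadrs" field
 * (bits 16 and up) gives the dirty-partition size in bytes; r19 holds
 * ia64_phys_stacked_size_p8, the physical stacked register file size + 8:
 *
 *      long dirty_size = rsc >> 16;                    // shr.u r18=r20,16
 *      long rnat_slots = dirty_size >> 9;              // <= dirty/(64*8):
 *                                                      // one RNaT slot per
 *                                                      // 63 stacked regs
 *      long invalid    = phys_stacked_p8 - dirty_size; // sub r19=r19,r18
 *      long to_clear   = rnat_slots*8 + invalid;       // shladd in0=loc1,3,r19
 *
 * vmx_rse_clear_invalid then wipes to_clear bytes of the "invalid"
 * partition by recursion: each activation allocates an Nregs-sized frame
 * and zeroes its locals, recursing while more than Nregs*8 bytes remain;
 * the chain of (pReturn) br.ret's unwinds every frame, and the outermost
 * activation falls through to "loadrs":
 *
 *      static void rse_clear_invalid(long left, long depth)
 *      {
 *              // frame registers zeroed here ("mov locN=0")
 *              if (left > NREGS*8)                     // (pRecurse) br.call
 *                      rse_clear_invalid(left - NREGS*8, depth + 1);
 *              if (depth != 0)                         // (pReturn) br.ret
 *                      return;
 *              // depth 0: fall through to "loadrs" in the asm
 *      }
 */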
vmx_rbs_switch:
        movl r19= THIS_CPU(ia64_phys_stacked_size_p8)
        ;;
        ld4 r19=[r19]

vmx_dont_preserve_current_frame:
/*
 * To prevent leaking bits between the hypervisor and guest domain,
 * we must clear the stacked registers in the "invalid" partition here.
 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
 * 5 registers/cycle on McKinley).
 */
#       define pRecurse p6
#       define pReturn  p7
#       define Nregs    14

        alloc loc0=ar.pfs,2,Nregs-2,2,0
        shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
        sub r19=r19,r18                 // r19 = (physStackedSize + 8) - dirtySize
        ;;
        mov ar.rsc=r20                  // load ar.rsc to be used for "loadrs"
        shladd in0=loc1,3,r19
        mov in1=0
        ;;
        TEXT_ALIGN(32)
vmx_rse_clear_invalid:
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
        add out1=1,in1                  // increment recursion count
        mov loc1=0
        mov loc2=0
        ;;
        mov loc3=0
        mov loc4=0
        mov loc5=0
        mov loc6=0
        mov loc7=0
(pRecurse) br.call.dptk.few b0=vmx_rse_clear_invalid
        ;;
        mov loc8=0
        mov loc9=0
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
        mov loc10=0
        mov loc11=0
(pReturn) br.ret.dptk.many b0
#       undef pRecurse
#       undef pReturn

// loadrs has already been shifted
        alloc r16=ar.pfs,0,0,0,0        // drop current register frame
        ;;
        loadrs
        ;;
        mov ar.bspstore=r24
        ;;
        mov ar.unat=r28
        mov ar.rnat=r25
        mov ar.rsc=r26
        ;;
        mov cr.ipsr=r31
        mov cr.iip=r30
(pNonSys) mov cr.ifs=r29
        mov ar.pfs=r27
        adds r18=IA64_VPD_BASE_OFFSET,r21
        ;;
        ld8 r18=[r18]                   //vpd
        adds r17=IA64_VCPU_ISR_OFFSET,r21
        ;;
        ld8 r17=[r17]
        adds r19=VPD(VPSR),r18
        ;;
        ld8 r19=[r19]                   //vpsr
        ;;
//vsa_sync_write_start
        movl r24=ia64_vmm_entry         // calculate return address
        mov r25=r18
        br.sptk.many vmx_vps_sync_write // call the service
        ;;
END(ia64_leave_hypervisor)
// fall through
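// vmx_vps_sync_write returns to the address placed in r24 above, i.e. to
// ia64_vmm_entry, which is the code this point falls through to in the
// full source.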