realmode.c
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/paging.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/x86_emulate.h>

struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
        } flags;
        unsigned int flag_word;
    };

    uint8_t exn_vector;
    uint8_t exn_insn_len;

    uint32_t intr_shadow;
};

static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel  = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }
}

static uint32_t virtual_to_linear(
    enum x86_segment seg,
    uint32_t offset,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = offset;
    if ( seg == x86_seg_none )
        return addr;
    ASSERT(is_x86_user_segment(seg));
    return addr + rm_ctxt->seg_reg[seg].base;
}

static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);

    *val = 0;

    if ( hvm_copy_from_guest_virt_nofault(val, addr, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
            return X86EMUL_UNHANDLEABLE;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}

static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}

static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = virtual_to_linear(seg, offset, rm_ctxt);

    if ( hvm_copy_to_guest_virt_nofault(addr, &val, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
            return X86EMUL_UNHANDLEABLE;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}

static int
realmode_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = virtual_to_linear(dst_seg, dst_offset, rm_ctxt);

    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(src_port, *reps, bytes_per_rep,
                     paddr, IOREQ_READ,
                     !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int
realmode_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = virtual_to_linear(src_seg, src_offset, rm_ctxt);

    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        return X86EMUL_UNHANDLEABLE;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(dst_port, *reps, bytes_per_rep,
                 paddr, IOREQ_WRITE,
                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);

    return X86EMUL_OKAY;
}

static int
realmode_rep_movs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t saddr = virtual_to_linear(src_seg, src_offset, rm_ctxt);
    uint32_t daddr = virtual_to_linear(dst_seg, dst_offset, rm_ctxt);
    p2m_type_t p2mt;

    if ( (curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
         curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    mfn_x(gfn_to_mfn_current(saddr >> PAGE_SHIFT, &p2mt));
    if ( !p2m_is_ram(p2mt) )
    {
        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, saddr, *reps, bytes_per_rep,
                          daddr, IOREQ_READ,
                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }
    else
    {
        mfn_x(gfn_to_mfn_current(daddr >> PAGE_SHIFT, &p2mt));
        if ( p2m_is_ram(p2mt) )
            return X86EMUL_UNHANDLEABLE;
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, daddr, *reps, bytes_per_rep,
                      saddr, IOREQ_WRITE,
                      !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    return X86EMUL_OKAY;
}

static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;

    if ( seg == x86_seg_cs )
    {
        if ( reg->attr.fields.dpl != 0 )
            return X86EMUL_UNHANDLEABLE;
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_CS;
        if ( reg->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_CS;
    }

    if ( seg == x86_seg_ss )
    {
        if ( reg->attr.fields.dpl != 0 )
            return X86EMUL_UNHANDLEABLE;
        curr->arch.hvm_vmx.vmxemul &= ~VMXEMUL_BAD_SS;
        if ( reg->sel & 3 )
            curr->arch.hvm_vmx.vmxemul |= VMXEMUL_BAD_SS;
        rm_ctxt->flags.mov_ss = 1;
    }

    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));

    return X86EMUL_OKAY;
}

static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }
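The excerpt above cuts off partway through realmode_read_io(). For orientation only, the sketch below shows how callbacks like these are typically registered with Xen's instruction emulator through a struct x86_emulate_ops table; it is an illustration based on the x86_emulate interface of this Xen era, not a verbatim continuation of the file, and the table name realmode_emulator_ops is an assumption.

/*
 * Illustrative sketch (assumption, not part of the excerpt above): the
 * per-operation callbacks are gathered into an x86_emulate_ops table,
 * which x86_emulate() consults whenever it needs to touch guest memory,
 * segment registers, or I/O ports while emulating a real-mode instruction.
 */
static struct x86_emulate_ops realmode_emulator_ops = {
    .read          = realmode_emulate_read,
    .insn_fetch    = realmode_emulate_insn_fetch,
    .write         = realmode_emulate_write,
    .cmpxchg       = realmode_emulate_cmpxchg,
    .rep_ins       = realmode_rep_ins,
    .rep_outs      = realmode_rep_outs,
    .rep_movs      = realmode_rep_movs,
    .read_segment  = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io       = realmode_read_io,
    /* Further hooks (port writes, CR/MSR access, etc.) are omitted here. */
};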