📄 svm.c

📁 xen 3.2.2 source code
💻 C
📖 Page 1 of 5
    .set_tsc_offset       = svm_set_tsc_offset,
    .inject_exception     = svm_inject_exception,
    .init_hypercall_page  = svm_init_hypercall_page,
    .event_pending        = svm_event_pending
};

int start_svm(struct cpuinfo_x86 *c)
{
    u32 eax, ecx, edx;
    u32 phys_hsa_lo, phys_hsa_hi;
    u64 phys_hsa;
    int cpu = smp_processor_id();

    /* Xen does not fill x86_capability words except 0. */
    ecx = cpuid_ecx(0x80000001);
    boot_cpu_data.x86_capability[5] = ecx;

    if ( !(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)) )
        return 0;

    /* Check whether the SVM feature is disabled in the BIOS. */
    rdmsr(MSR_K8_VM_CR, eax, edx);
    if ( eax & K8_VMCR_SVME_DISABLE )
    {
        printk("AMD SVM Extension is disabled in BIOS.\n");
        return 0;
    }

    if ( ((hsa[cpu] = alloc_host_save_area()) == NULL) ||
         ((root_vmcb[cpu] = alloc_vmcb()) == NULL) )
        return 0;

    write_efer(read_efer() | EFER_SVME);

    /* Initialize the HSA for this core. */
    phys_hsa = (u64)virt_to_maddr(hsa[cpu]);
    phys_hsa_lo = (u32)phys_hsa;
    phys_hsa_hi = (u32)(phys_hsa >> 32);
    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);

    /* Initialize this core's ASID handling. */
    svm_asid_init(c);

    if ( cpu != 0 )
        return 1;

    setup_vmcb_dump();

    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
                         cpuid_edx(0x8000000A) : 0);

    svm_function_table.hap_supported = cpu_has_svm_npt;

    hvm_enable(&svm_function_table);

    return 1;
}

static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
{
    p2m_type_t p2mt;
    mfn_t mfn;
    unsigned long gfn = gpa >> PAGE_SHIFT;

    /* If this GFN is emulated MMIO, pass the fault to the mmio handler. */
    mfn = gfn_to_mfn_current(gfn, &p2mt);
    if ( p2mt == p2m_mmio_dm )
    {
        handle_mmio(gpa);
        return;
    }

    /* Log-dirty: mark the page dirty and let the guest write it again. */
    paging_mark_dirty(current->domain, mfn_x(mfn));
    p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
}

static void svm_do_no_device_fault(struct vmcb_struct *vmcb)
{
    struct vcpu *curr = current;

    svm_fpu_enter(curr);

    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
        vmcb->cr0 &= ~X86_CR0_TS;
}

#define bitmaskof(idx)  (1U << ((idx) & 31))
static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb,
                                struct cpu_user_regs *regs)
{
    unsigned long input = regs->eax;
    unsigned int eax, ebx, ecx, edx;
    struct vcpu *v = current;
    int inst_len;

    hvm_cpuid(input, &eax, &ebx, &ecx, &edx);

    switch ( input )
    {
    case 0x00000001:
        /* Mask Intel-only features. */
        ecx &= ~(bitmaskof(X86_FEATURE_SSSE3) |
                 bitmaskof(X86_FEATURE_SSE4_1) |
                 bitmaskof(X86_FEATURE_SSE4_2));
        break;

    case 0x80000001:
        /* Filter features which are shared with 0x00000001:EDX. */
        if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
            __clear_bit(X86_FEATURE_APIC & 31, &edx);
#if CONFIG_PAGING_LEVELS >= 3
        if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
            __clear_bit(X86_FEATURE_PAE & 31, &edx);
        __clear_bit(X86_FEATURE_PSE36 & 31, &edx);

        /* Filter all other features according to a whitelist. */
        ecx &= (bitmaskof(X86_FEATURE_LAHF_LM) |
                bitmaskof(X86_FEATURE_ALTMOVCR) |
                bitmaskof(X86_FEATURE_ABM) |
                bitmaskof(X86_FEATURE_SSE4A) |
                bitmaskof(X86_FEATURE_MISALIGNSSE) |
                bitmaskof(X86_FEATURE_3DNOWPF));
        edx &= (0x0183f3ff | /* features shared with 0x00000001:EDX */
                bitmaskof(X86_FEATURE_NX) |
                bitmaskof(X86_FEATURE_LM) |
                bitmaskof(X86_FEATURE_SYSCALL) |
                bitmaskof(X86_FEATURE_MP) |
                bitmaskof(X86_FEATURE_MMXEXT) |
                bitmaskof(X86_FEATURE_FFXSR));
        break;

    case 0x80000007:
    case 0x8000000A:
        /* Mask out power-management and SVM-extension features. */
        eax = ebx = ecx = edx = 0;
        break;

    case 0x80000008:
        /* Make sure the reported number of CPU cores is 1 when HTT=0. */
        ecx &= 0xFFFFFF00;
        break;
    }

    regs->eax = eax;
    regs->ebx = ebx;
    regs->ecx = ecx;
    regs->edx = edx;

    HVMTRACE_3D(CPUID, v, input,
                ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);

    inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
    __update_guest_eip(regs, inst_len);
}

static unsigned long *get_reg_p(
    unsigned int gpreg,
    struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
    unsigned long *reg_p = NULL;

    switch ( gpreg )
    {
    case SVM_REG_EAX:
        reg_p = (unsigned long *)&regs->eax;
        break;
    case SVM_REG_EBX:
        reg_p = (unsigned long *)&regs->ebx;
        break;
    case SVM_REG_ECX:
        reg_p = (unsigned long *)&regs->ecx;
        break;
    case SVM_REG_EDX:
        reg_p = (unsigned long *)&regs->edx;
        break;
    case SVM_REG_EDI:
        reg_p = (unsigned long *)&regs->edi;
        break;
    case SVM_REG_ESI:
        reg_p = (unsigned long *)&regs->esi;
        break;
    case SVM_REG_EBP:
        reg_p = (unsigned long *)&regs->ebp;
        break;
    case SVM_REG_ESP:
        reg_p = (unsigned long *)&regs->esp;
        break;
#ifdef __x86_64__
    case SVM_REG_R8:
        reg_p = (unsigned long *)&regs->r8;
        break;
    case SVM_REG_R9:
        reg_p = (unsigned long *)&regs->r9;
        break;
    case SVM_REG_R10:
        reg_p = (unsigned long *)&regs->r10;
        break;
    case SVM_REG_R11:
        reg_p = (unsigned long *)&regs->r11;
        break;
    case SVM_REG_R12:
        reg_p = (unsigned long *)&regs->r12;
        break;
    case SVM_REG_R13:
        reg_p = (unsigned long *)&regs->r13;
        break;
    case SVM_REG_R14:
        reg_p = (unsigned long *)&regs->r14;
        break;
    case SVM_REG_R15:
        reg_p = (unsigned long *)&regs->r15;
        break;
#endif
    default:
        BUG();
    }

    return reg_p;
}

static unsigned long get_reg(
    unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
    unsigned long *gp;
    gp = get_reg_p(gpreg, regs, vmcb);
    return *gp;
}

static void set_reg(
    unsigned int gpreg, unsigned long value,
    struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
    unsigned long *gp;
    gp = get_reg_p(gpreg, regs, vmcb);
    *gp = value;
}

static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{
    HVMTRACE_0D(DR_WRITE, v);
    __restore_debug_registers(v);
}
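/*
 * svm_get_prefix_info() below decodes the legacy prefixes of an INS/OUTS
 * instruction. It fetches the instruction bytes at the guest RIP and
 * scans them: REP/REPNE, LOCK and (on x86_64) REX prefixes are skipped;
 * an address-size override (0x67) toggles *asize between 16 and 32 bits;
 * a segment-override prefix points *seg at the corresponding VMCB
 * segment register (syncing the VMCB first for FS/GS). The scan stops
 * at the first non-prefix byte.
 */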
static int svm_get_prefix_info(struct vcpu *v, unsigned int dir,
                               svm_segment_register_t **seg,
                               unsigned int *asize,
                               unsigned int isize)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    unsigned char inst[MAX_INST_LEN];
    int i;

    memset(inst, 0, MAX_INST_LEN);

    switch ( hvm_fetch_from_guest_virt(inst, svm_rip2pointer(v), isize) )
    {
    case HVMCOPY_okay:
        break;
    case HVMCOPY_bad_gva_to_gfn:
        /* OK just to give up; we'll have injected #PF already. */
        return 0;
    case HVMCOPY_bad_gfn_to_mfn:
        gdprintk(XENLOG_ERR, "Bad prefix fetch at %#lx (%#lx)\n",
                 (unsigned long)guest_cpu_user_regs()->eip,
                 svm_rip2pointer(v));
        domain_crash(v->domain);
        return 0;
    }

    for ( i = 0; i < MAX_INST_LEN; i++ )
    {
        switch ( inst[i] )
        {
        case 0xf3: /* REPZ */
        case 0xf2: /* REPNZ */
        case 0xf0: /* LOCK */
        case 0x66: /* data32 */
#ifdef __x86_64__
            /* REX prefixes */
        case 0x40: case 0x41: case 0x42: case 0x43:
        case 0x44: case 0x45: case 0x46: case 0x47:
        case 0x48: case 0x49: case 0x4a: case 0x4b:
        case 0x4c: case 0x4d: case 0x4e: case 0x4f:
#endif
            continue;
        case 0x67: /* addr32 */
            *asize ^= 48;       /* Switch 16/32 bits */
            continue;
        case 0x2e: /* CS */
            *seg = &vmcb->cs;
            continue;
        case 0x36: /* SS */
            *seg = &vmcb->ss;
            continue;
        case 0x26: /* ES */
            *seg = &vmcb->es;
            continue;
        case 0x64: /* FS */
            svm_sync_vmcb(v);
            *seg = &vmcb->fs;
            continue;
        case 0x65: /* GS */
            svm_sync_vmcb(v);
            *seg = &vmcb->gs;
            continue;
        case 0x3e: /* DS */
            *seg = &vmcb->ds;
            continue;
        default:
            break;
        }
        break;
    }

    return 1;
}

/* Get the address of the INS/OUTS instruction's memory operand. */
static int svm_get_io_address(
    struct vcpu *v, struct cpu_user_regs *regs,
    unsigned int size, ioio_info_t info,
    unsigned long *count, unsigned long *addr)
{
    unsigned long        reg;
    unsigned int         asize, isize;
    int                  long_mode = 0;
    svm_segment_register_t *seg = NULL;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    /* If we're in long mode, don't check the segment presence & limit. */
    long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);

    /* The d field of cs.attr is 1 for 32-bit, 0 for 16- or 64-bit.
     * The l field combined with EFER_LMA says whether it's 16- or 64-bit.
     */
    asize = (long_mode) ? 64 : ((vmcb->cs.attr.fields.db) ? 32 : 16);

    /* The ins/outs instructions are single-byte, so if we have got more
     * than one byte (+ maybe a rep prefix), we have some prefix and need
     * to figure out what it is.
     */
    isize = vmcb->exitinfo2 - regs->eip;

    if ( isize > ((info.fields.rep) ? 2 : 1) )
        if ( !svm_get_prefix_info(v, info.fields.type, &seg, &asize, isize) )
            return 0;

    if ( info.fields.type == IOREQ_WRITE )
    {
        reg = regs->esi;
        if ( !seg )             /* If no prefix, use DS. */
            seg = &vmcb->ds;
        if ( !long_mode && (seg->attr.fields.type & 0xa) == 0x8 )
        {
            svm_inject_exception(TRAP_gp_fault, 0, 0);
            return 0;
        }
    }
    else
    {
        reg = regs->edi;
        seg = &vmcb->es;        /* Note: This is ALWAYS ES. */
        if ( !long_mode && (seg->attr.fields.type & 0xa) != 0x2 )
        {
            svm_inject_exception(TRAP_gp_fault, 0, 0);
            return 0;
        }
    }

    /* If the segment isn't present, inject a #GP fault. */
    if ( !long_mode && !seg->attr.fields.p )
    {
        svm_inject_exception(TRAP_gp_fault, 0, 0);
        return 0;
    }

    if ( asize == 16 )
    {
        *addr = (reg & 0xFFFF);
        *count = regs->ecx & 0xffff;
    }
    else
    {
        *addr = reg;
        *count = regs->ecx;
    }
    if ( !info.fields.rep )
        *count = 1;

    if ( !long_mode )
    {
        ASSERT(*addr == (u32)*addr);
        if ( (u32)(*addr + size - 1) < (u32)*addr ||
             (seg->attr.fields.type & 0xc) != 0x4 ?
             *addr + size - 1 > seg->limit :
             *addr <= seg->limit )
        {
            svm_inject_exception(TRAP_gp_fault, 0, 0);
            return 0;
        }

        /* Check the limit for repeated instructions, as above we checked
         * only the first instance. Truncate the count if a limit violation
         * would occur. Note that the checking is not necessary for
         * page-granular segments, as transfers crossing page boundaries
         * will be broken up anyway. */
        if ( !seg->attr.fields.g && *count > 1 )
        {
            if ( (seg->attr.fields.type & 0xc) != 0x4 )
            {
                /* expand-up */
                if ( !(regs->eflags & EF_DF) )
                {
                    if ( *addr + *count * size - 1 < *addr ||
                         *addr + *count * size - 1 > seg->limit )
                        *count = (seg->limit + 1UL - *addr) / size;
                }
                else
                {
                    if ( *count - 1 > *addr / size )
                        *count = *addr / size + 1;
                }
            }
            else
            {
                /* expand-down */
                if ( !(regs->eflags & EF_DF) )
                {
                    if ( *count - 1 > -(s32)*addr / size )
                        *count = -(s32)*addr / size + 1UL;
                }
                else
                {
                    if ( *addr < (*count - 1) * size ||
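The CPUID filtering in svm_vmexit_do_cpuid() above is plain bit masking: bitmaskof() turns a feature-bit index into a mask within its 32-bit CPUID word, and the handler either clears individual bits (leaf 0x00000001) or ANDs the whole word against a whitelist (leaf 0x80000001). A minimal standalone sketch of the same pattern follows; the feature indices and the raw ECX value are illustrative stand-ins, not taken from Xen's headers.

#include <stdio.h>
#include <stdint.h>

/* Same idiom as the bitmaskof() macro in the listing: the feature's
 * bit position within its 32-bit CPUID word. */
#define bitmaskof(idx)  (1U << ((idx) & 31))

/* Hypothetical bit indices for this sketch. */
#define FEAT_SSSE3    9
#define FEAT_SSE4_1  19
#define FEAT_SSE4_2  20

int main(void)
{
    uint32_t guest_ecx = 0x00380201;  /* made-up raw CPUID.1:ECX value */

    /* Hide the Intel-only features from the guest, as the handler does. */
    guest_ecx &= ~(bitmaskof(FEAT_SSSE3) |
                   bitmaskof(FEAT_SSE4_1) |
                   bitmaskof(FEAT_SSE4_2));

    printf("filtered ECX = %#010x\n", guest_ecx);  /* prints 0x00200001 */
    return 0;
}

For leaf 0x80000001 the logic is inverted: ECX and EDX are ANDed with a whitelist, so any feature bit not named in the mask reads as zero in the guest.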
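The excerpt cuts off midway through the expand-down handling (the listing continues on page 2 of 5). The logic being applied is standard x86 segmentation: for an expand-up data segment, valid offsets run from 0 to the limit; for an expand-down one ((type & 0xc) == 0x4 in the descriptor), valid offsets lie strictly above the limit. Below is a compact sketch of the first-access fault test, mirroring the listing's check; like the listing, it ignores granularity scaling and the 16- versus 32-bit upper bound of expand-down segments.

#include <stdint.h>
#include <stdbool.h>

/* Would an access to [addr, addr + size - 1] violate the segment limit?
 * Mirrors the fault condition in svm_get_io_address() above. */
static bool limit_fault(uint32_t addr, uint32_t size,
                        uint32_t limit, bool expand_down)
{
    if ( (uint32_t)(addr + size - 1) < addr )   /* offset wrap-around */
        return true;
    if ( !expand_down )
        return addr + size - 1 > limit;  /* must end at or below the limit */
    return addr <= limit;                /* must start above the limit */
}

The REP loop that follows in the listing applies the same geometry to *count, truncating the repetition count instead of faulting when a later iteration would cross the limit.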
