
📄 mtrr.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 2
    return effective;
}

uint32_t get_pat_flags(struct vcpu *v,
                       uint32_t gl1e_flags,
                       paddr_t gpaddr,
                       paddr_t spaddr)
{
    uint8_t guest_eff_mm_type;
    uint8_t shadow_mtrr_type;
    uint8_t pat_entry_value;
    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     * using the pair of guest MTRR and PAT.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, gl1e_flags);

    /* 2. Get the memory type of the host physical address, from the host MTRRs. */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, using the host MTRR memory type
     * and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];

    /* If a conflict occurs (e.g. the host MTRR type is UC but the guest
     * memory type is WB), use UC as the effective memory type. Returning
     * PAT_TYPE_UNCACHABLE here always makes the effective memory type UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        gdprintk(XENLOG_WARNING,
                 "Conflict occurs for a given guest l1e flags:%x "
                 "at %"PRIx64" (the effective mm type:%d), "
                 "because the host mtrr type is:%d\n",
                 gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                 shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }

    /* 4. Get the pte flags. */
    return pat_type_2_pte_flags(pat_entry_value);
}

/* Helper functions for setting mtrr/pat */
bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
{
    uint8_t *value = (uint8_t *)&msr_content;
    int32_t i;

    if ( *pat != msr_content )
    {
        /* Each PAT entry must be an architectural type:
         * UC(0), WC(1), WT(4), WP(5), WB(6) or UC-(7). */
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}

bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;
    uint8_t enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
        return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                    msr_content);
        return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}

bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content)
{
    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        uint8_t *range = (uint8_t *)&msr_content;
        int32_t i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}

bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content)
{
    uint32_t index;
    uint64_t msr_mask;
    uint64_t *var_range_base = (uint64_t *)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;
    if ( var_range_base[index] != msr_content )
    {
        uint32_t type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;
    }

    m->overlapped = is_var_mtrr_overlapped(m);

    return 1;
}

bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int32_t res;
    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR. */
    if ( (md->def_type != ms->def_type)
         || (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}

void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}

int32_t hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t guest_fn,
    uint32_t *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}

int32_t hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t gfn_start,
    uint64_t gfn_end,
    uint32_t type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));
    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}

static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int i;
    struct vcpu *v;
    struct hvm_hw_mtrr hw_mtrr;
    struct mtrr_state *mtrr_state;

    /* Save MTRR & PAT state for each vCPU. */
    for_each_vcpu(d, v)
    {
        mtrr_state = &v->arch.hvm_vcpu.mtrr;

        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;

        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                    | (mtrr_state->enabled << 10);
        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;

        for ( i = 0; i < MTRR_VCNT; i++ )
        {
            /* save physbase */
            hw_mtrr.msr_mtrr_var[i*2] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2];
            /* save physmask */
            hw_mtrr.msr_mtrr_var[i*2+1] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
        }

        for ( i = 0; i < NUM_FIXED_MSR; i++ )
            hw_mtrr.msr_mtrr_fixed[i] =
                ((uint64_t*)mtrr_state->fixed_ranges)[i];

        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, i;
    struct vcpu *v;
    struct mtrr_state *mtrr_state;
    struct hvm_hw_mtrr hw_mtrr;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
        return -EINVAL;

    mtrr_state = &v->arch.hvm_vcpu.mtrr;

    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);

    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);

    for ( i = 0; i < MTRR_VCNT; i++ )
    {
        mtrr_var_range_msr_set(mtrr_state,
                MTRRphysBase_MSR(i), hw_mtrr.msr_mtrr_var[i*2]);
        mtrr_var_range_msr_set(mtrr_state,
                MTRRphysMask_MSR(i), hw_mtrr.msr_mtrr_var[i*2+1]);
    }

    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                          1, HVMSR_PER_VCPU);

uint8_t epte_get_entry_emt(
    struct domain *d, unsigned long gfn, unsigned long mfn)
{
    uint8_t gmtrr_mtype, hmtrr_mtype;
    uint32_t type;
    struct vcpu *v = current;

    if ( (current->domain != d) && ((v = d->vcpu[0]) == NULL) )
        return MTRR_TYPE_WRBACK;

    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
        return MTRR_TYPE_WRBACK;

    if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
        return MTRR_TYPE_UNCACHABLE;

    if ( !mfn_valid(mfn) )
        return MTRR_TYPE_UNCACHABLE;

    if ( hvm_get_mem_pinned_cacheattr(d, gfn, &type) )
        return type;

    gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn << PAGE_SHIFT));
    return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
}
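
The listing above validates guest writes to MSR_MTRRdefType in mtrr_def_type_msr_set(): bits 0-7 carry the default memory type, bit 10 the fixed-range enable, bit 11 the global MTRR enable, and every other bit is reserved (hence the ~0xcff mask). Below is a minimal standalone sketch, not part of Xen, that reproduces just that check so it can be compiled and tried in isolation; the helper names def_type_is_valid and mtrr_def_type_msr_check are hypothetical and exist only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Accept only the architectural default types: UC(0), WC(1), WT(4), WP(5), WB(6). */
static int def_type_is_valid(uint8_t t)
{
    return t == 0 || t == 1 || t == 4 || t == 5 || t == 6;
}

/* Mirror of the validation done by mtrr_def_type_msr_set() above:
 * reject reserved memory types and any set bit outside 0-7, 10 and 11. */
static int mtrr_def_type_msr_check(uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;

    if ( !def_type_is_valid(def_type) )
        return 0;                      /* reserved memory type */
    if ( msr_content & ~0xcffULL )
        return 0;                      /* reserved bits set */
    return 1;
}

int main(void)
{
    /* WB default type with fixed-range and global enable bits set:
     * 0x6 | (1 << 10) | (1 << 11) == 0xc06 -> accepted. */
    printf("%d\n", mtrr_def_type_msr_check(0xc06));   /* prints 1 */

    /* Same value with reserved bit 12 set -> rejected, as in the Xen check. */
    printf("%d\n", mtrr_def_type_msr_check(0x1c06));  /* prints 0 */

    return 0;
}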
