
📄 mmio.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 2
/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */

#include <linux/sched.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/bundle.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <public/xen.h>
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/hvm/vacpi.h>
#include <asm/hvm/support.h>
#include <public/hvm/save.h>
#include <public/arch-ia64/hvm/memmap.h>
#include <public/arch-ia64/sioemu.h>
#include <asm/sioemu.h>

#define HVM_BUFFERED_IO_RANGE_NR 1

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

/* Standard VGA frame buffer: 0xA0000 - 0xBFFFF. */
static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};

/*
 * Try to queue a write into the domain's buffered-io ring.
 * Returns 1 if the request was buffered, 0 if it must take the
 * normal (synchronous) ioreq path.
 */
static int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    buffered_iopage_t *pg =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
    buf_ioreq_t bp;
    int i, qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    /* ignore READ ioreq_t and anything buffered io can't deal with */
    if (p->dir == IOREQ_READ || p->addr > 0xFFFFFUL ||
        p->data_is_ptr || p->count != 1)
        return 0;

    for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
        if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
            p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                    hvm_buffered_io_ranges[i]->length)
            break;
    }

    if (i == HVM_BUFFERED_IO_RANGE_NR)
        return 0;

    bp.type = p->type;
    bp.dir = p->dir;
    switch (p->size) {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
        return 0;
    }
    bp.data = p->data;
    bp.addr = p->addr;

    spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
        /* the queue is full.
         * send the iopacket through the normal path.
         * NOTE: The arithmetic operation could handle the situation for
         * write_pointer overflow.
         */
        spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
        return 0;
    }

    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
           &bp, sizeof(bp));

    if (qw) {
        bp.data = p->data >> 32;
        memcpy(&pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM],
               &bp, sizeof(bp));
    }

    /* Make the ioreq_t visible before write_pointer */
    wmb();
    pg->write_pointer += qw ? 2 : 1;

    spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    return 1;
}

/*
 * Forward a low-MMIO access to the device model.  Writes that fall in a
 * buffered range are queued; everything else goes through the normal
 * assist-request path, and reads pull the result back out of the ioreq.
 */
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v);
    if (!vio)
        panic_domain(NULL, "bad shared page");

    p = &vio->vp_ioreq;

    p->addr = pa;
    p->size = s;
    p->count = 1;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->dir = dir;
    p->df = 0;
    p->type = 1;

    p->io_count++;

    if (hvm_buffered_io_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        if (dir != IOREQ_READ)
            return;
    }

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

    return;
}

/*
 * Fast path for IDE data-port (0x1F0/0x170) PIO: satisfy the access from
 * the shared buffered-PIO page without a round trip to the device model.
 */
static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
{
    struct buffered_piopage *pio_page =
        (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
    spinlock_t *pio_lock;
    struct pio_buffer *piobuf;
    uint32_t pointer, page_offset;

    if (p->addr == 0x1F0)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    else if (p->addr == 0x170)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    else
        return 0;

    if (p->size != 2 && p->size != 4)
        return 0;

    pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
    spin_lock(pio_lock);

    pointer = piobuf->pointer;
    page_offset = piobuf->page_offset;

    /* sanity check */
    if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
        goto unlock_out;
    if (page_offset + piobuf->data_end > PAGE_SIZE)
        goto unlock_out;

    if (pointer + p->size < piobuf->data_end) {
        uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
        if (p->dir == IOREQ_WRITE) {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
                *(uint32_t *)bufp = *val;
            else
                memcpy(bufp, val, p->size);
        } else {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0))) {
                *val = *(uint32_t *)bufp;
            } else {
                *val = 0;
                memcpy(val, bufp, p->size);
            }
        }
        piobuf->pointer += p->size;
        spin_unlock(pio_lock);

        p->state = STATE_IORESP_READY;
        vmx_io_assist(current);
        return 1;
    }

 unlock_out:
    spin_unlock(pio_lock);
    return 0;
}

#define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))

/* Record the state of one identity-mapping region for save/restore. */
static void __vmx_identity_mapping_save(int on,
        const struct identity_mapping* im,
        struct hvm_hw_ia64_identity_mapping *im_save)
{
    im_save->on = !!on;
    if (!on) {
        im_save->pgprot = 0;
        im_save->key    = 0;
    } else {
        im_save->pgprot = im->pgprot;
        im_save->key    = im->key;
    }
}

/* Save the identity-mapping optional features (regions 4, 5 and 7). */
static int vmx_identity_mappings_save(struct domain *d,
                                      hvm_domain_context_t *h)
{
    const struct opt_feature *optf = &d->arch.opt_feature;
    struct hvm_hw_ia64_identity_mappings im_save;

    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG,
                                &optf->im_reg4, &im_save.im_reg4);
    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG,
                                &optf->im_reg5, &im_save.im_reg5);
    __vmx_identity_mapping_save(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG7_FLG,
                                &optf->im_reg7, &im_save.im_reg7);

    return hvm_save_entry(OPT_FEATURE_IDENTITY_MAPPINGS, 0, h, &im_save);
}

/* Re-apply one identity-mapping optional feature from its saved state. */
static int __vmx_identity_mapping_load(struct domain *d, unsigned long cmd,
        const struct hvm_hw_ia64_identity_mapping *im_load)
{
    struct xen_ia64_opt_feature optf;

    optf.cmd    = cmd;
    optf.on     = im_load->on;
    optf.pgprot = im_load->pgprot;
    optf.key    = im_load->key;

    return domain_opt_feature(d, &optf);
}

/* Restore the identity-mapping optional features from a save record. */
static int vmx_identity_mappings_load(struct domain *d,
                                      hvm_domain_context_t *h)
{
    struct hvm_hw_ia64_identity_mappings im_load;
    int rc;

    if (hvm_load_entry(OPT_FEATURE_IDENTITY_MAPPINGS, h, &im_load))
        return -EINVAL;
