
📄 platform.c

📁 xen 3.2.2 source
💻 C
               type, gpa, count, size, value, dir, value_is_ptr);
    }

    vio = get_ioreq(v);
    if ( vio == NULL ) {
        printk("bad shared page\n");
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send mmio with something already pending (%d)?\n",
               p->state);
    p->dir = dir;
    p->data_is_ptr = value_is_ptr;
    p->type = type;
    p->size = size;
    p->addr = gpa;
    p->count = count;
    p->df = df;

    p->io_count++;

    p->data = value;

    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
    {
        p->state = STATE_IORESP_READY;
        hvm_io_assist();
        return;
    }

    hvm_send_assist_req(v);
}

void send_timeoffset_req(unsigned long timeoff)
{
    ioreq_t p[1];

    if ( timeoff == 0 )
        return;

    memset(p, 0, sizeof(*p));

    p->type = IOREQ_TYPE_TIMEOFFSET;
    p->size = 8;
    p->count = 1;
    p->dir = IOREQ_WRITE;
    p->data = timeoff;
    p->state = STATE_IOREQ_READY;

    if ( !hvm_buffered_io_send(p) )
        printk("Unsuccessful timeoffset update\n");
}

/* Ask ioemu mapcache to invalidate mappings. */
void send_invalidate_req(void)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_ioreq(v);
    if ( vio == NULL )
    {
        printk("bad shared page: %lx\n", (unsigned long) vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;
    if ( p->state != STATE_IOREQ_NONE )
        printk("WARNING: send invalidate req with something "
               "already pending (%d)?\n", p->state);

    p->type = IOREQ_TYPE_INVALIDATE;
    p->size = 4;
    p->dir = IOREQ_WRITE;
    p->data = ~0UL; /* flush all */
    p->io_count++;

    hvm_send_assist_req(v);
}

static void mmio_operands(int type, unsigned long gpa,
                          struct hvm_io_op *mmio_op,
                          unsigned char op_size)
{
    unsigned long value = 0;
    int df, index, size_reg;
    struct cpu_user_regs *regs = &mmio_op->io_context;

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    size_reg = operand_size(mmio_op->operand[0]);

    if ( mmio_op->operand[0] & REGISTER ) {            /* dest is memory */
        index = operand_index(mmio_op->operand[0]);
        value = get_reg_value(size_reg, index, 0, regs);
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & IMMEDIATE ) {    /* dest is memory */
        value = mmio_op->immediate;
        send_mmio_req(type, gpa, 1, op_size, value, IOREQ_WRITE, df, 0);
    } else if ( mmio_op->operand[0] & MEMORY ) {       /* dest is register */
        /* send the request and wait for the value */
        if ( (mmio_op->instr == INSTR_MOVZX) ||
             (mmio_op->instr == INSTR_MOVSX) )
            send_mmio_req(type, gpa, 1, size_reg, 0, IOREQ_READ, df, 0);
        else
            send_mmio_req(type, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
    } else {
        printk("%s: invalid dest mode.\n", __func__);
        domain_crash_synchronous();
    }
}

#define GET_REPEAT_COUNT() \
     (mmio_op->flags & REPZ ? (ad_size == WORD ? regs->ecx & 0xFFFF : regs->ecx) : 1)
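/*
 * Illustrative note (not part of the original source): GET_REPEAT_COUNT()
 * yields the iteration count for a repeated string instruction. With a REPZ
 * prefix it reads %ecx, masked to 16 bits when the guest uses 16-bit
 * addressing (ad_size == WORD); without a prefix the count is 1. For
 * example, a "rep movsw" executed with %ecx = 0x0001000a in 16-bit code
 * repeats 0x000a times, not 0x1000a times.
 */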
void handle_mmio(paddr_t gpa)
{
    unsigned long inst_addr;
    struct hvm_io_op *mmio_op;
    struct cpu_user_regs *regs;
    unsigned char inst[MAX_INST_LEN], ad_size, op_size, seg_sel;
    int i, address_bytes, df, inst_len;
    struct vcpu *v = current;

    mmio_op = &v->arch.hvm_vcpu.io_op;
    regs = &mmio_op->io_context;

    /* Copy current guest state into io instruction state structure. */
    memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);

    df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;

    address_bytes = hvm_guest_x86_mode(v);
    if (address_bytes < 2)
        /* real or vm86 modes */
        address_bytes = 2;
    inst_addr = hvm_get_segment_base(v, x86_seg_cs) + regs->eip;
    memset(inst, 0, MAX_INST_LEN);
    inst_len = hvm_instruction_fetch(inst_addr, address_bytes, inst);
    if ( inst_len <= 0 )
    {
        gdprintk(XENLOG_DEBUG, "handle_mmio: failed to get instruction\n");
        /* hvm_instruction_fetch() will have injected a #PF; get out now */
        return;
    }

    if ( mmio_decode(address_bytes, inst, mmio_op, &ad_size,
                     &op_size, &seg_sel) == DECODE_failure )
    {
        gdprintk(XENLOG_WARNING,
                 "handle_mmio: failed to decode instruction\n");
        gdprintk(XENLOG_WARNING,
                 "mmio opcode: gpa 0x%"PRIpaddr", len %d:", gpa, inst_len);
        for ( i = 0; i < inst_len; i++ )
            printk(" %02x", inst[i] & 0xFF);
        printk("\n");

        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
        return;
    }

    regs->eip += inst_len; /* advance %eip */

    switch ( mmio_op->instr ) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_MOVS:
    {
        struct segment_register sreg;
        unsigned long count = GET_REPEAT_COUNT();
        int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
        unsigned long addr, gfn;
        paddr_t paddr;
        int dir, size = op_size;
        uint32_t pfec;

        ASSERT(count);

        /* determine non-MMIO address */
        addr = regs->edi;
        if ( ad_size == WORD )
            addr &= 0xFFFF;
        addr += hvm_get_segment_base(v, x86_seg_es);

        pfec = PFEC_page_present | PFEC_write_access;
        hvm_get_segment_register(v, x86_seg_ss, &sreg);
        if ( sreg.attr.fields.dpl == 3 )
            pfec |= PFEC_user_mode;

        gfn = paging_gva_to_gfn(v, addr, &pfec);
        paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        if ( paddr == gpa )
        {
            enum x86_segment seg;

            dir = IOREQ_WRITE;
            addr = regs->esi;
            if ( ad_size == WORD )
                addr &= 0xFFFF;
            switch ( seg_sel )
            {
            case 0x26: seg = x86_seg_es; break;
            case 0x2e: seg = x86_seg_cs; break;
            case 0x36: seg = x86_seg_ss; break;
            case 0:
            case 0x3e: seg = x86_seg_ds; break;
            case 0x64: seg = x86_seg_fs; break;
            case 0x65: seg = x86_seg_gs; break;
            default: domain_crash_synchronous();
            }
            addr += hvm_get_segment_base(v, seg);
            pfec &= ~PFEC_write_access;
            gfn = paging_gva_to_gfn(v, addr, &pfec);
            paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
        }
        else
            dir = IOREQ_READ;

        if ( gfn == INVALID_GFN )
        {
            /* The guest does not have the non-mmio address mapped.
             * Need to send in a page fault */
            regs->eip -= inst_len; /* do not advance %eip */
            hvm_inject_exception(TRAP_page_fault, pfec, addr);
            return;
        }
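        /*
         * Illustrative note (not part of the original source): paddr is
         * composed from the translated frame number plus the in-page offset.
         * With 4KiB pages (PAGE_SHIFT == 12), a virtual address 0xb8002 that
         * translates to gfn 0x42 gives paddr = (0x42 << 12) | 0x002 =
         * 0x42002. Comparing that against the faulting gpa decides which
         * side of the MOVS is MMIO: a match means %es:%edi (the destination)
         * is MMIO, so this is a write and the source %seg:%esi is translated
         * instead; otherwise it is a read from MMIO.
         */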
        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-contiguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next rep;movs copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourselves. After this copy succeeds, "rep movs" is executed
         * again.
         */
        if ( (addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK) ) {
            unsigned long value = 0;

            mmio_op->flags |= OVERLAP;

            if ( dir == IOREQ_WRITE ) {
                if ( hvm_paging_enabled(v) )
                {
                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
                    if ( rv == HVMCOPY_bad_gva_to_gfn )
                        return; /* exception already injected */
                }
                else
                    (void)hvm_copy_from_guest_phys(&value, addr, size);
            } else /* dir != IOREQ_WRITE */
                /* Remember where to write the result, as a *VA*.
                 * Must be a VA so we can handle the page overlap
                 * correctly in hvm_mmio_assist() */
                mmio_op->addr = addr;

            if ( count != 1 )
                regs->eip -= inst_len; /* do not advance %eip */

            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, df, 0);
        } else {
            unsigned long last_addr = sign > 0 ? addr + count * size - 1
                                               : addr - (count - 1) * size;

            if ( (addr & PAGE_MASK) != (last_addr & PAGE_MASK) )
            {
                regs->eip -= inst_len; /* do not advance %eip */

                if ( sign > 0 )
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size + 1;
            }

            ASSERT(count);

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size,
                          paddr, dir, df, 1);
        }
        break;
    }
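    /*
     * Illustrative note (not part of the original source): a worked example
     * of the count clamping above. A forward "rep movsl" (size == 4,
     * sign > 0) with count == 16 and a non-MMIO page offset of 0xff0 would
     * run past the page boundary (last byte at offset 0x102f), so count is
     * clamped to (PAGE_SIZE - 0xff0) / 4 == 4 and %eip is not advanced;
     * once the clamped request completes and the string registers have been
     * adjusted, the guest re-executes "rep movs" for the remaining 12
     * iterations on the next page.
     */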
    case INSTR_MOVZX:
    case INSTR_MOVSX:
        mmio_operands(IOREQ_TYPE_COPY, gpa, mmio_op, op_size);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, regs->eax, IOREQ_WRITE, df, 0);
        break;

    case INSTR_LODS:
        /*
         * Since the source is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mmio_op->operand[0] = mk_operand(op_size, 0, 0, REGISTER);
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
                      GET_REPEAT_COUNT(), op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, mmio_op, op_size);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, mmio_op, op_size);
        break;

    case INSTR_ADD:
        mmio_operands(IOREQ_TYPE_ADD, gpa, mmio_op, op_size);
        break;

    case INSTR_SUB:
        mmio_operands(IOREQ_TYPE_SUB, gpa, mmio_op, op_size);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, mmio_op, op_size);
        break;

    case INSTR_PUSH:
        if ( ad_size == WORD )
        {
            mmio_op->addr = (uint16_t)(regs->esp - op_size);
            regs->esp = mmio_op->addr | (regs->esp & ~0xffff);
        }
        else
        {
            regs->esp -= op_size;
            mmio_op->addr = regs->esp;
        }
        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_CMP:        /* Pass through */
    case INSTR_TEST:
        /* send the request and wait for the value */
        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, op_size, 0, IOREQ_READ, df, 0);
        break;

    case INSTR_BT:
    {
        unsigned long value = 0;
        int index, size;

        if ( mmio_op->operand[0] & REGISTER )
        {
            index = operand_index(mmio_op->operand[0]);
            size = operand_size(mmio_op->operand[0]);
            value = get_reg_value(size, index, 0, regs);
        }
        else if ( mmio_op->operand[0] & IMMEDIATE )
            value = mmio_op->immediate;

        send_mmio_req(IOREQ_TYPE_COPY, gpa + (value >> 5), 1,
                      op_size, 0, IOREQ_READ, df, 0);
        break;
    }

    case INSTR_XCHG:
        if ( mmio_op->operand[0] & REGISTER ) {
            long value;
            unsigned long operand = mmio_op->operand[0];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        } else {
            /* the destination is a register */
            long value;
            unsigned long operand = mmio_op->operand[1];
            value = get_reg_value(operand_size(operand),
                                  operand_index(operand), 0,
                                  regs);
            /* send the request and wait for the value */
            send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
                          op_size, value, IOREQ_WRITE, df, 0);
        }
        break;

    default:
        printk("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}
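/*
 * Illustrative note (not part of the original source): the flow above is
 * fetch -> decode -> build an ioreq. Requests claimed by an internal handler
 * (hvm_mmio_intercept()/hvm_buffered_io_intercept() in send_mmio_req()) are
 * completed immediately via hvm_io_assist(); all others are forwarded to the
 * external device model with hvm_send_assist_req() and completed when it
 * responds.
 */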
DEFINE_PER_CPU(int, guest_handles_in_xen_space);

/* Note that copy_{to,from}_user_hvm require the PTE to be writable even
   when they're only trying to read from it.  The guest is expected to
   deal with this. */
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
{
    int rc;

    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len);
    return rc ? len : 0; /* fake a copy_to_user() return code */
}

unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
{
    int rc;

    if ( this_cpu(guest_handles_in_xen_space) )
    {
        memcpy(to, from, len);
        return 0;
    }

    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len);
    return rc ? len : 0; /* fake a copy_from_user() return code */
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
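The "fake a copy_to_user() return code" comments refer to the Linux convention that copy_to_user()/copy_from_user() return the number of bytes left uncopied, with 0 meaning success; here a failure reports the whole length. A minimal caller sketch under that assumption (the helper name and guest pointer are hypothetical):

    /* hypothetical caller: fail the operation if the guest copy fails */
    static int set_guest_value(void *guest_ptr, uint64_t val)
    {
        /* copy_to_user_hvm() returns 0 on success, len on failure */
        if ( copy_to_user_hvm(guest_ptr, &val, sizeof(val)) != 0 )
            return -EFAULT;
        return 0;
    }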
