/*
 * vlsapic.c: virtual LSAPIC model for VTi domains (IA64 VMX).
 */
vpsr.val = VCPU(vcpu, vpsr); mask = irq_masked(vcpu, h_pending, h_inservice); if ( vpsr.i && IRQ_NO_MASKED == mask ) { isr = vpsr.val & IA64_PSR_RI; if ( !vpsr.ic ) panic_domain(regs,"Interrupt when IC=0\n"); update_vhpi(vcpu, h_pending); vmx_reflect_interruption(0, isr, 0, 12, regs); // EXT IRQ } else if (mask == IRQ_MASKED_BY_INSVC) { if (VCPU(vcpu, vhpi)) update_vhpi(vcpu, NULL_VECTOR); } else { // masked by vpsr.i or vtpr. update_vhpi(vcpu,h_pending); }chk_irq_exit: return h_pending;}/* * Set a INIT interruption request to vcpu[0] of target domain. * The INIT interruption is injected into each vcpu by guest firmware. */void vmx_pend_pal_init(struct domain *d){ VCPU *vcpu; vcpu = d->vcpu[0]; vcpu->arch.arch_vmx.pal_init_pending = 1;}/* * Only coming from virtualization fault. */void guest_write_eoi(VCPU *vcpu){ int vec; vec = highest_inservice_irq(vcpu); if (vec == NULL_VECTOR) { gdprintk(XENLOG_WARNING, "vcpu(%d): Wrong vector to EOI\n", vcpu->vcpu_id); return; } VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63)); VCPU(vcpu, eoi)=0; // overwrite the data vcpu->arch.irq_new_pending=1; wmb();}int is_unmasked_irq(VCPU *vcpu){ int h_pending, h_inservice; h_pending = highest_pending_irq(vcpu); h_inservice = highest_inservice_irq(vcpu); if ( h_pending == NULL_VECTOR || irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) { return 0; } else return 1;}uint64_t guest_read_vivr(VCPU *vcpu){ int vec, h_inservice, mask; vec = highest_pending_irq(vcpu); h_inservice = highest_inservice_irq(vcpu); mask = irq_masked(vcpu, vec, h_inservice); if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) { if (VCPU(vcpu, vhpi)) update_vhpi(vcpu, NULL_VECTOR); return IA64_SPURIOUS_INT_VECTOR; } if (mask == IRQ_MASKED_BY_VTPR) { update_vhpi(vcpu, vec); return IA64_SPURIOUS_INT_VECTOR; } VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63)); vmx_vcpu_unpend_interrupt(vcpu, vec); return (uint64_t)vec;}static void generate_exirq(VCPU *vcpu){ IA64_PSR vpsr; uint64_t isr; REGS 
*regs=vcpu_regs(vcpu); vpsr.val = VCPU(vcpu, vpsr); isr = vpsr.val & IA64_PSR_RI; if ( !vpsr.ic ) panic_domain(regs,"Interrupt when IC=0\n"); vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ}void vhpi_detection(VCPU *vcpu){ uint64_t threshold,vhpi; tpr_t vtpr; IA64_PSR vpsr; vpsr.val = VCPU(vcpu, vpsr); vtpr.val = VCPU(vcpu, tpr); threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic; vhpi = VCPU(vcpu,vhpi); if ( vhpi > threshold ) { // interrupt actived generate_exirq (vcpu); }}void vmx_vexirq(VCPU *vcpu){ generate_exirq (vcpu);}struct vcpu *lid_to_vcpu(struct domain *d, uint16_t dest){ int id = dest >> 8; /* Fast look: assume EID=0 ID=vcpu_id. */ if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS) return d->vcpu[id]; return NULL;}/* * To inject INIT to guest, we must set the PAL_INIT entry * and set psr to switch to physical mode */#define PAL_INIT_ENTRY 0x80000000ffffffa0#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \ IA64_PSR_IC | IA64_PSR_RI)static void vmx_inject_guest_pal_init(VCPU *vcpu){ REGS *regs = vcpu_regs(vcpu); uint64_t psr = vmx_vcpu_get_psr(vcpu); regs->cr_iip = PAL_INIT_ENTRY; psr = psr & ~PSR_SET_BITS; vmx_vcpu_set_psr(vcpu, psr);}/* * Deliver IPI message. (Only U-VP is supported now) * offset: address offset to IPI space. * value: deliver value. 
*/static int vcpu_deliver_int(VCPU *vcpu, uint64_t dm, uint64_t vector){ int running = vcpu->is_running; IPI_DPRINTK("deliver_int %lx %lx\n", dm, vector); switch (dm) { case SAPIC_FIXED: // INT vmx_vcpu_pend_interrupt(vcpu, vector); break; case SAPIC_LOWEST_PRIORITY: { struct vcpu *lowest = vcpu_viosapic(vcpu)->lowest_vcpu; if (lowest == NULL) lowest = vcpu; vmx_vcpu_pend_interrupt(lowest, vector); break; } case SAPIC_PMI: // TODO -- inject guest PMI panic_domain(NULL, "Inject guest PMI!\n"); break; case SAPIC_NMI: vmx_vcpu_pend_interrupt(vcpu, 2); break; case SAPIC_INIT: vmx_inject_guest_pal_init(vcpu); break; case SAPIC_EXTINT: // ExtINT vmx_vcpu_pend_interrupt(vcpu, 0); break; default: return -EINVAL; } /* Kick vcpu. */ vcpu_unblock(vcpu); if (running) smp_send_event_check_cpu(vcpu->processor); return 0;}int vlsapic_deliver_int(struct domain *d, uint16_t dest, uint64_t dm, uint64_t vector){ VCPU *vcpu; vcpu = lid_to_vcpu(d, dest); if (vcpu == NULL) return -ESRCH; if (!vcpu->is_initialised || test_bit(_VPF_down, &vcpu->pause_flags)) return -ENOEXEC; return vcpu_deliver_int (vcpu, dm, vector);}/* * Deliver the INIT interruption to guest. */void deliver_pal_init(VCPU *vcpu){ vcpu_deliver_int(vcpu, SAPIC_INIT, 0);}/* * execute write IPI op. */static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value){ VCPU *targ; struct domain *d = vcpu->domain; targ = lid_to_vcpu(vcpu->domain, (((ipi_a_t)addr).id << 8) | ((ipi_a_t)addr).eid); if (targ == NULL) panic_domain(NULL, "Unknown IPI cpu\n"); if (!targ->is_initialised || test_bit(_VPF_down, &targ->pause_flags)) { struct pt_regs *targ_regs = vcpu_regs(targ); if (arch_set_info_guest(targ, NULL) != 0) { printk("arch_boot_vcpu: failure\n"); return; } /* First or next rendez-vous: set registers. 
*/ vcpu_init_regs(targ); targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip; targ_regs->r1 = d->arch.sal_data->boot_rdv_r1; if (test_and_clear_bit(_VPF_down,&targ->pause_flags)) { vcpu_wake(targ); printk(XENLOG_DEBUG "arch_boot_vcpu: vcpu %d awaken %016lx!\n", targ->vcpu_id, targ_regs->cr_iip); } else { printk("arch_boot_vcpu: huh, already awake!"); } } else { if (((ipi_d_t)value).dm == SAPIC_LOWEST_PRIORITY || vcpu_deliver_int(targ, ((ipi_d_t)value).dm, ((ipi_d_t)value).vector) < 0) panic_domain(NULL, "Deliver reserved interrupt!\n"); } return;}unsigned long vlsapic_read(struct vcpu *v, unsigned long addr, unsigned long length){ uint64_t result = 0; addr &= (PIB_SIZE - 1); switch (addr) { case PIB_OFST_INTA: if (length == 1) // 1 byte load ; // There is no i8259, there is no INTA access else panic_domain(NULL,"Undefined read on PIB INTA\n"); break; case PIB_OFST_XTP: if (length == 1) { result = VLSAPIC_XTP(v); // printk("read xtp %lx\n", result); } else { panic_domain(NULL, "Undefined read on PIB XTP\n"); } break; default: if (PIB_LOW_HALF(addr)) { // lower half if (length != 8 ) panic_domain(NULL, "Undefined IPI-LHF read!\n"); else IPI_DPRINTK("IPI-LHF read %lx\n", pib_off); } else { // upper half IPI_DPRINTK("IPI-UHF read %lx\n", addr); } break; } return result;}static void vlsapic_write_xtp(struct vcpu *v, uint8_t val){ struct viosapic * viosapic; struct vcpu *lvcpu, *vcpu; viosapic = vcpu_viosapic(v); spin_lock(&viosapic->lock); lvcpu = viosapic->lowest_vcpu; VLSAPIC_XTP(v) = val; for_each_vcpu(v->domain, vcpu) { if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu)) lvcpu = vcpu; } if (VLSAPIC_XTP(lvcpu) & 0x80) // Disabled lvcpu = NULL; viosapic->lowest_vcpu = lvcpu; spin_unlock(&viosapic->lock);}void vlsapic_write(struct vcpu *v, unsigned long addr, unsigned long length, unsigned long val){ addr &= (PIB_SIZE - 1); switch (addr) { case PIB_OFST_INTA: panic_domain(NULL, "Undefined write on PIB INTA\n"); break; case PIB_OFST_XTP: if (length == 1) { // printk("write xtp 
%lx\n", val); vlsapic_write_xtp(v, val); } else { panic_domain(NULL, "Undefined write on PIB XTP\n"); } break; default: if (PIB_LOW_HALF(addr)) { // lower half if (length != 8) panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n", length); else vlsapic_write_ipi(v, addr, val); } else { // upper half // printk("IPI-UHF write %lx\n",addr); panic_domain(NULL, "No support for SM-VP yet\n"); } break; }}static int vlsapic_save(struct domain *d, hvm_domain_context_t *h){ struct vcpu *v; for_each_vcpu(d, v) { struct hvm_hw_ia64_vlsapic vlsapic; int i; if (test_bit(_VPF_down, &v->pause_flags)) continue; memset(&vlsapic, 0, sizeof(vlsapic)); for (i = 0; i < 4; i++) vlsapic.insvc[i] = VLSAPIC_INSVC(v,i); vlsapic.vhpi = VCPU(v, vhpi); vlsapic.xtp = VLSAPIC_XTP(v); vlsapic.pal_init_pending = v->arch.arch_vmx.pal_init_pending; if (hvm_save_entry(VLSAPIC, v->vcpu_id, h, &vlsapic)) return -EINVAL; } return 0;}static int vlsapic_load(struct domain *d, hvm_domain_context_t *h){ uint16_t vcpuid; struct vcpu *v; struct hvm_hw_ia64_vlsapic vlsapic; int i; vcpuid = hvm_load_instance(h); if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { gdprintk(XENLOG_ERR, "%s: domain has no vlsapic %u\n", __func__, vcpuid); return -EINVAL; } if (hvm_load_entry(VLSAPIC, h, &vlsapic) != 0) return -EINVAL; for (i = 0; i < 4; i++) VLSAPIC_INSVC(v,i) = vlsapic.insvc[i]; VCPU(v, vhpi) = vlsapic.vhpi; VLSAPIC_XTP(v) = vlsapic.xtp; v->arch.arch_vmx.pal_init_pending = vlsapic.pal_init_pending; v->arch.irq_new_pending = 1; /* to force checking irq */ return 0;}HVM_REGISTER_SAVE_RESTORE(VLSAPIC, vlsapic_save, vlsapic_load, 1, HVMSR_PER_VCPU);static int vtime_save(struct domain *d, hvm_domain_context_t *h){ struct vcpu *v; for_each_vcpu(d, v) { vtime_t *vtm = &VMX(v, vtm); struct hvm_hw_ia64_vtime vtime; if (test_bit(_VPF_down, &v->pause_flags)) continue; stop_timer(&vtm->vtm_timer);//XXX should wait for callback not running. 
memset(&vtime, 0, sizeof(vtime)); vtime.itc = now_itc(vtm); vtime.itm = VCPU(v, itm); vtime.last_itc = vtm->last_itc; vtime.pending = vtm->pending; vtm_set_itm(v, vtime.itm);// this may start timer. if (hvm_save_entry(VTIME, v->vcpu_id, h, &vtime)) return -EINVAL; } return 0;}static int vtime_load(struct domain *d, hvm_domain_context_t *h){ uint16_t vcpuid; struct vcpu *v; struct hvm_hw_ia64_vtime vtime; vtime_t *vtm; vcpuid = hvm_load_instance(h); if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { gdprintk(XENLOG_ERR, "%s: domain has no vtime %u\n", __func__, vcpuid); return -EINVAL; } if (hvm_load_entry(VTIME, h, &vtime) != 0) return -EINVAL; vtm = &VMX(v, vtm); stop_timer(&vtm->vtm_timer); //XXX should wait for callback not running. vtm->last_itc = vtime.last_itc; vtm->pending = vtime.pending; migrate_timer(&vtm->vtm_timer, v->processor); vtm_set_itm(v, vtime.itm); vtm_set_itc(v, vtime.itc); // This may start timer. if (test_and_clear_bit(_VPF_down, &v->pause_flags)) vcpu_wake(v); return 0;}HVM_REGISTER_SAVE_RESTORE(VTIME, vtime_save, vtime_load, 1, HVMSR_PER_VCPU);