/*
 * vcpu.c — Xen/ia64 VCPU PSR and control-register access routines.
 * (Original scraped page header read "📄 vcpu.c / 字号:" — site UI,
 * i.e. the filename plus a font-size control; replaced with this header.)
 */
ipsr->pk = 1; } else vcpu_pkr_use_unset(vcpu); if (enabling_interrupts && vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) PSCB(vcpu, pending_interruption) = 1; return IA64_NO_FAULT;}IA64FAULT vcpu_set_psr(VCPU * vcpu, u64 val){ IA64_PSR newpsr, vpsr; REGS *regs = vcpu_regs(vcpu); u64 enabling_interrupts = 0; /* Copy non-virtualized bits. */ newpsr.val = val & IA64_PSR_NON_VIRT_BITS; /* Bits forced to 1 (psr.si, psr.is and psr.mc are forced to 0) */ newpsr.val |= IA64_PSR_DI; newpsr.val |= IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT | IA64_PSR_BN | IA64_PSR_DI; /* * xenoprof: * keep psr.pp unchanged for xenoprof. */ if (regs->cr_ipsr & IA64_PSR_PP) newpsr.val |= IA64_PSR_PP; else newpsr.val &= ~IA64_PSR_PP; vpsr.val = val; if (val & IA64_PSR_DFH) { newpsr.dfh = 1; PSCB(vcpu, vpsr_dfh) = 1; } else { newpsr.dfh = PSCB(vcpu, hpsr_dfh); PSCB(vcpu, vpsr_dfh) = 0; } PSCB(vcpu, vpsr_pp) = vpsr.pp; if (vpsr.i) { if (vcpu->vcpu_info->evtchn_upcall_mask) enabling_interrupts = 1; vcpu->vcpu_info->evtchn_upcall_mask = 0; if (enabling_interrupts && vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) PSCB(vcpu, pending_interruption) = 1; } else vcpu->vcpu_info->evtchn_upcall_mask = 1; PSCB(vcpu, interrupt_collection_enabled) = vpsr.ic; vcpu_set_metaphysical_mode(vcpu, !(vpsr.dt && vpsr.rt && vpsr.it)); newpsr.cpl |= max_t(u64, vpsr.cpl, CONFIG_CPL0_EMUL); if (PSCB(vcpu, banknum) != vpsr.bn) { if (vpsr.bn) vcpu_bsw1(vcpu); else vcpu_bsw0(vcpu); } if (vpsr.pk) { vcpu_pkr_set_psr_handling(vcpu); newpsr.pk = 1; } else vcpu_pkr_use_unset(vcpu); regs->cr_ipsr = newpsr.val; return IA64_NO_FAULT;}u64 vcpu_get_psr(VCPU * vcpu){ REGS *regs = vcpu_regs(vcpu); PSR newpsr; PSR ipsr; ipsr.i64 = regs->cr_ipsr; /* Copy non-virtualized bits. */ newpsr.i64 = ipsr.i64 & IA64_PSR_NON_VIRT_BITS; /* Bits forced to 1 (psr.si and psr.is are forced to 0) */ newpsr.i64 |= IA64_PSR_DI; /* System mask. 
*/ newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled); newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask; if (!PSCB(vcpu, metaphysical_mode)) newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT; newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh); newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp); /* Fool cpl. */ if (ipsr.ia64_psr.cpl <= CONFIG_CPL0_EMUL) newpsr.ia64_psr.cpl = 0; else newpsr.ia64_psr.cpl = ipsr.ia64_psr.cpl; newpsr.ia64_psr.bn = PSCB(vcpu, banknum); return newpsr.i64;}IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval){ u64 psr = vcpu_get_psr(vcpu); *pval = psr & (MASK(0, 32) | MASK(35, 2)); return IA64_NO_FAULT;}BOOLEAN vcpu_get_psr_ic(VCPU * vcpu){ return !!PSCB(vcpu, interrupt_collection_enabled);}BOOLEAN vcpu_get_psr_i(VCPU * vcpu){ return !vcpu->vcpu_info->evtchn_upcall_mask;}/************************************************************************** VCPU control register access routines**************************************************************************/IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval){ *pval = PSCB(vcpu, dcr); return IA64_NO_FAULT;}IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval){ if (VMX_DOMAIN(vcpu)) *pval = PSCB(vcpu, iva) & ~0x7fffL; else *pval = PSCBX(vcpu, iva) & ~0x7fffL; return IA64_NO_FAULT;}IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval){ *pval = PSCB(vcpu, pta); return IA64_NO_FAULT;}IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval){ //REGS *regs = vcpu_regs(vcpu); //*pval = regs->cr_ipsr; *pval = PSCB(vcpu, ipsr); return IA64_NO_FAULT;}IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval){ *pval = PSCB(vcpu, isr); return IA64_NO_FAULT;}IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval){ //REGS *regs = vcpu_regs(vcpu); //*pval = regs->cr_iip; *pval = PSCB(vcpu, iip); return IA64_NO_FAULT;}IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval){ PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa); *pval = PSCB(vcpu, ifa); return IA64_NO_FAULT;}unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr){ ia64_rr rr; rr.rrval = 
PSCB(vcpu, rrs)[vadr >> 61]; return rr.ps;}unsigned long vcpu_get_rr_rid(VCPU * vcpu, u64 vadr){ ia64_rr rr; rr.rrval = PSCB(vcpu, rrs)[vadr >> 61]; return rr.rid;}unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa){ ia64_rr rr; rr.rrval = 0; rr.ps = vcpu_get_rr_ps(vcpu, ifa); rr.rid = vcpu_get_rr_rid(vcpu, ifa); return rr.rrval;}IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval){ u64 val = PSCB(vcpu, itir); *pval = val; return IA64_NO_FAULT;}IA64FAULT vcpu_get_iipa(VCPU * vcpu, u64 * pval){ u64 val = PSCB(vcpu, iipa); // SP entry code does not save iipa yet nor does it get // properly delivered in the pscb// printk("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n"); *pval = val; return IA64_NO_FAULT;}IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval){ //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs; //*pval = PSCB(vcpu,regs).cr_ifs; *pval = PSCB(vcpu, ifs); return IA64_NO_FAULT;}IA64FAULT vcpu_get_iim(VCPU * vcpu, u64 * pval){ u64 val = PSCB(vcpu, iim); *pval = val; return IA64_NO_FAULT;}IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 * pval){ PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_thash); *pval = PSCB(vcpu, iha); return IA64_NO_FAULT;}IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val){ PSCB(vcpu, dcr) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_iva(VCPU * vcpu, u64 val){ if (VMX_DOMAIN(vcpu)) PSCB(vcpu, iva) = val & ~0x7fffL; else PSCBX(vcpu, iva) = val & ~0x7fffL; return IA64_NO_FAULT;}IA64FAULT vcpu_set_pta(VCPU * vcpu, u64 val){ if (val & (0x3f << 9)) /* reserved fields */ return IA64_RSVDREG_FAULT; if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT; PSCB(vcpu, pta) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_ipsr(VCPU * vcpu, u64 val){ PSCB(vcpu, ipsr) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_isr(VCPU * vcpu, u64 val){ PSCB(vcpu, isr) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_iip(VCPU * vcpu, u64 val){ PSCB(vcpu, iip) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_increment_iip(VCPU * vcpu){ REGS *regs = vcpu_regs(vcpu); struct 
ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; if (ipsr->ri == 2) { ipsr->ri = 0; regs->cr_iip += 16; } else ipsr->ri++; return IA64_NO_FAULT;}IA64FAULT vcpu_decrement_iip(VCPU * vcpu){ REGS *regs = vcpu_regs(vcpu); struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; if (ipsr->ri == 0) { ipsr->ri = 2; regs->cr_iip -= 16; } else ipsr->ri--; return IA64_NO_FAULT;}IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val){ PSCB(vcpu, ifa) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_itir(VCPU * vcpu, u64 val){ PSCB(vcpu, itir) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_iipa(VCPU * vcpu, u64 val){ // SP entry code does not save iipa yet nor does it get // properly delivered in the pscb// printk("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n"); PSCB(vcpu, iipa) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_ifs(VCPU * vcpu, u64 val){ //REGS *regs = vcpu_regs(vcpu); PSCB(vcpu, ifs) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_iim(VCPU * vcpu, u64 val){ PSCB(vcpu, iim) = val; return IA64_NO_FAULT;}IA64FAULT vcpu_set_iha(VCPU * vcpu, u64 val){ PSCB(vcpu, iha) = val; return IA64_NO_FAULT;}/************************************************************************** VCPU interrupt control register access routines**************************************************************************/void vcpu_pend_unspecified_interrupt(VCPU * vcpu){ PSCB(vcpu, pending_interruption) = 1;}void vcpu_pend_interrupt(VCPU * vcpu, u64 vector){ if (vector & ~0xff) { printk("vcpu_pend_interrupt: bad vector\n"); return; } if (vcpu->arch.event_callback_ip) { printk("Deprecated interface. 
Move to new event based " "solution\n"); return; } if (VMX_DOMAIN(vcpu)) { set_bit(vector, VCPU(vcpu, irr)); } else { set_bit(vector, PSCBX(vcpu, irr)); PSCB(vcpu, pending_interruption) = 1; }}#define IA64_TPR_MMI 0x10000#define IA64_TPR_MIC 0x000f0/* checks to see if a VCPU has any unmasked pending interrupts * if so, returns the highest, else returns SPURIOUS_VECTOR *//* NOTE: Since this gets called from vcpu_get_ivr() and the * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit, * this routine also ignores pscb.interrupt_delivery_enabled * and this must be checked independently; see vcpu_deliverable interrupts() */u64 vcpu_check_pending_interrupts(VCPU * vcpu){ u64 *p, *r, bits, bitnum, mask, i, vector; if (vcpu->arch.event_callback_ip) return SPURIOUS_VECTOR; /* Always check pending event, since guest may just ack the * event injection without handle. Later guest may throw out * the event itself. */ check_start: if (event_pending(vcpu) && !test_bit(vcpu->domain->shared_info->arch.evtchn_vector, &PSCBX(vcpu, insvc[0]))) vcpu_pend_interrupt(vcpu, vcpu->domain->shared_info->arch. evtchn_vector); p = &PSCBX(vcpu, irr[3]); r = &PSCBX(vcpu, insvc[3]); for (i = 3 ;; p--, r--, i--) { bits = *p; if (bits) break; // got a potential interrupt if (*r) { // nothing in this word which is pending+inservice // but there is one inservice which masks lower return SPURIOUS_VECTOR; } if (i == 0) { // checked all bits... nothing pending+inservice return SPURIOUS_VECTOR; } } // have a pending,deliverable interrupt... 
see if it is masked bitnum = ia64_fls(bits);//printk("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...\n",bitnum); vector = bitnum + (i * 64); mask = 1L << bitnum; /* sanity check for guest timer interrupt */ if (vector == (PSCB(vcpu, itv) & 0xff)) { uint64_t now = ia64_get_itc(); if (now < PSCBX(vcpu, domain_itm)) {// printk("Ooops, pending guest timer before its due\n"); PSCBX(vcpu, irr[i]) &= ~mask; goto check_start; } }//printk("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...\n",vector); if (*r >= mask) { // masked by equal inservice//printk("but masked by equal inservice\n"); return SPURIOUS_VECTOR; } if (PSCB(vcpu, tpr) & IA64_TPR_MMI) { // tpr.mmi is set//printk("but masked by tpr.mmi\n"); return SPURIOUS_VECTOR; } if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) { //tpr.mic masks class//printk("but masked by tpr.mic\n"); return SPURIOUS_VECTOR; }//printk("returned to caller\n"); return vector;}u64 vcpu_deliverable_interrupts(VCPU * vcpu){ return (vcpu_get_psr_i(vcpu) && vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);}u64 vcpu_deliverable_timer(VCPU * vcpu){ return (vcpu_get_psr_i(vcpu) && vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));}IA64FAULT vcpu_get_lid(VCPU * vcpu, u64 * pval){ /* Use EID=0, ID=vcpu_id. */ *pval = vcpu->vcpu_id << 24; return IA64_NO_FAULT;}IA64FAULT vcpu_get_ivr(VCPU * vcpu, u64 * pval){ int i; u64 vector, mask;#define HEARTBEAT_FREQ 16 // period in seconds#ifdef HEARTBEAT_FREQ#define N_DOMS 16 // period in seconds#if 0 static long count[N_DOMS] = { 0 };#endif static long nonclockcount[N_DOMS] = { 0 }; unsigned domid = vcpu->domain->domain_id;#endif#ifdef IRQ_DEBUG static char firstivr = 1; static char firsttime[256]; if (firstivr) { int i; for (i = 0; i < 256; i++) firsttime[i] = 1; firstivr = 0;
/*
 * NOTE(review): the lines below are UI residue from the code-sharing site
 * this file was scraped from (a keyboard-shortcut help panel, in Chinese),
 * not part of vcpu.c. Preserved, commented out:
 *   ⌨️ Keyboard shortcuts
 *   Copy code        Ctrl + C
 *   Search code      Ctrl + F
 *   Full screen      F11
 *   Toggle theme     Ctrl + Shift + D
 *   Show shortcuts   ?
 *   Increase font    Ctrl + =
 *   Decrease font    Ctrl + -
 */