/*
 * fw_emul.c
 * From the "Xen virtual machine source package" - C code,
 * 1,592 lines in total; this is page 1 of 3.
 */
/* * Somehow ACPI breaks if allowing this one */ case SN_SAL_SET_CPU_NUMBER: status = -1; if (current->domain == dom0) { printk("*** Emulating SN_SAL_SET_CPU_NUMBER ***\n"); SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SET_CPU_NUMBER, in1, 0, 0, 0, 0, 0, 0); status = ret_stuff.status; r9 = ret_stuff.v0; r10 = ret_stuff.v1; r11 = ret_stuff.v2; } break;#endif case SN_SAL_LOG_CE: status = -1; if (current->domain == dom0) { static int log_ce = 0; if (!log_ce) { printk("*** Emulating SN_SAL_LOG_CE *** " " this will only be printed once\n"); log_ce = 1; } SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0); status = ret_stuff.status; r9 = ret_stuff.v0; r10 = ret_stuff.v1; r11 = ret_stuff.v2; } break; case SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST: status = -1; if (current->domain == dom0) { struct sn_flush_device_common flush; int flush_size; flush_size = sizeof(struct sn_flush_device_common); memset(&flush, 0, flush_size); SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST, in1, in2, in3, &flush, 0, 0, 0);#if 0 printk("*** Emulating " "SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST ***\n");#endif if (ret_stuff.status == SALRET_OK) { XEN_GUEST_HANDLE(void) handle = *(XEN_GUEST_HANDLE(void)*)&in4; if (copy_to_guest(handle, &flush, 1)) { printk("SN_SAL_IOIF_GET_DEVICE_" "DMAFLUSH_LIST can't copy " "to user!\n"); ret_stuff.status = SALRET_ERROR; } } status = ret_stuff.status; r9 = ret_stuff.v0; r10 = ret_stuff.v1; r11 = ret_stuff.v2; } break; default: printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). 
" "IGNORED...\n", index); status = -1; break; } return ((struct sal_ret_values) {status, r9, r10, r11});}static intsafe_copy_to_guest(unsigned long to, void *from, long size){ BUG_ON((unsigned)size > PAGE_SIZE); if (VMX_DOMAIN(current)) { if (is_virtual_mode(current)) { thash_data_t *data; unsigned long gpa, poff; /* The caller must provide a DTR or DTC mapping */ data = vtlb_lookup(current, to, DSIDE_TLB); if (data) { gpa = data->page_flags & _PAGE_PPN_MASK; } else { data = vhpt_lookup(to); if (!data) return -1; gpa = __mpa_to_gpa( data->page_flags & _PAGE_PPN_MASK); gpa &= _PAGE_PPN_MASK; } poff = POFFSET(to, data->ps); if (poff + size > PSIZE(data->ps)) return -1; to = PAGEALIGN(gpa, data->ps) | poff; } to |= XENCOMM_INLINE_FLAG; if (xencomm_copy_to_guest((void *)to, from, size, 0) != 0) return -1; return 0; } else { /* check for vulnerability */ if (IS_VMM_ADDRESS(to) || IS_VMM_ADDRESS(to + size - 1)) panic_domain(NULL, "copy to bad address:0x%lx\n", to); return copy_to_user((void __user *)to, from, size); }}cpumask_t cpu_cache_coherent_map;struct cache_flush_args { u64 cache_type; u64 operation; u64 progress; long status;};static voidremote_pal_cache_flush(void *v){ struct cache_flush_args *args = v; long status; u64 progress = args->progress; status = ia64_pal_cache_flush(args->cache_type, args->operation, &progress, NULL); if (status != 0) args->status = status;}static voidremote_pal_prefetch_visibility(void *v){ s64 trans_type = (s64)v; ia64_pal_prefetch_visibility(trans_type);}static voidremote_pal_mc_drain(void *v){ ia64_pal_mc_drain();}struct ia64_pal_retvalxen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3){ unsigned long r9 = 0; unsigned long r10 = 0; unsigned long r11 = 0; long status = PAL_STATUS_UNIMPLEMENTED; unsigned long flags; int processor; if (unlikely(running_on_sim)) return pal_emulator_static(index); debugger_event(XEN_IA64_DEBUG_ON_PAL); // pal code must be mapped by a TR when pal is called, however // calls are rare enough 
that we will map it lazily rather than // at every context switch //efi_map_pal_code(); switch (index) { case PAL_MEM_ATTRIB: status = ia64_pal_mem_attrib(&r9); break; case PAL_FREQ_BASE: status = ia64_pal_freq_base(&r9); if (status == PAL_STATUS_UNIMPLEMENTED) { status = ia64_sal_freq_base(0, &r9, &r10); r10 = 0; } break; case PAL_PROC_GET_FEATURES: status = ia64_pal_proc_get_features(&r9,&r10,&r11); break; case PAL_BUS_GET_FEATURES: status = ia64_pal_bus_get_features( (pal_bus_features_u_t *) &r9, (pal_bus_features_u_t *) &r10, (pal_bus_features_u_t *) &r11); break; case PAL_FREQ_RATIOS: status = ia64_pal_freq_ratios( (struct pal_freq_ratio *) &r9, (struct pal_freq_ratio *) &r10, (struct pal_freq_ratio *) &r11); break; case PAL_PTCE_INFO: /* * return hard-coded xen-specific values because ptc.e * is emulated on xen to always flush everything * these values result in only one ptc.e instruction */ status = PAL_STATUS_SUCCESS; r10 = (1L << 32) | 1L; break; case PAL_VERSION: status = ia64_pal_version( (pal_version_u_t *) &r9, (pal_version_u_t *) &r10); break; case PAL_VM_PAGE_SIZE: status = ia64_pal_vm_page_size(&r9,&r10); break; case PAL_DEBUG_INFO: status = ia64_pal_debug_info(&r9,&r10); break; case PAL_CACHE_SUMMARY: status = ia64_pal_cache_summary(&r9,&r10); break; case PAL_VM_SUMMARY: if (VMX_DOMAIN(current)) { pal_vm_info_1_u_t v1; pal_vm_info_2_u_t v2; status = ia64_pal_vm_summary((pal_vm_info_1_u_t *)&v1, (pal_vm_info_2_u_t *)&v2); v1.pal_vm_info_1_s.max_itr_entry = NITRS - 1; v1.pal_vm_info_1_s.max_dtr_entry = NDTRS - 1; v2.pal_vm_info_2_s.impl_va_msb -= 1; v2.pal_vm_info_2_s.rid_size = current->domain->arch.rid_bits; r9 = v1.pvi1_val; r10 = v2.pvi2_val; } else { /* Use xen-specific values. hash_tag_id is somewhat random! 
*/ static const pal_vm_info_1_u_t v1 = {.pal_vm_info_1_s = { .vw = 1, .phys_add_size = 44, .key_size = 16, .max_pkr = XEN_IA64_NPKRS, .hash_tag_id = 0x30, .max_dtr_entry = NDTRS - 1, .max_itr_entry = NITRS - 1, .max_unique_tcs = 3, .num_tc_levels = 2 }}; pal_vm_info_2_u_t v2; v2.pvi2_val = 0; v2.pal_vm_info_2_s.rid_size = current->domain->arch.rid_bits; v2.pal_vm_info_2_s.impl_va_msb = 50; r9 = v1.pvi1_val; r10 = v2.pvi2_val; status = PAL_STATUS_SUCCESS; } break; case PAL_VM_INFO: if (VMX_DOMAIN(current)) { status = ia64_pal_vm_info(in1, in2, (pal_tc_info_u_t *)&r9, &r10); break; } if (in1 == 0 && in2 == 2) { /* Level 1: VHPT */ const pal_tc_info_u_t v = { .pal_tc_info_s = {.num_sets = 128, .associativity = 1, .num_entries = 128, .pf = 1, .unified = 1, .reduce_tr = 0, .reserved = 0}}; r9 = v.pti_val; /* Only support PAGE_SIZE tc. */ r10 = PAGE_SIZE; status = PAL_STATUS_SUCCESS; } else if (in1 == 1 && (in2 == 1 || in2 == 2)) { /* Level 2: itlb/dtlb, 1 entry. */ const pal_tc_info_u_t v = { .pal_tc_info_s = {.num_sets = 1, .associativity = 1, .num_entries = 1, .pf = 1, .unified = 0, .reduce_tr = 0, .reserved = 0}}; r9 = v.pti_val; /* Only support PAGE_SIZE tc. 
*/ r10 = PAGE_SIZE; status = PAL_STATUS_SUCCESS; } else status = PAL_STATUS_EINVAL; break; case PAL_RSE_INFO: status = ia64_pal_rse_info(&r9, (pal_hints_u_t *)&r10); break; case PAL_REGISTER_INFO: status = ia64_pal_register_info(in1, &r9, &r10); break; case PAL_CACHE_FLUSH: if (in3 != 0) /* Initially progress_indicator must be 0 */ panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, " "progress_indicator=%lx", in3); /* Always call Host Pal in int=0 */ in2 &= ~PAL_CACHE_FLUSH_CHK_INTRS; if (in1 != PAL_CACHE_TYPE_COHERENT) { struct cache_flush_args args = { .cache_type = in1, .operation = in2, .progress = 0, .status = 0 }; smp_call_function(remote_pal_cache_flush, (void *)&args, 1, 1); if (args.status != 0) panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, " "remote status %lx", args.status); } /* * Call Host PAL cache flush * Clear psr.ic when call PAL_CACHE_FLUSH */ r10 = in3; local_irq_save(flags); processor = current->processor; status = ia64_pal_cache_flush(in1, in2, &r10, &r9); local_irq_restore(flags); if (status != 0) panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, " "status %lx", status); if (in1 == PAL_CACHE_TYPE_COHERENT) { cpus_setall(current->arch.cache_coherent_map); cpu_clear(processor, current->arch.cache_coherent_map); cpus_setall(cpu_cache_coherent_map); cpu_clear(processor, cpu_cache_coherent_map); } break; case PAL_PERF_MON_INFO: { unsigned long pm_buffer[16]; status = ia64_pal_perf_mon_info( pm_buffer, (pal_perf_mon_info_u_t *) &r9); if (status != 0) { printk("PAL_PERF_MON_INFO fails ret=%ld\n", status); break; } if (safe_copy_to_guest( in1, pm_buffer, sizeof(pm_buffer))) { status = PAL_STATUS_EINVAL; goto fail_to_copy; } } break; case PAL_CACHE_INFO: { pal_cache_config_info_t ci; status = ia64_pal_cache_config_info(in1,in2,&ci); if (status != 0) break; r9 = ci.pcci_info_1.pcci1_data; r10 = ci.pcci_info_2.pcci2_data; } break; case PAL_VM_TR_READ: /* FIXME: vcpu_get_tr?? 
*/ printk("%s: PAL_VM_TR_READ unimplmented, ignored\n", __func__); break; case PAL_HALT_INFO: { /* 1000 cycles to enter/leave low power state, consumes 10 mW, implemented and cache/TLB coherent. */ unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) | (1UL << 61) | (1UL << 60); if (safe_copy_to_guest (in1, &res, sizeof (res))) { status = PAL_STATUS_EINVAL; goto fail_to_copy; } status = PAL_STATUS_SUCCESS; } break; case PAL_HALT: set_bit(_VPF_down, ¤t->pause_flags); vcpu_sleep_nosync(current); status = PAL_STATUS_SUCCESS; break; case PAL_HALT_LIGHT: if (VMX_DOMAIN(current)) { /* Called by VTI. */ if (!is_unmasked_irq(current)) { do_sched_op_compat(SCHEDOP_block, 0); do_softirq(); } status = PAL_STATUS_SUCCESS; } break; case PAL_PLATFORM_ADDR: if (VMX_DOMAIN(current)) status = PAL_STATUS_SUCCESS; break; case PAL_FIXED_ADDR: status = PAL_STATUS_SUCCESS; r9 = current->vcpu_id; break; case PAL_PREFETCH_VISIBILITY: status = ia64_pal_prefetch_visibility(in1); if (status == 0) { /* must be performed on all remote processors in the coherence domain. */ smp_call_function(remote_pal_prefetch_visibility, (void *)in1, 1, 1); status = 1; /* no more necessary on remote processor */ } break; case PAL_MC_DRAIN: status = ia64_pal_mc_drain(); /* FIXME: All vcpus likely call PAL_MC_DRAIN. That causes the congestion. 
*/ smp_call_function(remote_pal_mc_drain, NULL, 1, 1); break; case PAL_BRAND_INFO: if (in1 == 0) { char brand_info[128]; status = ia64_pal_get_brand_info(brand_info); if (status != PAL_STATUS_SUCCESS) break; if (safe_copy_to_guest(in2, brand_info, sizeof(brand_info))) { status = PAL_STATUS_EINVAL; goto fail_to_copy; } } else { status = PAL_STATUS_EINVAL; } break; case PAL_LOGICAL_TO_PHYSICAL: case PAL_GET_PSTATE: case PAL_CACHE_SHARED_INFO: /* Optional, no need to complain about being unimplemented */ break; default: printk("%s: Unimplemented PAL Call %lu\n", __func__, index); break; } return ((struct ia64_pal_retval) {status, r9, r10, r11});fail_to_copy: gdprintk(XENLOG_WARNING, "PAL(%ld) fail to copy!!! args 0x%lx 0x%lx 0x%lx\n", index, in1, in2, in3); return ((struct ia64_pal_retval) {status, r9, r10, r11});}// given a current domain (virtual or metaphysical) address, return the virtual addressstatic unsigned longefi_translate_domain_addr(unsigned long domain_addr, IA64FAULT *fault, struct page_info** page){ struct vcpu *v = current; unsigned long mpaddr = domain_addr; unsigned long virt; *fault = IA64_NO_FAULT;again: if (v->domain->arch.sal_data->efi_virt_mode) { *fault = vcpu_tpa(v, domain_addr, &mpaddr); if (*fault != IA64_NO_FAULT) return 0; } virt = (unsigned long)domain_mpa_to_imva(v->domain, mpaddr); *page = virt_to_page(virt); if (get_page(*page, current->domain) == 0) { if (page_get_owner(*page) != current->domain) { // which code is appropriate? 
*fault = IA64_FAULT; return 0; } goto again; } return virt;}static efi_status_tefi_emulate_get_time( unsigned long tv_addr, unsigned long tc_addr, IA64FAULT *fault){ unsigned long tv, tc = 0; struct page_info *tv_page = NULL; struct page_info *tc_page = NULL; efi_status_t status = 0; efi_time_t *tvp; struct tm timeptr; unsigned long xtimesec; tv = efi_translate_domain_addr(tv_addr, fault, &tv_page); if (*fault != IA64_NO_FAULT) goto errout; if (tc_addr) { tc = efi_translate_domain_addr(tc_addr, fault, &tc_page); if (*fault != IA64_NO_FAULT) goto errout; } spin_lock(&efi_time_services_lock); status = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc); tvp = (efi_time_t *)tv; xtimesec = mktime(tvp->year, tvp->month, tvp->day, tvp->hour, tvp->minute, tvp->second); xtimesec += current->domain->time_offset_seconds; timeptr = gmtime(xtimesec); tvp->second = timeptr.tm_sec; tvp->minute = timeptr.tm_min; tvp->hour = timeptr.tm_hour; tvp->day = timeptr.tm_mday; tvp->month = timeptr.tm_mon + 1; tvp->year = timeptr.tm_year + 1900; spin_unlock(&efi_time_services_lock);errout: if (tc_page != NULL) put_page(tc_page); if (tv_page != NULL) put_page(tv_page); return status;}
/*
 * (Extraction residue: keyboard-shortcut help from the web code viewer
 * this page was captured from — copy Ctrl+C, search Ctrl+F, fullscreen
 * F11, font size Ctrl+= / Ctrl+-, shortcut list "?".  Not part of the
 * original source file.)
 */