/*
 * exec-dm.c — Xen device-model (qemu-dm) physical memory access layer.
 * Excerpt: page 1 of 2 (~700 lines total) from the Xen virtual-machine
 * source package.
 */
* So to emulate right behavior that guest OS is assumed, we need to flush * I/D cache here. */static void sync_icache(uint8_t *address, int len){ unsigned long addr = (unsigned long)address; unsigned long end = addr + len; for (addr &= ~(32UL-1); addr < end; addr += 32UL) __ia64_fc(addr); ia64_sync_i(); ia64_srlz_i();}#endif /* physical memory access (slow version, mainly for debug) */#if defined(CONFIG_USER_ONLY)void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write){ int l, flags; target_ulong page; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; flags = page_get_flags(page); if (!(flags & PAGE_VALID)) return; if (is_write) { if (!(flags & PAGE_WRITE)) return; memcpy((uint8_t *)addr, buf, len); } else { if (!(flags & PAGE_READ)) return; memcpy(buf, (uint8_t *)addr, len); } len -= l; buf += l; addr += l; }}#elseint iomem_index(target_phys_addr_t addr){ int i; for (i = 0; i < mmio_cnt; i++) { unsigned long start, end; start = mmio[i].start; end = mmio[i].start + mmio[i].size; if ((addr >= start) && (addr < end)){ return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); } } return 0;}#if defined(__i386__) || defined(__x86_64__)#define phys_ram_addr(x) (qemu_map_cache(x))#elif defined(__ia64__)#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)#endifunsigned long *logdirty_bitmap;unsigned long logdirty_bitmap_size;/* * Replace the standard byte memcpy with a word memcpy for appropriately sized * memory copy operations. Some users (USB-UHCI) can not tolerate the possible * word tearing that can result from a guest concurrently writing a memory * structure while the qemu device model is modifying the same location. * Forcing a word-sized read/write prevents the guest from seeing a partially * written word-sized atom. 
*/#if defined(__x86_64__) || defined(__i386__)static void memcpy_words(void *dst, void *src, size_t n){ asm volatile ( " movl %%edx,%%ecx \n"#ifdef __x86_64__ " shrl $3,%%ecx \n" " rep movsq \n" " test $4,%%edx \n" " jz 1f \n" " movsl \n"#else /* __i386__ */ " shrl $2,%%ecx \n" " rep movsl \n"#endif "1: test $2,%%edx \n" " jz 1f \n" " movsw \n" "1: test $1,%%edx \n" " jz 1f \n" " movsb \n" "1: \n" : "+S" (src), "+D" (dst) : "d" (n) : "ecx", "memory" );}#elsestatic void memcpy_words(void *dst, void *src, size_t n){ /* Some architectures do not like unaligned accesses. */ if (((unsigned long)dst | (unsigned long)src) & 3) { memcpy(dst, src, n); return; } while (n >= sizeof(uint32_t)) { *((uint32_t *)dst) = *((uint32_t *)src); dst = ((uint32_t *)dst) + 1; src = ((uint32_t *)src) + 1; n -= sizeof(uint32_t); } if (n & 2) { *((uint16_t *)dst) = *((uint16_t *)src); dst = ((uint16_t *)dst) + 1; src = ((uint16_t *)src) + 1; } if (n & 1) { *((uint8_t *)dst) = *((uint8_t *)src); dst = ((uint8_t *)dst) + 1; src = ((uint8_t *)src) + 1; }}#endifvoid cpu_physical_memory_rw(target_phys_addr_t _addr, uint8_t *buf, int _len, int is_write){ target_phys_addr_t addr = _addr; int len = _len; int l, io_index; uint8_t *ptr; uint32_t val; mapcache_lock(); while (len > 0) { /* How much can we copy before the next page boundary? 
*/ l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK); if (l > len) l = len; io_index = iomem_index(addr); if (is_write) { if (io_index) { if (l >= 4 && ((addr & 3) == 0)) { /* 32 bit read access */ val = ldl_raw(buf); io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); l = 4; } else if (l >= 2 && ((addr & 1) == 0)) { /* 16 bit read access */ val = lduw_raw(buf); io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); l = 2; } else { /* 8 bit access */ val = ldub_raw(buf); io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); l = 1; } } else if ((ptr = phys_ram_addr(addr)) != NULL) { /* Writing to RAM */ memcpy_words(ptr, buf, l);#ifndef CONFIG_STUBDOM if (logdirty_bitmap != NULL) { /* Record that we have dirtied this frame */ unsigned long pfn = addr >> TARGET_PAGE_BITS; if (pfn / 8 >= logdirty_bitmap_size) { fprintf(logfile, "dirtying pfn %lx >= bitmap " "size %lx\n", pfn, logdirty_bitmap_size * 8); } else { logdirty_bitmap[pfn / HOST_LONG_BITS] |= 1UL << pfn % HOST_LONG_BITS; } }#endif#ifdef __ia64__ sync_icache(ptr, l);#endif } } else { if (io_index) { if (l >= 4 && ((addr & 3) == 0)) { /* 32 bit read access */ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); stl_raw(buf, val); l = 4; } else if (l >= 2 && ((addr & 1) == 0)) { /* 16 bit read access */ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); stw_raw(buf, val); l = 2; } else { /* 8 bit access */ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); stb_raw(buf, val); l = 1; } } else if ((ptr = phys_ram_addr(addr)) != NULL) { /* Reading from RAM */ memcpy_words(buf, ptr, l); } else { /* Neither RAM nor known MMIO space */ memset(buf, 0xff, len); } } len -= l; buf += l; addr += l; }#ifdef CONFIG_STUBDOM if (logdirty_bitmap != NULL) xc_hvm_modified_memory(xc_handle, domid, _addr >> TARGET_PAGE_BITS, (_addr + _len + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS - _addr >> TARGET_PAGE_BITS);#endif mapcache_unlock();}#endif/* virtual memory access for 
debug */
/* Walk the guest virtual address range page by page, translate each page
 * with cpu_get_phys_page_debug(), and forward the access to
 * cpu_physical_memory_rw().  Returns -1 if any page is unmapped, 0 on
 * success.  Used by the gdb stub / monitor. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr, uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* Bytes left on the current page, clamped to the request length. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/* Clear the given dirty_flags bits for every page-dirty byte in
 * [start, end).  'start' is rounded down and 'end' rounded up to page
 * boundaries before the range is computed. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    /* One dirty byte per page; clear the requested flag bits in each. */
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    return;
}

/* Unoptimised in Xen DM, nicked from git
 * aab33094073678d459ccaac5c60ea7533e8d1d8e */

/* Load an unsigned byte from guest physical memory. */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* Load a 16-bit value (target byte order swapped to host). */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* Load a 64-bit value (target byte order swapped to host). */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* Store a byte to guest physical memory. */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* Store a 16-bit value (host value swapped to target byte order). */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* Store a 64-bit value (host value swapped to target byte order). */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

/* stubs which we hope (think!)
are OK for Xen DM */

/* Store a 32-bit value (host value swapped to target byte order). */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    val = tswap32(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 4);
}

/* No TLB/dirty tracking shortcut needed in the Xen DM: plain store. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    stl_phys(addr, val);
}

/* Load a 32-bit value (target byte order swapped to host). */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    uint32_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 4);
    return tswap32(val);
}

/* ROM is ordinary guest RAM under the Xen DM, so a plain write suffices.
 * BUGFIX: dropped 'return <void expression>;' — returning the value of a
 * void call from a void function is a C constraint violation (it only
 * compiled as a GCC extension). */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write(addr, buf, len);
}

/* stub out various functions for Xen DM */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
}

void monitor_disas(CPUState *env, target_ulong pc, int nb_insn,
                   int is_physical, int flags)
{
}

void irq_info(void)
{
}

void pic_info(void)
{
}
/* End of excerpt (page 1 of 2) — viewer UI chrome removed. */