📄 exec-dm.c
/* (listing begins mid-function: the tail of the I/O-memory slot
 * registration, which validates/allocates the slot and installs the
 * 8/16/32-bit callbacks) */
    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#ifdef __ia64__
#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")
#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")

/* IA64 has separate I- and D-caches, with coherence maintained by the DMA
 * controller.  To emulate the behaviour the guest OS expects, we need to
 * flush the I/D cache here. */
static void sync_icache(uint8_t *address, int len)
{
    unsigned long addr = (unsigned long)address;
    unsigned long end = addr + len;

    for (addr &= ~(32UL - 1); addr < end; addr += 32UL)
        __ia64_fc(addr);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);   /* at most one page: 'l', not 'len' */
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end))
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    }
    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
#define phys_ram_addr(x) (qemu_map_cache(x))
#elif defined(__ia64__)
#define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
#endif

extern unsigned long *logdirty_bitmap;
extern unsigned long logdirty_bitmap_size;

/*
 * Replace the standard byte memcpy with a word memcpy for appropriately sized
 * memory copy operations.  Some users (e.g. USB-UHCI) cannot tolerate the
 * word tearing that can result from a guest concurrently writing a memory
 * structure while the qemu device model is modifying the same location.
 * Forcing a word-sized read/write prevents the guest from seeing a partially
 * written word-sized atom.
 */
#if defined(__x86_64__) || defined(__i386__)
static void memcpy_words(void *dst, void *src, size_t n)
{
    asm volatile (
        "   movl %%edx,%%ecx \n"
#ifdef __x86_64__
        "   shrl $3,%%ecx    \n"
        "   rep  movsq       \n"   /* bulk copy, 8 bytes at a time */
        "   test $4,%%edx    \n"
        "   jz   1f          \n"
        "   movsl            \n"
#else /* __i386__ */
        "   shrl $2,%%ecx    \n"
        "   rep  movsl       \n"   /* bulk copy, 4 bytes at a time */
#endif
        "1: test $2,%%edx    \n"
        "   jz   1f          \n"
        "   movsw            \n"
        "1: test $1,%%edx    \n"
        "   jz   1f          \n"
        "   movsb            \n"
        "1:                  \n"
        : "+S" (src), "+D" (dst)
        : "d" (n)
        : "ecx", "memory");
}
#else
static void memcpy_words(void *dst, void *src, size_t n)
{
    /* Some architectures do not like unaligned accesses. */
    if (((unsigned long)dst | (unsigned long)src) & 3) {
        memcpy(dst, src, n);
        return;
    }

    while (n >= sizeof(uint32_t)) {
        *((uint32_t *)dst) = *((uint32_t *)src);
        dst = ((uint32_t *)dst) + 1;
        src = ((uint32_t *)src) + 1;
        n -= sizeof(uint32_t);
    }

    if (n & 2) {
        *((uint16_t *)dst) = *((uint16_t *)src);
        dst = ((uint16_t *)dst) + 1;
        src = ((uint16_t *)src) + 1;
    }

    if (n & 1) {
        *((uint8_t *)dst) = *((uint8_t *)src);
        dst = ((uint8_t *)dst) + 1;
        src = ((uint8_t *)src) + 1;
    }
}
#endif

void cpu_physical_memory_rw(target_phys_addr_t _addr, uint8_t *buf,
                            int _len, int is_write)
{
    target_phys_addr_t addr = _addr;
    int len = _len;
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;

    mapcache_lock();

    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;

        io_index = iomem_index(addr);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Writing to RAM */
                memcpy_words(ptr, buf, l);
#ifndef CONFIG_STUBDOM
                if (logdirty_bitmap != NULL) {
                    /* Record that we have dirtied this frame */
                    unsigned long pfn = addr >> TARGET_PAGE_BITS;
                    if (pfn / 8 >= logdirty_bitmap_size) {
                        fprintf(logfile, "dirtying pfn %lx >= bitmap "
                                "size %lx\n", pfn, logdirty_bitmap_size * 8);
                    } else {
                        logdirty_bitmap[pfn / HOST_LONG_BITS]
                            |= 1UL << pfn % HOST_LONG_BITS;
                    }
                }
#endif
#ifdef __ia64__
                sync_icache(ptr, l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else if ((ptr = phys_ram_addr(addr)) != NULL) {
                /* Reading from RAM */
                memcpy_words(buf, ptr, l);
            } else {
                /* Neither RAM nor known MMIO space */
                memset(buf, 0xff, len);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

#ifdef CONFIG_STUBDOM
    if (logdirty_bitmap != NULL)
        xc_hvm_modified_memory(xc_handle, domid, _addr >> TARGET_PAGE_BITS,
                               ((_addr + _len + TARGET_PAGE_SIZE - 1)
                                >> TARGET_PAGE_BITS)
                               - (_addr >> TARGET_PAGE_BITS));
#endif

    mapcache_unlock();
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;

    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for (i = 0; i < len; i++)
        p[i] &= mask;
}
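The registration tail at the top of this listing is easiest to read alongside a caller. Below is a minimal sketch of how a device model might use it: everything named demo_* plus DEMO_BASE/DEMO_SIZE is hypothetical, and it assumes the usual old-QEMU callback typedefs (a CPUReadMemoryFunc takes opaque and address, a CPUWriteMemoryFunc additionally takes the value) and that cpu_register_physical_memory() is what records the region in the mmio[] table that iomem_index() scans.

/* Hypothetical device state and callbacks -- not part of exec-dm.c. */
static uint32_t demo_reg;

static uint32_t demo_read(void *opaque, target_phys_addr_t addr)
{
    return demo_reg;            /* one handler reused for all widths, for brevity */
}

static void demo_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    demo_reg = val;
}

/* Slot 0 = 8-bit, 1 = 16-bit, 2 = 32-bit, matching the "i < 3" loop above. */
static CPUReadMemoryFunc  *demo_reads[3]  = { demo_read,  demo_read,  demo_read  };
static CPUWriteMemoryFunc *demo_writes[3] = { demo_write, demo_write, demo_write };

#define DEMO_BASE 0xfe000000    /* hypothetical MMIO window */
#define DEMO_SIZE 0x1000

static void demo_init(void)
{
    /* Passing io_index 0 requests a fresh slot (the "io_index <= 0" branch). */
    int iomem = cpu_register_io_memory(0, demo_reads, demo_writes, NULL);

    /* Map the window so iomem_index() can find it. */
    cpu_register_physical_memory(DEMO_BASE, DEMO_SIZE, iomem);
}

With this in place, an aligned 4-byte cpu_physical_memory_rw(DEMO_BASE, buf, 4, 1) takes the io_index branch above: it assembles the value with ldl_raw() and makes a single call to the width-2 write callback instead of copying bytes into RAM.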
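The portable memcpy_words fallback can also be exercised in isolation. The harness below is ours (the main() and buffer names are not part of exec-dm.c); it reproduces the fallback's logic and copies an odd-length, word-aligned buffer to show the tail handling. The property the device model relies on is that every aligned 4-byte span moves with a single 32-bit load/store, so a concurrently running guest never observes a half-written word.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same logic as the portable fallback above: word-copy when both
 * pointers are 4-byte aligned, otherwise fall back to plain memcpy. */
static void memcpy_words(void *dst, void *src, size_t n)
{
    if (((unsigned long)dst | (unsigned long)src) & 3) {
        memcpy(dst, src, n);
        return;
    }
    while (n >= sizeof(uint32_t)) {         /* whole 32-bit words */
        *(uint32_t *)dst = *(uint32_t *)src;
        dst = (uint32_t *)dst + 1;
        src = (uint32_t *)src + 1;
        n -= sizeof(uint32_t);
    }
    if (n & 2) {                            /* trailing halfword */
        *(uint16_t *)dst = *(uint16_t *)src;
        dst = (uint16_t *)dst + 1;
        src = (uint16_t *)src + 1;
    }
    if (n & 1)                              /* trailing byte */
        *(uint8_t *)dst = *(uint8_t *)src;
}

int main(void)
{
    /* 11 bytes = two whole words + one halfword + one trailing byte. */
    uint32_t src_buf[3] = { 0x11223344, 0x55667788, 0x99aabbcc };
    uint32_t dst_buf[3] = { 0, 0, 0 };

    memcpy_words(dst_buf, src_buf, 11);
    assert(memcmp(dst_buf, src_buf, 11) == 0);

    printf("11-byte aligned copy OK\n");
    return 0;
}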