qemu-target-i386-dm
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);
    abort();
}

/* XXX: Simple implementation. Fix later */
#define MAX_MMIO 32
struct mmio_space {
    target_phys_addr_t start;
    unsigned long size;
    unsigned long io_index;
} mmio[MAX_MMIO];
unsigned long mmio_cnt;

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        if (mmio[i].start == start_addr) {
            mmio[i].io_index = phys_offset;
            mmio[i].size = size;
            return;
        }
    }

    if (mmio_cnt == MAX_MMIO) {
        fprintf(logfile, "too many mmio regions\n");
        exit(-1);
    }

    mmio[mmio_cnt].io_index = phys_offset;
    mmio[mmio_cnt].start = start_addr;
    mmio[mmio_cnt++].size = size;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#ifdef __ia64__

#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")
#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")

/* IA64 has separate I/D caches, with coherence maintained by the DMA
 * controller. To emulate the behavior the guest OS assumes, we need to
 * flush the I/D cache here.
 */
static void sync_icache(unsigned long address, int len)
{
    int l;

    for (l = 0; l < (len + 32); l += 32)
        __ia64_fc(address + l);

    ia64_sync_i();
    ia64_srlz_i();
}
#endif

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else

int iomem_index(target_phys_addr_t addr)
{
    int i;

    for (i = 0; i < mmio_cnt; i++) {
        unsigned long start, end;

        start = mmio[i].start;
        end = mmio[i].start + mmio[i].size;

        if ((addr >= start) && (addr < end)) {
            return (mmio[i].io_index >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        }
    }
    return 0;
}

static inline int paddr_is_ram(target_phys_addr_t addr)
{
    /* Is this guest physical address RAM-backed? */
#if defined(CONFIG_DM) && (defined(__i386__) || defined(__x86_64__))
    return ((addr < HVM_BELOW_4G_MMIO_START) ||
            (addr >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH));
#else
    return (addr < ram_size);
#endif
}

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;

    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;

        io_index = iomem_index(addr);
        if (is_write) {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else if (paddr_is_ram(addr)) {
                /* Writing to RAM */
                memcpy(phys_ram_base + addr, buf, l);
#ifdef __ia64__
                sync_icache((unsigned long)(phys_ram_base + addr), l);
#endif
            }
        } else {
            if (io_index) {
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else if (paddr_is_ram(addr)) {
                /* Reading from RAM */
                memcpy(buf, phys_ram_base + addr, l);
            } else {
                /* Neither RAM nor known MMIO space */
                memset(buf, 0xff, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    unsigned long length;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for (i = 0; i < len; i++)
        p[i] &= mask;
}

Index: ioemu/target-i386-dm/helper2.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ ioemu/target-i386-dm/helper2.c	2007-05-11 10:04:05.000000000 +0100
@@ -0,0 +1,542 @@
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Main cpu loop for handling I/O requests coming from a virtual machine
 * Copyright
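
A few usage notes on the exec-dm.c code above. The comma-separated parsing in cpu_str_to_log_mask() is easy to check in isolation; the standalone sketch below reproduces the same tokenize-and-match loop against a toy three-entry table (the names and mask values are stand-ins, not the real CPU_LOG_* constants):

#include <stdio.h>
#include <string.h>

struct log_item { int mask; const char *name; };

static const struct log_item items[] = {
    { 1 << 0, "int" },
    { 1 << 1, "exec" },
    { 1 << 2, "ioport" },
    { 0, NULL },
};

/* Compare the n-byte token s1 against the NUL-terminated name s2. */
static int tok_eq(const char *s1, size_t n, const char *s2)
{
    return strlen(s2) == n && memcmp(s1, s2, n) == 0;
}

static int str_to_mask(const char *str)
{
    const struct log_item *it;
    const char *p = str, *p1;
    int mask = 0;

    for (;;) {
        /* Each token runs up to the next comma or the end of the string. */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (tok_eq(p, p1 - p, "all")) {
            for (it = items; it->mask; it++)
                mask |= it->mask;
        } else {
            for (it = items; it->mask; it++)
                if (tok_eq(p, p1 - p, it->name))
                    break;
            if (!it->mask)
                return 0;        /* unknown name: report error */
            mask |= it->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

int main(void)
{
    printf("%#x\n", str_to_mask("int,ioport")); /* prints 0x5 */
    printf("%#x\n", str_to_mask("all"));        /* prints 0x7 */
    printf("%#x\n", str_to_mask("bogus"));      /* prints 0 */
    return 0;
}

Note that an unknown name anywhere in the list makes the whole call return 0, which is how a caller distinguishes an error from a valid mask.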
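As the comment above cpu_register_io_memory() describes, a device model supplies byte/word/dword handler triples and then maps the returned token at a guest-physical address. A hypothetical registration might look like the sketch below; the mydev_* names, the address, and the size are illustrative and not part of the listing, and the QEMU/ioemu types (CPUReadMemoryFunc, target_phys_addr_t, ...) are assumed to be in scope via the usual headers:

/* Stub handlers for a hypothetical device occupying one MMIO page. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0xff; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0xffff; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0xffffffff; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t v) {}
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t v) {}
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t v) {}

/* Index 0/1/2 = byte/word/dword, matching the contract in the comment. */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

void mydev_init(void *opaque)
{
    /* io_index == 0 asks for a fresh zone; the return value already has
       IO_MEM_SHIFT applied, so it can be passed straight through as the
       phys_offset of an MMIO page. */
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(0xf2000000, 0x1000, io);
}

The mmio[] table in cpu_register_physical_memory() then lets iomem_index() map a faulting guest address back to this handler slot.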
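paddr_is_ram() treats everything outside the below-4G MMIO hole as RAM-backed. The quick standalone check below exercises that predicate with assumed values for the Xen constants (the real HVM_BELOW_4G_MMIO_* values come from the HVM e820 headers):

#include <stdio.h>

#define HVM_BELOW_4G_MMIO_START  0xF0000000ULL  /* assumed value */
#define HVM_BELOW_4G_MMIO_LENGTH 0x10000000ULL  /* assumed value */

/* Mirror of the CONFIG_DM branch of paddr_is_ram() in the listing. */
static int paddr_is_ram(unsigned long long addr)
{
    return addr < HVM_BELOW_4G_MMIO_START ||
           addr >= HVM_BELOW_4G_MMIO_START + HVM_BELOW_4G_MMIO_LENGTH;
}

int main(void)
{
    printf("%d %d %d\n",
           paddr_is_ram(0x10000000ULL),   /* below the hole: 1 */
           paddr_is_ram(0xF8000000ULL),   /* inside the hole: 0 */
           paddr_is_ram(0x100000000ULL)); /* above 4G: 1 */
    return 0;
}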
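Finally, the MMIO path of cpu_physical_memory_rw() never crosses a page boundary in one iteration, and within a page it narrows each access to the widest naturally aligned size available. This standalone sketch (assuming 4 KiB target pages) prints the access sequence that logic produces for a given transfer:

#include <stdio.h>

#define TARGET_PAGE_SIZE 4096
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

static void show_mmio_split(unsigned long addr, int len)
{
    while (len > 0) {
        /* How much can we copy before the next page boundary? */
        int l = TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK);
        if (l > len)
            l = len;
        /* Narrow to the widest aligned access, as in the listing. */
        if (l >= 4 && (addr & 3) == 0)
            l = 4;                      /* aligned dword access */
        else if (l >= 2 && (addr & 1) == 0)
            l = 2;                      /* aligned word access */
        else
            l = 1;                      /* byte access */
        printf("  %d-byte access at %#lx\n", l, addr);
        len -= l;
        addr += l;
    }
}

int main(void)
{
    show_mmio_split(0xf2000001UL, 7);
    return 0;
}

A 7-byte transfer starting at 0xf2000001 therefore becomes a byte, a word, then a dword, mirroring the l = 4/2/1 narrowing in the listing.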