exec.c
#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
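
/* Illustrative sketch (not part of the original file): tb_find_pc()
   is the building block for mapping a host code address -- for
   example the faulting PC captured by a host signal handler -- back
   to the translation block that generated it. The helper below is
   hypothetical; only tb_find_pc() and the TB fields it reads are
   defined in this file. */
static target_ulong sketch_guest_pc_for_host_pc(unsigned long host_pc)
{
    TranslationBlock *tb;

    tb = tb_find_pc(host_pc);
    if (!tb)
        return (target_ulong)-1; /* host_pc is outside the code buffer */
    /* tb->pc is the guest PC at which the block starts; recovering the
       exact guest instruction would require re-examining the block. */
    return tb->pc;
}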

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
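
/* Illustrative sketch (not part of the original file): how a debugger
   stub might drive the breakpoint and single-step API above. The
   function name and the guest address 0x1000 are made up for the
   example. */
static void sketch_debug_setup(CPUState *env)
{
    /* Request a stop when the guest reaches 0x1000; the TBs covering
       that address are invalidated so the check gets recompiled in. */
    if (cpu_breakpoint_insert(env, 0x1000) < 0) {
        /* breakpoint table full (MAX_BREAKPOINTS) or no ICE support */
        return;
    }
    /* Once the CPU loop reports EXCP_DEBUG, continue one instruction
       at a time. Note that toggling this flushes all translated code. */
    cpu_single_step(env, 1);
}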

/* takes a comma-separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
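
/* Illustrative sketch (not part of the original file): parsing a
   "-d"-style option string with cpu_str_to_log_mask() and turning the
   result into an open log file via cpu_set_log(). The helper name and
   the sample string are assumptions for the example. */
static void sketch_enable_logging(const char *arg)
{
    int mask;

    mask = cpu_str_to_log_mask(arg); /* e.g. "in_asm,cpu" or "all" */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask); /* opens logfilename lazily on first use */
}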