
📄 exec.c.svn-base

📁 An OSEK operating system we developed ourselves! Not sure whether it is acceptable.
💻 SVN-BASE
📖 Page 1 of 5
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;

    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");//debugger
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

//#if defined(DEBUG_TLB) //debugger
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
//#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    printf("\n cpu_tlb_update_dirty\n");//debugger
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
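Usage note (not part of the original file): the short sketch below illustrates how the comma-separated log-mask parser above, cpu_str_to_log_mask(), is typically driven, for example from a "-d in_asm,int" style debug option. The helper name enable_debug_items is hypothetical, and it assumes a cpu_set_log() function is available elsewhere in this source tree; treat it as an illustrative sketch, not code from exec.c.

/* Illustrative sketch only: convert a user-supplied debug-item string into a
   log mask and enable it. cpu_str_to_log_mask() is defined above;
   cpu_set_log() is assumed to be declared elsewhere in this tree. */
static void enable_debug_items(const char *items)   /* hypothetical helper */
{
    int mask = cpu_str_to_log_mask(items);  /* e.g. "in_asm,int" or "all" */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
        return;
    }
    cpu_set_log(mask);                      /* apply the combined mask */
}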
