📄 exec.c
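/* NOTE: this excerpt from exec.c covers the software-MMU dirty-memory
   tracking and page-protection helpers: the phys_ram_dirty bitmap is
   kept in sync with every CPU's TLB so that stores to watched pages
   (self-modifying-code detection, among other users of the dirty
   bits) are forced through the slow path. */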
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
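/* NOTE on the not-dirty mechanism used above: a clean RAM page is
   entered in the TLB with IO_MEM_NOTDIRTY in the low bits of
   addr_write, so the inline tag comparison in generated store code
   fails and the store takes the I/O slow path.  That slow path sets
   the page's dirty bits and, via tlb_set_dirty() below, rewrites the
   entry back to IO_MEM_RAM, so later stores to the page run at full
   speed again. */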
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we only map as read only and save
                           the original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd,
                                (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
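/* NOTE: te->addend as computed above stores (host address - guest
   virtual address), so once the addr_read/addr_write tag comparison
   succeeds, the generated code reaches host memory with a single add.
   For I/O pages the entry instead carries the I/O index in its low
   bits, which makes the tag comparison fail and forces the slow
   path. */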
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
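/* NOTE: PAGE_WRITE_ORG (set in page_set_flags above) remembers whether
   the guest originally mapped the page writable.  When translated code
   is derived from a page, PAGE_WRITE is cleared and the host mapping
   is write-protected; page_unprotect() below consults PAGE_WRITE_ORG
   to tell a legitimate guest write (restore the protection, invalidate
   the TBs) from a genuine protection fault. */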
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);