exec.c.svn-base
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints
                       coexist with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only
                           and save the original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd,
                                (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
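/* Hedged illustration, not part of the original exec.c: a minimal,
   self-contained model of how the softmmu fast path consumes the
   addr_read/addend fields that tlb_set_page_exec fills in above. All
   SKETCH_* names and the simplified SketchTLBEntry are assumptions for
   illustration only; the real entry layout lives in cpu-defs.h and the
   real lookup is emitted by the softmmu templates. */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_BITS 12
#define SKETCH_PAGE_SIZE (1u << SKETCH_PAGE_BITS)
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))
#define SKETCH_TLB_SIZE  256

typedef struct {
    uint32_t addr_read;  /* page-aligned guest vaddr if readable, else -1 */
    uintptr_t addend;    /* host base minus guest vaddr, as set up above */
} SketchTLBEntry;

static SketchTLBEntry sketch_tlb[SKETCH_TLB_SIZE];
static uint8_t sketch_ram[SKETCH_PAGE_SIZE];

/* Fast-path byte load: compare the tag, then add the addend to get the
   host address; a mismatch would fall back to a refill (tlb_fill ->
   tlb_set_page_exec) in the real code. */
static int sketch_ldub(uint32_t vaddr, uint8_t *out)
{
    unsigned idx = (vaddr >> SKETCH_PAGE_BITS) & (SKETCH_TLB_SIZE - 1);
    SketchTLBEntry *te = &sketch_tlb[idx];

    if (te->addr_read != (vaddr & SKETCH_PAGE_MASK))
        return -1;  /* TLB miss */
    *out = *(uint8_t *)(vaddr + te->addend);
    return 0;
}

int main(void)
{
    uint32_t vaddr = 0x4000;
    unsigned idx = (vaddr >> SKETCH_PAGE_BITS) & (SKETCH_TLB_SIZE - 1);
    uint8_t v;

    sketch_ram[5] = 42;
    /* mirror of the "standard memory" branch of tlb_set_page_exec:
       tag is the page-aligned vaddr, addend maps vaddr to host RAM */
    sketch_tlb[idx].addr_read = vaddr & SKETCH_PAGE_MASK;
    sketch_tlb[idx].addend = (uintptr_t)sketch_ram - vaddr;

    if (sketch_ldub(vaddr + 5, &v) == 0)
        printf("read %u\n", v);  /* prints 42 */
    return 0;
}
#endif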
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

//#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
//#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
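/* Hedged illustration (not in the original source): with the format
   strings above, page_dump() prints one line per run of pages sharing
   the same protection. The addresses below are made up purely to show
   the shape of the output:

       start    end      size     prot
       00400000-00452000 00052000 r-x
       00452000-00454000 00002000 rw-
*/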
/* modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending on
   PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr) /* target start address already scanned */ \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
            /* low 12 bits of start_addr > 0, i.e. start_addr is not    \
               page-aligned and must be rounded */                      \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
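/* Hedged worked example (not in the original source), assuming
   TARGET_PAGE_SIZE == 0x1000: registering start_addr = 0x1234 with
   orig_size = 0x100 and scanning the page at addr = 0x1000 gives

       start_addr2 = 0x1234 & 0xfff = 0x234   (> 0, so need_subpage = 1)
       end_addr2   = 0x1333 & 0xfff = 0x333   (< 0xfff, so need_subpage = 1)

   i.e. the new region covers only bytes 0x234..0x333 of that target page,
   so it must be handled by the subpage machinery declared above. */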
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size; /* original (unrounded) size */
    void *subpage;

    /* true size: the allocated space covers ceil(size / TARGET_PAGE_SIZE)
       target pages */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS); /* physical page descriptor lookup */
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* the current page is already used by another registration */
            unsigned long orig_memory = p->phys_offset; /* save the original page's memory type */
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            /* check whether the region's start address lies on a page
               boundary, i.e. whether its low 12 bits are zero */
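/* Hedged usage sketch (not from this file): board initialization code of
   this QEMU generation registers memory regions roughly like this, where
   `ram_size` and `bios_offset` are assumptions for illustration
   (an offset previously carved out of phys_ram_base):

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0xfffc0000, 0x40000,
                                    bios_offset | IO_MEM_ROM);

   The low bits of phys_offset select the handler (RAM, ROM, or an I/O
   index obtained from cpu_register_io_memory()), which is why the comment
   above says a non page-aligned phys_offset denotes an I/O page. */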