
exec.c
QEMU virtual machine code (C)
Page 1 of 5
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error.
*/
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
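                /* a RAM write may touch a page that backs translated code:
                   if the page is not already marked fully dirty, the TBs
                   covering the written range are invalidated and the dirty
                   bits are set again below */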
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
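The comments above cpu_register_io_memory() and cpu_register_physical_memory() describe how an MMIO region is wired up: the first call allocates (or updates) an I/O zone from per-size handler tables, and its return value is passed as the phys_offset of the pages that should be routed to those handlers. Below is a minimal sketch, not part of exec.c, of how a device model would typically combine the two calls. The DemoState type, the demo_* names, the base address and the register layout are invented for illustration; the snippet assumes it is compiled inside the same QEMU source tree, so target_phys_addr_t, CPUReadMemoryFunc and CPUWriteMemoryFunc are already declared.

/* --- illustrative sketch, not part of exec.c ----------------------------- */

typedef struct DemoState {
    uint32_t reg;   /* single 32-bit register, hypothetical */
} DemoState;

static uint32_t demo_mem_readl(void *opaque, target_phys_addr_t addr)
{
    DemoState *s = opaque;
    return s->reg;                      /* every offset reads the register */
}

static void demo_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DemoState *s = opaque;
    s->reg = val;
}

/* one handler per access size: byte (index 0), word (1), dword (2) */
static CPUReadMemoryFunc *demo_mem_read[3] = {
    demo_mem_readl,
    demo_mem_readl,
    demo_mem_readl,
};

static CPUWriteMemoryFunc *demo_mem_write[3] = {
    demo_mem_writel,
    demo_mem_writel,
    demo_mem_writel,
};

static void demo_init(DemoState *s, target_phys_addr_t base)
{
    int iomemtype;

    /* io_index == 0 asks for a new I/O zone; the return value already has
       IO_MEM_SHIFT applied, so it can be used directly as the phys_offset
       of an I/O page */
    iomemtype = cpu_register_io_memory(0, demo_mem_read, demo_mem_write, s);

    /* map one target page of MMIO at 'base'; the size must be a multiple
       of the target page size */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}

Once mapped this way, guest accesses to that page reach the handlers through the io_mem_read/io_mem_write tables, which are the same tables that cpu_physical_memory_rw(), ldl_phys() and ldq_phys() above index with (pd >> IO_MEM_SHIFT).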
