📄 op_helper.c

📁 QEMU 0.9.1 source code; supports ARM processors, including the S3C24xx series (the file shown on this page is the MIPS target's op_helper.c)
💻 C
    cpu_abort(env, "mtc0 compare\n");}void cpu_mips_start_count(CPUState *env){    cpu_abort(env, "start count\n");}void cpu_mips_stop_count(CPUState *env){    cpu_abort(env, "stop count\n");}void cpu_mips_update_irq(CPUState *env){    cpu_abort(env, "mtc0 status / mtc0 cause\n");}void do_mtc0_status_debug(uint32_t old, uint32_t val){    cpu_abort(env, "mtc0 status debug\n");}void do_mtc0_status_irqraise_debug (void){    cpu_abort(env, "mtc0 status irqraise debug\n");}void cpu_mips_tlb_flush (CPUState *env, int flush_global){    cpu_abort(env, "mips_tlb_flush\n");}#else/* CP0 helpers */void do_mfc0_random (void){    T0 = (int32_t)cpu_mips_get_random(env);}void do_mfc0_count (void){    T0 = (int32_t)cpu_mips_get_count(env);}void do_mtc0_status_debug(uint32_t old, uint32_t val){    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",            old, old & env->CP0_Cause & CP0Ca_IP_mask,            val, val & env->CP0_Cause & CP0Ca_IP_mask,            env->CP0_Cause);    switch (env->hflags & MIPS_HFLAG_KSU) {    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;    case MIPS_HFLAG_KM: fputs("\n", logfile); break;    default: cpu_abort(env, "Invalid MMU mode!\n"); break;    }}void do_mtc0_status_irqraise_debug(void){    fprintf(logfile, "Raise pending IRQs\n");}void fpu_handle_exception(void){#ifdef CONFIG_SOFTFLOAT    int flags = get_float_exception_flags(&env->fpu->fp_status);    unsigned int cpuflags = 0, enable, cause = 0;    enable = GET_FP_ENABLE(env->fpu->fcr31);    /* determine current flags */    if (flags & float_flag_invalid) {        cpuflags |= FP_INVALID;        cause |= FP_INVALID & enable;    }    if (flags & float_flag_divbyzero) {        cpuflags |= FP_DIV0;        cause |= FP_DIV0 & enable;    }    if (flags & float_flag_overflow) {        cpuflags |= FP_OVERFLOW;        cause |= FP_OVERFLOW & enable;    }    if (flags & float_flag_underflow) {        cpuflags |= FP_UNDERFLOW;        cause |= FP_UNDERFLOW & enable;    }    if (flags & float_flag_inexact) {        cpuflags |= FP_INEXACT;        cause |= FP_INEXACT & enable;    }    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);    SET_FP_CAUSE(env->fpu->fcr31, cause);#else    SET_FP_FLAGS(env->fpu->fcr31, 0);    SET_FP_CAUSE(env->fpu->fcr31, 0);#endif}/* TLB management */void cpu_mips_tlb_flush (CPUState *env, int flush_global){    /* Flush qemu's TLB and discard all shadowed entries.  */    tlb_flush (env, flush_global);    env->tlb->tlb_in_use = env->tlb->nb_tlb;}static void r4k_mips_tlb_flush_extra (CPUState *env, int first){    /* Discard entries from env->tlb[first] onwards.  
*/    while (env->tlb->tlb_in_use > first) {        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);    }}static void r4k_fill_tlb (int idx){    r4k_tlb_t *tlb;    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */    tlb = &env->tlb->mmu.r4k.tlb[idx];    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);#if defined(TARGET_MIPS64)    tlb->VPN &= env->SEGMask;#endif    tlb->ASID = env->CP0_EntryHi & 0xFF;    tlb->PageMask = env->CP0_PageMask;    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;}void r4k_do_tlbwi (void){    /* Discard cached TLB entries.  We could avoid doing this if the       tlbwi is just upgrading access permissions on the current entry;       that might be a further win.  */    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);}void r4k_do_tlbwr (void){    int r = cpu_mips_get_random(env);    r4k_invalidate_tlb(env, r, 1);    r4k_fill_tlb(r);}void r4k_do_tlbp (void){    r4k_tlb_t *tlb;    target_ulong mask;    target_ulong tag;    target_ulong VPN;    uint8_t ASID;    int i;    ASID = env->CP0_EntryHi & 0xFF;    for (i = 0; i < env->tlb->nb_tlb; i++) {        tlb = &env->tlb->mmu.r4k.tlb[i];        /* 1k pages are not supported. */        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);        tag = env->CP0_EntryHi & ~mask;        VPN = tlb->VPN & ~mask;        /* Check ASID, virtual page number & size */        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {            /* TLB match */            env->CP0_Index = i;            break;        }    }    if (i == env->tlb->nb_tlb) {        /* No match.  Discard any shadow entries, if any of them match.  */        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {	    tlb = &env->tlb->mmu.r4k.tlb[i];	    /* 1k pages are not supported. */	    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);	    tag = env->CP0_EntryHi & ~mask;	    VPN = tlb->VPN & ~mask;	    /* Check ASID, virtual page number & size */	    if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {                r4k_mips_tlb_flush_extra (env, i);	        break;	    }	}        env->CP0_Index |= 0x80000000;    }}void r4k_do_tlbr (void){    r4k_tlb_t *tlb;    uint8_t ASID;    ASID = env->CP0_EntryHi & 0xFF;    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];    /* If this will change the current ASID, flush qemu's TLB.  
*/    if (ASID != tlb->ASID)        cpu_mips_tlb_flush (env, 1);    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);    env->CP0_EntryHi = tlb->VPN | tlb->ASID;    env->CP0_PageMask = tlb->PageMask;    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);}#endif /* !CONFIG_USER_ONLY */void dump_ldst (const unsigned char *func){    if (loglevel)        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);}void dump_sc (void){    if (loglevel) {        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,                T1, T0, env->CP0_LLAddr);    }}void debug_pre_eret (void){    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,            env->PC[env->current_tc], env->CP0_EPC);    if (env->CP0_Status & (1 << CP0St_ERL))        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);    if (env->hflags & MIPS_HFLAG_DM)        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);    fputs("\n", logfile);}void debug_post_eret (void){    fprintf(logfile, "  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,            env->PC[env->current_tc], env->CP0_EPC);    if (env->CP0_Status & (1 << CP0St_ERL))        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);    if (env->hflags & MIPS_HFLAG_DM)        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);    switch (env->hflags & MIPS_HFLAG_KSU) {    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;    case MIPS_HFLAG_KM: fputs("\n", logfile); break;    default: cpu_abort(env, "Invalid MMU mode!\n"); break;    }}void do_pmon (int function){    function /= 2;    switch (function) {    case 2: /* TODO: char inbyte(int waitflag); */        if (env->gpr[4][env->current_tc] == 0)            env->gpr[2][env->current_tc] = -1;        /* Fall through */    case 11: /* TODO: char inbyte (void); */        env->gpr[2][env->current_tc] = -1;        break;    case 3:    case 12:        printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));        break;    case 17:        break;    case 158:        {            unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];            printf("%s", fmt);        }        break;    }}#if !defined(CONFIG_USER_ONLY)static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);#define MMUSUFFIX _mmu#define ALIGNED_ONLY#define SHIFT 0#include "softmmu_template.h"#define SHIFT 1#include "softmmu_template.h"#define SHIFT 2#include "softmmu_template.h"#define SHIFT 3#include "softmmu_template.h"static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr){    env->CP0_BadVAddr = addr;    do_restore_state (retaddr);    do_raise_exception ((is_write == 1) ? 
EXCP_AdES : EXCP_AdEL);}void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr){    TranslationBlock *tb;    CPUState *saved_env;    unsigned long pc;    int ret;    /* XXX: hack to restore env in all cases, even if not called from       generated code */    saved_env = env;    env = cpu_single_env;    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);    if (ret) {        if (retaddr) {            /* now we have a real cpu fault */            pc = (unsigned long)retaddr;            tb = tb_find_pc(pc);            if (tb) {                /* the PC is inside the translated code. It means that we have                   a virtual CPU fault */                cpu_restore_state(tb, env, pc, NULL);            }        }        do_raise_exception_err(env->exception_index, env->error_code);    }    env = saved_env;}void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,                          int unused){    if (is_exec)        do_raise_exception(EXCP_IBE);    else        do_raise_exception(EXCP_DBE);}#endif
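
The probe logic in r4k_do_tlbp above reduces to a mask-and-compare on the virtual page number: the PageMask and the low 13 bits (the even/odd page pair plus ASID field) are masked off both EntryHi and the entry's VPN, and a match requires either a global entry or an ASID match. The standalone sketch below reproduces that test with concrete values; it assumes 4 KB pages and a 32-bit address, and the names tlb_probe_match and EX_TARGET_PAGE_MASK are illustrative only, not QEMU APIs.

/* Standalone illustration of the r4k_do_tlbp match test above.
 * Assumes 4 KB pages and 32-bit addresses; names here are illustrative
 * and not part of QEMU. */
#include <stdint.h>
#include <stdio.h>

#define EX_TARGET_PAGE_MASK (~(uint32_t)0xFFF)   /* 4 KB pages (assumption) */

/* Same test as r4k_do_tlbp: mask the offset/ASID bits out of EntryHi and
 * the entry's VPN, then compare, honouring the Global bit. */
static int tlb_probe_match(uint32_t entry_hi, uint32_t vpn, uint32_t page_mask,
                           uint8_t entry_asid, int global)
{
    uint32_t mask = page_mask | ~(EX_TARGET_PAGE_MASK << 1); /* even/odd page pair */
    uint32_t tag  = entry_hi & ~mask;
    uint32_t tlb_vpn = vpn & ~mask;
    uint8_t asid = entry_hi & 0xFF;

    return (global || asid == entry_asid) && tlb_vpn == tag;
}

int main(void)
{
    /* EntryHi = virtual address 0x00400000 with ASID 0x05; the TLB entry
       maps the same even/odd page pair. */
    uint32_t entry_hi = 0x00400000 | 0x05;
    uint32_t vpn = 0x00400000 & (EX_TARGET_PAGE_MASK << 1);

    printf("same ASID:              %d\n", tlb_probe_match(entry_hi, vpn, 0, 0x05, 0)); /* 1 */
    printf("wrong ASID, not global: %d\n", tlb_probe_match(entry_hi, vpn, 0, 0x07, 0)); /* 0 */
    printf("wrong ASID, global:     %d\n", tlb_probe_match(entry_hi, vpn, 0, 0x07, 1)); /* 1 */
    return 0;
}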
