translate.c
    case 12: /* ult (=-1 or =2) */
        gen_op_jmp_s32(flag, l1);
        gen_op_sub32(flag, flag, gen_im32(2));
        gen_op_jmp_z32(flag, l1);
        break;
    case 13: /* ule (!=1) */
        gen_op_sub32(flag, flag, gen_im32(1));
        gen_op_jmp_nz32(flag, l1);
        break;
    case 14: /* ne (!=0) */
        gen_op_jmp_nz32(flag, l1);
        break;
    case 15: /* t */
        gen_op_mov32(flag, gen_im32(1));
        break;
    }
    gen_jmp_tb(s, 0, s->pc);
    gen_set_label(l1);
    gen_jmp_tb(s, 1, addr + offset);
}

static disas_proc opcode_table[65536];

static void
register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
{
    int i;
    int from;
    int to;

    /* Sanity check.  All set bits must be included in the mask.  */
    if (opcode & ~mask)
        abort();
    /* This could probably be cleverer.  For now just optimize the case
       where the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    i = 0x8000;
    while ((i & mask) != 0)
        i >>= 1;
    /* Iterate over all combinations of this and lower bits.  */
    if (i == 0)
        i = 1;
    else
        i <<= 1;
    from = opcode & ~(i - 1);
    to = from + i;
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
    }
}
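/* Example: the scc entry registered below uses opcode 0x50c0 and mask 0xf0f8.
   The first clear mask bit is 0x0800, so i becomes 0x1000 and the loop scans
   from 0x5000 to 0x5fff, filling every slot whose masked bits equal 0x50c0
   (0x50c0-0x50c7, 0x51c0-0x51c7, ..., 0x5fc0-0x5fc7).  A single INSN() line
   can therefore claim many entries of the 64k-entry dispatch table.  */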
/* Register m68k opcode handlers.  Order is important.
   Later insns override earlier ones.  */
static void
register_m68k_insns (m68k_def_t *def)
{
    uint32_t iflags;

    iflags = def->insns;
#define INSN(name, opcode, mask, isa) \
    if (iflags & M68K_INSN_##isa) \
        register_opcode(disas_##name, 0x##opcode, 0x##mask)
    INSN(undef, 0000, 0000, CF_A);
    INSN(arith_im, 0080, fff8, CF_A);
    INSN(bitrev, 00c0, fff8, CF_C);
    INSN(bitop_reg, 0100, f1c0, CF_A);
    INSN(bitop_reg, 0140, f1c0, CF_A);
    INSN(bitop_reg, 0180, f1c0, CF_A);
    INSN(bitop_reg, 01c0, f1c0, CF_A);
    INSN(arith_im, 0280, fff8, CF_A);
    INSN(byterev, 02c0, fff8, CF_A);
    INSN(arith_im, 0480, fff8, CF_A);
    INSN(ff1, 04c0, fff8, CF_C);
    INSN(arith_im, 0680, fff8, CF_A);
    INSN(bitop_im, 0800, ffc0, CF_A);
    INSN(bitop_im, 0840, ffc0, CF_A);
    INSN(bitop_im, 0880, ffc0, CF_A);
    INSN(bitop_im, 08c0, ffc0, CF_A);
    INSN(arith_im, 0a80, fff8, CF_A);
    INSN(arith_im, 0c00, ff38, CF_A);
    INSN(move, 1000, f000, CF_A);
    INSN(move, 2000, f000, CF_A);
    INSN(move, 3000, f000, CF_A);
    INSN(strldsr, 40e7, ffff, CF_A);
    INSN(negx, 4080, fff8, CF_A);
    INSN(move_from_sr, 40c0, fff8, CF_A);
    INSN(lea, 41c0, f1c0, CF_A);
    INSN(clr, 4200, ff00, CF_A);
    INSN(undef, 42c0, ffc0, CF_A);
    INSN(move_from_ccr, 42c0, fff8, CF_A);
    INSN(neg, 4480, fff8, CF_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_A);
    INSN(not, 4680, fff8, CF_A);
    INSN(move_to_sr, 46c0, ffc0, CF_A);
    INSN(pea, 4840, ffc0, CF_A);
    INSN(swap, 4840, fff8, CF_A);
    INSN(movem, 48c0, fbc0, CF_A);
    INSN(ext, 4880, fff8, CF_A);
    INSN(ext, 48c0, fff8, CF_A);
    INSN(ext, 49c0, fff8, CF_A);
    INSN(tst, 4a00, ff00, CF_A);
    INSN(tas, 4ac0, ffc0, CF_B);
    INSN(halt, 4ac8, ffff, CF_A);
    INSN(pulse, 4acc, ffff, CF_A);
    INSN(illegal, 4afc, ffff, CF_A);
    INSN(mull, 4c00, ffc0, CF_A);
    INSN(divl, 4c40, ffc0, CF_A);
    INSN(sats, 4c80, fff8, CF_B);
    INSN(trap, 4e40, fff0, CF_A);
    INSN(link, 4e50, fff8, CF_A);
    INSN(unlk, 4e58, fff8, CF_A);
    INSN(move_to_usp, 4e60, fff8, CF_B);
    INSN(move_from_usp, 4e68, fff8, CF_B);
    INSN(nop, 4e71, ffff, CF_A);
    INSN(stop, 4e72, ffff, CF_A);
    INSN(rte, 4e73, ffff, CF_A);
    INSN(rts, 4e75, ffff, CF_A);
    INSN(movec, 4e7b, ffff, CF_A);
    INSN(jump, 4e80, ffc0, CF_A);
    INSN(jump, 4ec0, ffc0, CF_A);
    INSN(addsubq, 5180, f1c0, CF_A);
    INSN(scc, 50c0, f0f8, CF_A);
    INSN(addsubq, 5080, f1c0, CF_A);
    INSN(tpf, 51f8, fff8, CF_A);
    INSN(branch, 6000, f000, CF_A);
    INSN(moveq, 7000, f100, CF_A);
    INSN(mvzs, 7100, f100, CF_B);
    INSN(or, 8000, f000, CF_A);
    INSN(divw, 80c0, f0c0, CF_A);
    INSN(addsub, 9000, f000, CF_A);
    INSN(subx, 9180, f1f8, CF_A);
    INSN(suba, 91c0, f1c0, CF_A);
    INSN(undef_mac, a000, f000, CF_A);
    INSN(mov3q, a140, f1c0, CF_B);
    INSN(cmp, b000, f1c0, CF_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_A);
    INSN(cmpa, b1c0, f1c0, CF_A);
    INSN(eor, b180, f1c0, CF_A);
    INSN(and, c000, f000, CF_A);
    INSN(mulw, c0c0, f0c0, CF_A);
    INSN(addsub, d000, f000, CF_A);
    INSN(addx, d180, f1f8, CF_A);
    INSN(adda, d1c0, f1c0, CF_A);
    INSN(shift_im, e080, f0f0, CF_A);
    INSN(shift_reg, e0a0, f0f0, CF_A);
    INSN(undef_fpu, f000, f000, CF_A);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(intouch, f340, ffc0, CF_A);
    INSN(cpushl, f428, ff38, CF_A);
    INSN(wddata, fb00, ff00, CF_A);
    INSN(wdebug, fbc0, ffc0, CF_A);
#undef INSN
}

/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
static void disas_m68k_insn(CPUState *env, DisasContext *s)
{
    uint16_t insn;

    insn = lduw(s->pc);
    s->pc += 2;

    opcode_table[insn](s, insn);
}

#if 0
/* Save the result of a floating point operation.  */
static void expand_op_fp_result(qOP *qop)
{
    gen_op_movf64(QREG_FP_RESULT, qop->args[0]);
}

/* Dummy op to indicate that the flags have been set.  */
static void expand_op_flags_set(qOP *qop)
{
}

/* Convert the condition codes into CC_OP_FLAGS format.  */
static void expand_op_flush_flags(qOP *qop)
{
    int cc_opreg;

    if (qop->args[0] == CC_OP_DYNAMIC)
        cc_opreg = QREG_CC_OP;
    else
        cc_opreg = gen_im32(qop->args[0]);
    gen_op_helper32(QREG_NULL, cc_opreg, HELPER_flush_flags);
}

/* Set CC_DEST after a logical or direct flag setting operation.  */
static void expand_op_logic_cc(qOP *qop)
{
    gen_op_mov32(QREG_CC_DEST, qop->args[0]);
}

/* Set CC_SRC and CC_DEST after an arithmetic operation.  */
static void expand_op_update_cc_add(qOP *qop)
{
    gen_op_mov32(QREG_CC_DEST, qop->args[0]);
    gen_op_mov32(QREG_CC_SRC, qop->args[1]);
}

/* Update the X flag.  */
static void expand_op_update_xflag(qOP *qop)
{
    int arg0;
    int arg1;

    arg0 = qop->args[0];
    arg1 = qop->args[1];
    if (arg1 == QREG_NULL) {
        /* CC_X = arg0.  */
        gen_op_mov32(QREG_CC_X, arg0);
    } else {
        /* CC_X = arg0 < (unsigned)arg1.  */
        gen_op_set_ltu32(QREG_CC_X, arg0, arg1);
    }
}

/* Set arg0 to the contents of the X flag.  */
static void expand_op_get_xflag(qOP *qop)
{
    gen_op_mov32(qop->args[0], QREG_CC_X);
}

/* Expand a shift by immediate.  The ISA only allows shifts by 1-8, so we
   already know the shift is within range.  */
static inline void expand_shift_im(qOP *qop, int right, int arith)
{
    int val;
    int reg;
    int tmp;
    int im;

    reg = qop->args[0];
    im = qop->args[1];
    tmp = gen_im32(im);
    val = gen_new_qreg(QMODE_I32);
    gen_op_mov32(val, reg);
    gen_op_mov32(QREG_CC_DEST, val);
    gen_op_mov32(QREG_CC_SRC, tmp);
    if (right) {
        if (arith) {
            gen_op_sar32(reg, val, tmp);
        } else {
            gen_op_shr32(reg, val, tmp);
        }
        if (im == 1)
            tmp = QREG_NULL;
        else
            tmp = gen_im32(im - 1);
    } else {
        gen_op_shl32(reg, val, tmp);
        tmp = gen_im32(32 - im);
    }
    if (tmp != QREG_NULL)
        gen_op_shr32(val, val, tmp);
    gen_op_and32(QREG_CC_X, val, gen_im32(1));
}

static void expand_op_shl_im_cc(qOP *qop)
{
    expand_shift_im(qop, 0, 0);
}

static void expand_op_shr_im_cc(qOP *qop)
{
    expand_shift_im(qop, 1, 0);
}

static void expand_op_sar_im_cc(qOP *qop)
{
    expand_shift_im(qop, 1, 1);
}
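/* Note on expand_shift_im() above: QREG_CC_X receives the last bit shifted
   out of the operand.  For a right shift by im that is bit (im - 1) of the
   original value, recovered by shifting the saved copy right by im - 1 (or
   not at all when im == 1); for a left shift by im it is bit (32 - im),
   recovered by shifting the saved copy right by 32 - im.  */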
/* Expand a shift by register.  */
/* ??? This gives incorrect answers for shifts by 0 or >= 32.  */
static inline void expand_shift_reg(qOP *qop, int right, int arith)
{
    int val;
    int reg;
    int shift;
    int tmp;

    reg = qop->args[0];
    shift = qop->args[1];
    val = gen_new_qreg(QMODE_I32);
    gen_op_mov32(val, reg);
    gen_op_mov32(QREG_CC_DEST, val);
    gen_op_mov32(QREG_CC_SRC, shift);
    tmp = gen_new_qreg(QMODE_I32);
    if (right) {
        if (arith) {
            gen_op_sar32(reg, val, shift);
        } else {
            gen_op_shr32(reg, val, shift);
        }
        gen_op_sub32(tmp, shift, gen_im32(1));
    } else {
        gen_op_shl32(reg, val, shift);
        gen_op_sub32(tmp, gen_im32(31), shift);
    }
    gen_op_shl32(val, val, tmp);
    gen_op_and32(QREG_CC_X, val, gen_im32(1));
}

static void expand_op_shl_cc(qOP *qop)
{
    expand_shift_reg(qop, 0, 0);
}

static void expand_op_shr_cc(qOP *qop)
{
    expand_shift_reg(qop, 1, 0);
}

static void expand_op_sar_cc(qOP *qop)
{
    expand_shift_reg(qop, 1, 1);
}

/* Set the Z flag to (arg0 & arg1) == 0.  */
static void expand_op_btest(qOP *qop)
{
    int tmp;
    int l1;

    l1 = gen_new_label();
    tmp = gen_new_qreg(QMODE_I32);
    gen_op_and32(tmp, qop->args[0], qop->args[1]);
    gen_op_and32(QREG_CC_DEST, QREG_CC_DEST, gen_im32(~(uint32_t)CCF_Z));
    gen_op_jmp_nz32(tmp, l1);
    gen_op_or32(QREG_CC_DEST, QREG_CC_DEST, gen_im32(CCF_Z));
    gen_op_label(l1);
}

/* arg0 += arg1 + CC_X */
static void expand_op_addx_cc(qOP *qop)
{
    int arg0 = qop->args[0];
    int arg1 = qop->args[1];
    int l1, l2;

    gen_op_add32 (arg0, arg0, arg1);
    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jmp_z32(QREG_CC_X, l1);
    gen_op_add32(arg0, arg0, gen_im32(1));
    gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_ADDX));
    gen_op_set_leu32(QREG_CC_X, arg0, arg1);
    gen_op_jmp(l2);
    gen_set_label(l1);
    gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_ADD));
    gen_op_set_ltu32(QREG_CC_X, arg0, arg1);
    gen_set_label(l2);
}

/* arg0 -= arg1 + CC_X */
static void expand_op_subx_cc(qOP *qop)
{
    int arg0 = qop->args[0];
    int arg1 = qop->args[1];
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jmp_z32(QREG_CC_X, l1);
    gen_op_set_leu32(QREG_CC_X, arg0, arg1);
    gen_op_sub32(arg0, arg0, gen_im32(1));
    gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_SUBX));
    gen_op_jmp(l2);
    gen_set_label(l1);
    gen_op_set_ltu32(QREG_CC_X, arg0, arg1);
    gen_op_mov32(QREG_CC_OP, gen_im32(CC_OP_SUB));
    gen_set_label(l2);
    gen_op_sub32 (arg0, arg0, arg1);
}

/* Expand target specific ops to generic qops.  */
static void expand_target_qops(void)
{
    qOP *qop;
    qOP *next;
    int c;

    /* Copy the list of qops, expanding target specific ops as we go.  */
    qop = gen_first_qop;
    gen_first_qop = NULL;
    gen_last_qop = NULL;
    for (; qop; qop = next) {
        c = qop->opcode;
        next = qop->next;
        if (c < FIRST_TARGET_OP) {
            qop->prev = gen_last_qop;
            qop->next = NULL;
            if (gen_last_qop)
                gen_last_qop->next = qop;
            else
                gen_first_qop = qop;
            gen_last_qop = qop;
            continue;
        }
        switch (c) {
#define DEF(name, nargs, barrier) \
        case INDEX_op_##name: \
            expand_op_##name(qop); \
            break;
#include "qop-target.def"
#undef DEF
        default:
            cpu_abort(NULL, "Unexpanded target qop");
        }
    }
}

/* ??? Implement this.  */
static void
optimize_flags(void)
{
}
#endif
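/* The disabled block above sketches a two-stage generator: the disas_*
   handlers would emit target-specific qops, and expand_target_qops() would
   later rewrite each op listed in qop-target.def into generic qops through
   the matching expand_op_* helper, with optimize_flags() left as a stub.  */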
/* generate intermediate code for basic block 'tb'.  */
int gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
                                   int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    int pc_offset;
    int last_cc_op;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_ptr = gen_opc_buf;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    gen_opparam_ptr = gen_opparam_buf;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->fpcr = env->fpcr;
    nb_gen_labels = 0;
    lj = -1;
    do {
        free_qreg = 0;
        pc_offset = dc->pc - pc_start;
        gen_throws_exception = NULL;
        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
            if (dc->is_jmp)
                break;
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
        }
        last_cc_op = dc->cc_op;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32));

    if (__builtin_expect(env->singlestep_enabled, 0)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            gen_flush_cc_op(dc);
            gen_op_mov32(QREG_PC, gen_im32((long)dc->pc));
        }
        gen_op_raise_exception(EXCP_DEBUG);
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            gen_op_mov32(QREG_T0, gen_im32(0));
            gen_op_exit_tb();
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, 0);
        fprintf(logfile, "\n");
        if (loglevel & (CPU_LOG_TB_OP)) {
            fprintf(logfile, "OP:\n");
            dump_ops(gen_opc_buf, gen_opparam_buf);
            fprintf(logfile, "\n");
        }
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
    }
    return 0;
}
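/* Typical callers (not shown in this excerpt): in QEMU translators of this
   vintage, gen_intermediate_code() and gen_intermediate_code_pc() are thin
   wrappers that call gen_intermediate_code_internal() with search_pc set to
   0 and 1 respectively; the latter is used when the guest PC must be
   recovered for an exception raised in the middle of a translation block.  */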