amd64_trans.c
/*
 * Cisco 7200 (Predator) simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>

#include "amd64_trans.h"
#include "cp0.h"
#include "memory.h"

/* Set an IRQ */
void mips64_set_irq(cpu_mips_t *cpu,m_uint8_t irq)
{
   m_uint32_t m;
   m = (1 << (irq + MIPS_CP0_CAUSE_ISHIFT)) & MIPS_CP0_CAUSE_IMASK;
   atomic_or(&cpu->irq_cause,m);
}

/* Clear an IRQ */
void mips64_clear_irq(cpu_mips_t *cpu,m_uint8_t irq)
{
   m_uint32_t m;
   m = (1 << (irq + MIPS_CP0_CAUSE_ISHIFT)) & MIPS_CP0_CAUSE_IMASK;
   atomic_and(&cpu->irq_cause,~m);

   if (!cpu->irq_cause)
      cpu->irq_pending = 0;
}

/* Load a 64 bit immediate value */
static inline void mips64_load_imm(insn_block_t *b,u_int reg,
                                   m_uint64_t value)
{
   if (value > 0xffffffffULL)
      amd64_mov_reg_imm_size(b->jit_ptr,reg,value,8);
   else
      amd64_mov_reg_imm(b->jit_ptr,reg,value);
}

/* Set the Program Counter (PC) register */
void mips64_set_pc(insn_block_t *b,m_uint64_t new_pc)
{
   mips64_load_imm(b,AMD64_RAX,new_pc);
   amd64_mov_membase_reg(b->jit_ptr,
                         AMD64_R15,OFFSET(cpu_mips_t,pc),
                         AMD64_RAX,8);
}

/* Set the Return Address (RA) register */
void mips64_set_ra(insn_block_t *b,m_uint64_t ret_pc)
{
   mips64_load_imm(b,AMD64_RAX,ret_pc);
   amd64_mov_membase_reg(b->jit_ptr,AMD64_R15,
                         REG_OFFSET(MIPS_GPR_RA),
                         AMD64_RAX,8);
}

/* Set Jump */
static void mips64_set_jump(cpu_mips_t *cpu,insn_block_t *b,m_uint64_t new_pc,
                            int local_jump)
{
   int return_to_caller = FALSE;
   u_char *jump_ptr;

   if (cpu->sym_trace && !local_jump)
      return_to_caller = TRUE;

   if (!return_to_caller && insn_block_local_addr(b,new_pc,&jump_ptr)) {
      if (jump_ptr) {
         amd64_jump_code(b->jit_ptr,jump_ptr);
      } else {
         insn_block_record_patch(b,b->jit_ptr,new_pc);
         amd64_jump32(b->jit_ptr,0);
      }
   } else {
      /* save PC */
      mips64_set_pc(b,new_pc);

      /* address is in another block: for now, return to the caller */
      insn_block_push_epilog(b);
   }
}

/* Basic C call */
static forced_inline void mips64_emit_basic_c_call(insn_block_t *b,void *f)
{
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RCX,f);
   amd64_call_reg(b->jit_ptr,AMD64_RCX);
}

/* Emit a simple call to a C function without any parameter */
static void mips64_emit_c_call(insn_block_t *b,void *f)
{
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RCX,f);
   amd64_call_reg(b->jit_ptr,AMD64_RCX);
}
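/*
 * Editorial sketch (not part of the original file): the emitters above rely
 * on two conventions that hold throughout this file. R15 is assumed to hold
 * the cpu_mips_t pointer for the whole translated block, and the virtual PC
 * of the instruction currently being translated is recomputed as:
 *
 *    current_pc = b->start_pc + ((b->mips_trans_pos - 1) << 2);
 *
 * since mips_trans_pos has already been advanced past the current
 * instruction and every MIPS instruction is 4 bytes wide. mips64_load_imm()
 * takes the 8-byte "mov" encoding only when the immediate exceeds 32 bits,
 * so that smaller values can use a shorter 32-bit move, which AMD64
 * implicitly zero-extends to the full 64-bit register.
 */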
/* Fast memory operation prototype */
typedef void (*memop_fast_access)(insn_block_t *b,int target);

/* Fast LW */
static void mips64_memop_fast_lw(insn_block_t *b,int target)
{
   amd64_mov_reg_memindex(b->jit_ptr,AMD64_RAX,AMD64_RBX,0,AMD64_RSI,0,4);
   amd64_bswap32(b->jit_ptr,X86_EAX);
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RDX,X86_EAX);

   /* Save value in register */
   amd64_mov_membase_reg(b->jit_ptr,AMD64_R15,REG_OFFSET(target),AMD64_RDX,8);
}

/* Fast SW */
static void mips64_memop_fast_sw(insn_block_t *b,int target)
{
   /* Load value from register */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RAX,AMD64_R15,REG_OFFSET(target),4);
   amd64_bswap32(b->jit_ptr,X86_EAX);
   amd64_mov_memindex_reg(b->jit_ptr,AMD64_RBX,0,AMD64_RSI,0,AMD64_RAX,4);
}

/* Fast memory operation (64-bit) */
static void mips64_emit_memop_fast64(insn_block_t *b,int op,
                                     int base,int offset,
                                     int target,int keep_ll_bit,
                                     memop_fast_access op_handler)
{
   m_uint64_t val = sign_extend(offset,16);
   u_char *test1,*test2,*test3;
   u_char *p_exception,*p_exit;

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   /* RSI = GPR[base] + sign-extended offset */
   mips64_load_imm(b,AMD64_RSI,val);
   amd64_alu_reg_membase(b->jit_ptr,X86_ADD,
                         AMD64_RSI,AMD64_RDI,REG_OFFSET(base));

   /* RBX = mts64_entry index */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RBX,AMD64_RSI,8);
   amd64_shift_reg_imm(b->jit_ptr,X86_SHR,AMD64_RBX,MTS64_HASH_SHIFT);
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,AMD64_RBX,MTS64_HASH_MASK);

   /* RCX = mts_cache */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RCX,
                         AMD64_R15,OFFSET(cpu_mips_t,mts_cache),8);

   /* RAX = mts64_entry */
   amd64_mov_reg_memindex(b->jit_ptr,AMD64_RAX,AMD64_RCX,0,AMD64_RBX,3,8);

   /* Do we have a non-null entry ? */
   amd64_test_reg_reg_size(b->jit_ptr,AMD64_RAX,AMD64_RAX,8);
   test1 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);

   /* RCX = start */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RCX,
                         AMD64_RAX,OFFSET(mts64_entry_t,start),8);

   /* RDX = mask (sign-extended), RBX = action */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RDX,
                         AMD64_RAX,OFFSET(mts64_entry_t,mask),4);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_RAX,OFFSET(mts64_entry_t,action),8);
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RDX,X86_EDX);
   amd64_alu_reg_reg(b->jit_ptr,X86_AND,AMD64_RDX,AMD64_RSI);

   /* Is the virtual address within the entry's range ? */
   amd64_alu_reg_reg(b->jit_ptr,X86_CMP,AMD64_RDX,AMD64_RCX);
   test2 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_NE, 0, 1);

   /* Device access ? */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_R8,AMD64_RBX,8);
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,AMD64_R8,MTS_DEV_MASK);
   test3 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);

   /* === Fast access === */
   amd64_alu_reg_reg(b->jit_ptr,X86_SUB,AMD64_RSI,AMD64_RCX);

   /* Memory access */
   op_handler(b,target);

   p_exit = b->jit_ptr;
   amd64_jump8(b->jit_ptr,0);

   /* === Slow lookup === */
   amd64_patch(test1,b->jit_ptr);
   amd64_patch(test2,b->jit_ptr);
   amd64_patch(test3,b->jit_ptr);

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_RDI,MEMOP_OFFSET(op));

   /* Exception ? */
   amd64_test_reg_reg_size(b->jit_ptr,AMD64_RAX,AMD64_RAX,4);
   p_exception = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);
   insn_block_push_epilog(b);

   amd64_patch(p_exit,b->jit_ptr);
   amd64_patch(p_exception,b->jit_ptr);
}
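/*
 * Editorial sketch (added, hedged; not part of the original file): the
 * inline lookup emitted above appears equivalent to the following C logic.
 * The field names come from the OFFSET() uses above; treating e->action as
 * the host base address on the non-device path is an inference from the
 * RBX-based indexed access in the fast LW/SW handlers.
 *
 *    mts64_entry_t *e = cpu->mts_cache[(vaddr >> MTS64_HASH_SHIFT) &
 *                                      MTS64_HASH_MASK];
 *    if ((e != NULL) &&
 *        ((vaddr & (m_uint64_t)(m_int64_t)(m_int32_t)e->mask) == e->start) &&
 *        !(e->action & MTS_DEV_MASK)) {
 *       host_addr = e->action + (vaddr - e->start);   // fast path
 *    } else {
 *       // slow path: save the PC, then call the memory access
 *       // function through MEMOP_OFFSET(op)
 *    }
 */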
/* Fast memory operation (32-bit) */
static void mips64_emit_memop_fast32(insn_block_t *b,int op,
                                     int base,int offset,
                                     int target,int keep_ll_bit,
                                     memop_fast_access op_handler)
{
   m_uint32_t val = sign_extend(offset,16);
   u_char *test1,*test2,*test3;
   u_char *p_exception,*p_exit;

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   /* ESI = GPR[base] + sign-extended offset */
   amd64_mov_reg_imm(b->jit_ptr,X86_ESI,val);
   amd64_alu_reg_membase_size(b->jit_ptr,X86_ADD,
                              X86_ESI,AMD64_RDI,REG_OFFSET(base),4);

   /* EBX = mts32_entry index */
   amd64_mov_reg_reg_size(b->jit_ptr,X86_EBX,X86_ESI,4);
   amd64_shift_reg_imm_size(b->jit_ptr,X86_SHR,X86_EBX,MTS64_HASH_SHIFT,4);
   amd64_alu_reg_imm_size(b->jit_ptr,X86_AND,X86_EBX,MTS64_HASH_MASK,4);

   /* RCX = mts_cache */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RCX,
                         AMD64_R15,OFFSET(cpu_mips_t,mts_cache),8);

   /* RAX = mts32_entry */
   amd64_mov_reg_memindex(b->jit_ptr,AMD64_RAX,AMD64_RCX,0,AMD64_RBX,3,8);

   /* Do we have a non-null entry ? */
   amd64_test_reg_reg_size(b->jit_ptr,AMD64_RAX,AMD64_RAX,8);
   test1 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);

   /* ECX = start */
   amd64_mov_reg_membase(b->jit_ptr,X86_ECX,
                         AMD64_RAX,OFFSET(mts32_entry_t,start),4);

   /* EDX = mask, RBX = action */
   amd64_mov_reg_membase(b->jit_ptr,X86_EDX,
                         AMD64_RAX,OFFSET(mts32_entry_t,mask),4);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_RAX,OFFSET(mts32_entry_t,action),8);
   amd64_alu_reg_reg_size(b->jit_ptr,X86_AND,X86_EDX,X86_ESI,4);

   /* Is the virtual address within the entry's range ? */
   amd64_alu_reg_reg_size(b->jit_ptr,X86_CMP,X86_EDX,X86_ECX,4);
   test2 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_NE, 0, 1);

   /* Device access ? */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_R8,AMD64_RBX,8);
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,AMD64_R8,MTS_DEV_MASK);
   test3 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);

   /* === Fast access === */
   amd64_alu_reg_reg_size(b->jit_ptr,X86_SUB,X86_ESI,X86_ECX,4);

   /* Memory access */
   op_handler(b,target);

   p_exit = b->jit_ptr;
   amd64_jump8(b->jit_ptr,0);

   /* === Slow lookup === */
   amd64_patch(test1,b->jit_ptr);
   amd64_patch(test2,b->jit_ptr);
   amd64_patch(test3,b->jit_ptr);

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* Sign-extend virtual address */
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RSI,X86_ESI);

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_RDI,MEMOP_OFFSET(op));

   /* Exception ? */
   amd64_test_reg_reg_size(b->jit_ptr,AMD64_RAX,AMD64_RAX,4);
   p_exception = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);
   insn_block_push_epilog(b);

   amd64_patch(p_exit,b->jit_ptr);
   amd64_patch(p_exception,b->jit_ptr);
}

/* Fast memory operation */
static void mips64_emit_memop_fast(cpu_mips_t *cpu,insn_block_t *b,int op,
                                   int base,int offset,
                                   int target,int keep_ll_bit,
                                   memop_fast_access op_handler)
{
   switch(cpu->addr_mode) {
      case 32:
         mips64_emit_memop_fast32(b,op,base,offset,target,keep_ll_bit,
                                  op_handler);
         break;
      case 64:
         mips64_emit_memop_fast64(b,op,base,offset,target,keep_ll_bit,
                                  op_handler);
         break;
   }
}

/* Memory operation */
static void mips64_emit_memop(insn_block_t *b,int op,int base,int offset,
                              int target,int keep_ll_bit)
{
   m_uint64_t val = sign_extend(offset,16);
   u_char *test1;

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   if (!keep_ll_bit) {
      amd64_clear_reg(b->jit_ptr,AMD64_RCX);
      amd64_mov_membase_reg(b->jit_ptr,AMD64_RDI,OFFSET(cpu_mips_t,ll_bit),
                            X86_ECX,4);
   }

   /* RSI = GPR[base] + sign-extended offset */
   mips64_load_imm(b,AMD64_RSI,val);
   amd64_alu_reg_membase(b->jit_ptr,X86_ADD,
                         AMD64_RSI,AMD64_RDI,REG_OFFSET(base));

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_RDI,MEMOP_OFFSET(op));

   /* Exception ? */
   amd64_test_reg_reg_size(b->jit_ptr,AMD64_RAX,AMD64_RAX,4);
   test1 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);
   insn_block_push_epilog(b);

   amd64_patch(test1,b->jit_ptr);
}
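/*
 * Editorial note (hedged, not part of the original file): the slow paths
 * above pass arguments per the System V AMD64 calling convention:
 * RDI = cpu_mips_t instance, RSI = virtual address, RDX = GPR index of the
 * source/target register. The handler reached through MEMOP_OFFSET(op) is
 * assumed to return a nonzero status in EAX when it raised a MIPS
 * exception; in that case the generated code runs the block epilog emitted
 * by insn_block_push_epilog() and returns to the caller, so the exception
 * is serviced outside the translated block. When EAX is zero, the Z branch
 * skips the epilog and execution continues with the next instruction.
 */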
/* Coprocessor register transfer operation */
static void mips64_emit_cp_xfr_op(insn_block_t *b,int rt,int rd,void *f)
{
   /* update pc */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* cp0 register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,rd);

   /* gpr */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RSI,rt);

   /* cpu instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   mips64_emit_basic_c_call(b,f);
}

/* Virtual Breakpoint */
void mips64_emit_breakpoint(insn_block_t *b)
{
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_run_breakpoint);
}

/* Unknown opcode handler */
static fastcall void mips64_unknown_opcode(cpu_mips_t *cpu,m_uint32_t opcode)
{
   printf("CPU = %p\n",cpu);
   printf("MIPS64: unhandled opcode 0x%8.8x at 0x%llx (ra=0x%llx)\n",
          opcode,cpu->pc,cpu->gpr[MIPS_GPR_RA]);
   mips64_dump_regs(cpu);
}

/* Emit unhandled instruction code */
static int mips64_emit_unknown(cpu_mips_t *cpu,insn_block_t *b,
                               mips_insn_t opcode)
{
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RSI,opcode);
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_unknown_opcode);
   return(0);
}

/* Invalid delay slot handler */
static fastcall void mips64_invalid_delay_slot(cpu_mips_t *cpu)
{
   printf("MIPS64: invalid instruction in delay slot at 0x%llx (ra=0x%llx)\n",
          cpu->pc,cpu->gpr[MIPS_GPR_RA]);

   mips64_dump_regs(cpu);

   /* Halt the virtual CPU */
   cpu->pc = 0;
}

/* Emit invalid delay slot handling code */
int mips64_emit_invalid_delay_slot(insn_block_t *b)
{
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_invalid_delay_slot);
   return(0);
}

/*
 * Increment the count register and trigger the timer IRQ if the compare
 * register holds the same value.
 */
void mips64_inc_cp0_count_reg(insn_block_t *b)
{
   amd64_inc_membase(b->jit_ptr,AMD64_R15,OFFSET(cpu_mips_t,cp0_virt_cnt_reg));

#if 0 /* TIMER_IRQ */
   u_char *test1;

   /* increment the virtual count register */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RAX,