mips64_amd64_trans.c
/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>

#include "cpu.h"
#include "mips64_jit.h"
#include "mips64_amd64_trans.h"
#include "mips64_cp0.h"
#include "memory.h"

/* Macros for CPU structure access */
#define REG_OFFSET(reg)       (OFFSET(cpu_mips_t,gpr[(reg)]))
#define CP0_REG_OFFSET(c0reg) (OFFSET(cpu_mips_t,cp0.reg[(c0reg)]))
#define MEMOP_OFFSET(op)      (OFFSET(cpu_mips_t,mem_op_fn[(op)]))

#define DECLARE_INSN(name) \
   static int mips64_emit_##name(cpu_mips_t *cpu,mips64_jit_tcb_t *b, \
                                 mips_insn_t insn)

/* Set an IRQ */
void mips64_set_irq(cpu_mips_t *cpu,m_uint8_t irq)
{
   m_uint32_t m;
   m = (1 << (irq + MIPS_CP0_CAUSE_ISHIFT)) & MIPS_CP0_CAUSE_IMASK;
   atomic_or(&cpu->irq_cause,m);
}

/* Clear an IRQ */
void mips64_clear_irq(cpu_mips_t *cpu,m_uint8_t irq)
{
   m_uint32_t m;
   m = (1 << (irq + MIPS_CP0_CAUSE_ISHIFT)) & MIPS_CP0_CAUSE_IMASK;
   atomic_and(&cpu->irq_cause,~m);

   if (!cpu->irq_cause)
      cpu->irq_pending = 0;
}

/* Load a 64 bit immediate value */
static inline void mips64_load_imm(mips64_jit_tcb_t *b,u_int reg,
                                   m_uint64_t value)
{
   if (value > 0xffffffffULL)
      amd64_mov_reg_imm_size(b->jit_ptr,reg,value,8);
   else
      amd64_mov_reg_imm(b->jit_ptr,reg,value);
}

/* Set the Program Counter (PC) register */
void mips64_set_pc(mips64_jit_tcb_t *b,m_uint64_t new_pc)
{
   mips64_load_imm(b,AMD64_RAX,new_pc);
   amd64_mov_membase_reg(b->jit_ptr,
                         AMD64_R15,OFFSET(cpu_mips_t,pc),
                         AMD64_RAX,8);
}

/* Set the Return Address (RA) register */
void mips64_set_ra(mips64_jit_tcb_t *b,m_uint64_t ret_pc)
{
   mips64_load_imm(b,AMD64_RAX,ret_pc);
   amd64_mov_membase_reg(b->jit_ptr,AMD64_R15,
                         REG_OFFSET(MIPS_GPR_RA),
                         AMD64_RAX,8);
}
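/*
 * Note on the block-lookup optimization below: the emitted code behaves
 * roughly like the following C (an illustrative sketch based on the
 * accesses performed here, not the code that is actually generated):
 *
 *    mips64_jit_tcb_t *blk;
 *    u_char *host;
 *
 *    blk = cpu->exec_blk_map[mips64_jit_get_pc_hash(new_pc)];
 *
 *    if (blk && (blk->start_pc == (new_pc & MIPS_MIN_PAGE_MASK)) &&
 *        (host = blk->jit_insn_ptr[(new_pc & MIPS_MIN_PAGE_IMASK) >> 2]))
 *       goto *host;            // jump straight into translated code
 *
 *    cpu->pc = new_pc;         // no usable block: return to the exec loop
 */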
/*
 * Try to branch directly to the specified JIT block without returning to
 * main loop.
 */
static void mips64_try_direct_far_jump(cpu_mips_t *cpu,mips64_jit_tcb_t *b,
                                       m_uint64_t new_pc)
{
   m_uint64_t new_page;
   m_uint32_t pc_hash,pc_offset;
   u_char *test1,*test2,*test3;

   new_page = new_pc & MIPS_MIN_PAGE_MASK;
   pc_offset = (new_pc & MIPS_MIN_PAGE_IMASK) >> 2;
   pc_hash = mips64_jit_get_pc_hash(new_pc);

   /* Get JIT block info in %rdx */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_R15,OFFSET(cpu_mips_t,exec_blk_map),8);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RDX,
                         AMD64_RBX,pc_hash*sizeof(void *),8);

   /* no JIT block found ? */
   amd64_test_reg_reg(b->jit_ptr,AMD64_RDX,AMD64_RDX);
   test1 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);

   /* Check block IA */
   mips64_load_imm(b,AMD64_RAX,new_page);
   amd64_alu_reg_membase_size(b->jit_ptr,X86_CMP,X86_EAX,AMD64_RDX,
                              OFFSET(mips64_jit_tcb_t,start_pc),4);
   test2 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_NE, 0, 1);

   /* Jump to the code */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RSI,
                         AMD64_RDX,OFFSET(mips64_jit_tcb_t,jit_insn_ptr),8);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_RSI,pc_offset * sizeof(void *),8);

   amd64_test_reg_reg(b->jit_ptr,AMD64_RBX,AMD64_RBX);
   test3 = b->jit_ptr;
   amd64_branch8(b->jit_ptr, X86_CC_Z, 0, 1);
   amd64_jump_reg(b->jit_ptr,AMD64_RBX);

   /* Returns to caller... */
   amd64_patch(test1,b->jit_ptr);
   amd64_patch(test2,b->jit_ptr);
   amd64_patch(test3,b->jit_ptr);

   mips64_set_pc(b,new_pc);
   mips64_jit_tcb_push_epilog(b);
}

/* Set Jump */
static void mips64_set_jump(cpu_mips_t *cpu,mips64_jit_tcb_t *b,
                            m_uint64_t new_pc,int local_jump)
{
   int return_to_caller = FALSE;
   u_char *jump_ptr;

   if (cpu->sym_trace && !local_jump)
      return_to_caller = TRUE;

   if (!return_to_caller && mips64_jit_tcb_local_addr(b,new_pc,&jump_ptr)) {
      if (jump_ptr) {
         amd64_jump_code(b->jit_ptr,jump_ptr);
      } else {
         /* Never jump directly to code in a delay slot */
         if (mips64_jit_is_delay_slot(b,new_pc)) {
            mips64_set_pc(b,new_pc);
            mips64_jit_tcb_push_epilog(b);
            return;
         }

         mips64_jit_tcb_record_patch(b,b->jit_ptr,new_pc);
         amd64_jump32(b->jit_ptr,0);
      }
   } else {
      if (cpu->exec_blk_direct_jump) {
         /* Block lookup optimization */
         mips64_try_direct_far_jump(cpu,b,new_pc);
      } else {
         mips64_set_pc(b,new_pc);
         mips64_jit_tcb_push_epilog(b);
      }
   }
}

/* Basic C call */
static forced_inline void mips64_emit_basic_c_call(mips64_jit_tcb_t *b,void *f)
{
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RCX,f);
   amd64_call_reg(b->jit_ptr,AMD64_RCX);
}

/* Emit a simple call to a C function without any parameter */
static void mips64_emit_c_call(mips64_jit_tcb_t *b,void *f)
{
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RCX,f);
   amd64_call_reg(b->jit_ptr,AMD64_RCX);
}

/* Single-step operation */
void mips64_emit_single_step(mips64_jit_tcb_t *b,mips_insn_t insn)
{
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RSI,insn);
   mips64_emit_basic_c_call(b,mips64_exec_single_step);
}

/* Fast memory operation prototype */
typedef void (*memop_fast_access)(mips64_jit_tcb_t *b,int target);

/* Fast LW */
static void mips64_memop_fast_lw(mips64_jit_tcb_t *b,int target)
{
   amd64_mov_reg_memindex(b->jit_ptr,AMD64_RAX,AMD64_RBX,0,AMD64_RSI,0,4);
   amd64_bswap32(b->jit_ptr,X86_EAX);
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RDX,X86_EAX);

   /* Save value in register */
   amd64_mov_membase_reg(b->jit_ptr,AMD64_R15,REG_OFFSET(target),AMD64_RDX,8);
}

/* Fast SW */
static void mips64_memop_fast_sw(mips64_jit_tcb_t *b,int target)
{
   /* Load value from register */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RAX,AMD64_R15,REG_OFFSET(target),4);
   amd64_bswap32(b->jit_ptr,X86_EAX);
   amd64_mov_memindex_reg(b->jit_ptr,AMD64_RBX,0,AMD64_RSI,0,AMD64_RAX,4);
}
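/*
 * The two fast-path emitters below inline the MTS (memory translation
 * system) cache lookup instead of always calling the generic handler.
 * The generated code behaves roughly like this C sketch (illustrative
 * only; field and array names are those referenced in this file):
 *
 *    m_uint64_t vaddr = cpu->gpr[base] + sign_extend(offset,16);
 *    mts64_entry_t *e;
 *
 *    e = &cpu->mts_u.mts64_cache[(vaddr >> MTS64_HASH_SHIFT) & MTS64_HASH_MASK];
 *
 *    if ((e->gvpa == (vaddr & MIPS_MIN_PAGE_MASK)) &&
 *        !(write_op && (e->flags & MTS_FLAG_COW)))
 *       op_handler() accesses e->hpa + (vaddr & MIPS_MIN_PAGE_IMASK);
 *    else
 *       cpu->mem_op_fn[opcode](cpu,vaddr,target);    // slow lookup
 */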
/* Fast memory operation (64-bit) */
static void mips64_emit_memop_fast64(mips64_jit_tcb_t *b,int write_op,
                                     int opcode,int base,int offset,
                                     int target,int keep_ll_bit,
                                     memop_fast_access op_handler)
{
   m_uint32_t val = sign_extend(offset,16);
   u_char *test1,*test2,*p_exit;

   test2 = NULL;

   /* RSI = GPR[base] + sign-extended offset */
   mips64_load_imm(b,AMD64_RSI,val);
   amd64_alu_reg_membase(b->jit_ptr,X86_ADD,
                         AMD64_RSI,AMD64_R15,REG_OFFSET(base));

   /* RBX = mts64_entry index */
   amd64_mov_reg_reg_size(b->jit_ptr,X86_EBX,X86_ESI,4);
   amd64_shift_reg_imm_size(b->jit_ptr,X86_SHR,X86_EBX,MTS64_HASH_SHIFT,4);
   amd64_alu_reg_imm_size(b->jit_ptr,X86_AND,X86_EBX,MTS64_HASH_MASK,4);

   /* RCX = mts64 entry */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RCX,
                         AMD64_R15,
                         OFFSET(cpu_mips_t,mts_u.mts64_cache),8);
   amd64_shift_reg_imm(b->jit_ptr,X86_SHL,AMD64_RBX,5);  /* TO FIX */
   amd64_alu_reg_reg(b->jit_ptr,X86_ADD,AMD64_RCX,AMD64_RBX);

   /* Compare virtual page address (EAX = vpage) */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RAX,AMD64_RSI,8);
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,AMD64_RAX,MIPS_MIN_PAGE_MASK);

   amd64_alu_reg_membase_size(b->jit_ptr,X86_CMP,AMD64_RAX,AMD64_RCX,
                              OFFSET(mts64_entry_t,gvpa),8);
   test1 = b->jit_ptr;
   x86_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);

   /* Test if we are writing to a COW page */
   if (write_op) {
      amd64_test_membase_imm_size(b->jit_ptr,
                                  AMD64_RCX,OFFSET(mts64_entry_t,flags),
                                  MTS_FLAG_COW,4);
      test2 = b->jit_ptr;
      amd64_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);
   }

   /* ESI = offset in page, RBX = Host Page Address */
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,X86_ESI,MIPS_MIN_PAGE_IMASK);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_RCX,OFFSET(mts64_entry_t,hpa),8);

   /* Memory access */
   op_handler(b,target);

   p_exit = b->jit_ptr;
   amd64_jump8(b->jit_ptr,0);

   if (test2)
      amd64_patch(test2,b->jit_ptr);

   /* === Slow lookup === */
   amd64_patch(test1,b->jit_ptr);

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* Sign-extend virtual address */
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RSI,X86_ESI);

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_R15,MEMOP_OFFSET(opcode));

   amd64_patch(p_exit,b->jit_ptr);
}

/* Fast memory operation (32-bit) */
static void mips64_emit_memop_fast32(mips64_jit_tcb_t *b,int write_op,
                                     int opcode,int base,int offset,
                                     int target,int keep_ll_bit,
                                     memop_fast_access op_handler)
{
   m_uint32_t val = sign_extend(offset,16);
   u_char *test1,*test2,*p_exit;

   test2 = NULL;

   /* ESI = GPR[base] + sign-extended offset */
   amd64_mov_reg_imm(b->jit_ptr,X86_ESI,val);
   amd64_alu_reg_membase_size(b->jit_ptr,X86_ADD,
                              X86_ESI,AMD64_R15,REG_OFFSET(base),4);

   /* RBX = mts32_entry index */
   amd64_mov_reg_reg_size(b->jit_ptr,X86_EBX,X86_ESI,4);
   amd64_shift_reg_imm_size(b->jit_ptr,X86_SHR,X86_EBX,MTS32_HASH_SHIFT,4);
   amd64_alu_reg_imm_size(b->jit_ptr,X86_AND,X86_EBX,MTS32_HASH_MASK,4);

   /* RCX = mts32 entry */
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RCX,
                         AMD64_R15,
                         OFFSET(cpu_mips_t,mts_u.mts32_cache),8);
   amd64_shift_reg_imm(b->jit_ptr,X86_SHL,AMD64_RBX,5);  /* TO FIX */
   amd64_alu_reg_reg(b->jit_ptr,X86_ADD,AMD64_RCX,AMD64_RBX);

   /* Compare virtual page address (EAX = vpage) */
   amd64_mov_reg_reg(b->jit_ptr,X86_EAX,X86_ESI,4);
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,X86_EAX,MIPS_MIN_PAGE_MASK);

   amd64_alu_reg_membase_size(b->jit_ptr,X86_CMP,X86_EAX,AMD64_RCX,
                              OFFSET(mts32_entry_t,gvpa),4);
   test1 = b->jit_ptr;
   x86_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);

   /* Test if we are writing to a COW page */
   if (write_op) {
      amd64_test_membase_imm_size(b->jit_ptr,
                                  AMD64_RCX,OFFSET(mts32_entry_t,flags),
                                  MTS_FLAG_COW,4);
      test2 = b->jit_ptr;
      amd64_branch8(b->jit_ptr, X86_CC_NZ, 0, 1);
   }

   /* ESI = offset in page, RBX = Host Page Address */
   amd64_alu_reg_imm(b->jit_ptr,X86_AND,X86_ESI,MIPS_MIN_PAGE_IMASK);
   amd64_mov_reg_membase(b->jit_ptr,AMD64_RBX,
                         AMD64_RCX,OFFSET(mts32_entry_t,hpa),8);

   /* Memory access */
   op_handler(b,target);

   p_exit = b->jit_ptr;
   amd64_jump8(b->jit_ptr,0);

   /* === Slow lookup === */
   amd64_patch(test1,b->jit_ptr);

   if (test2)
      amd64_patch(test2,b->jit_ptr);

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* Sign-extend virtual address */
   amd64_movsxd_reg_reg(b->jit_ptr,AMD64_RSI,X86_ESI);

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_R15,MEMOP_OFFSET(opcode));

   amd64_patch(p_exit,b->jit_ptr);
}
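/*
 * Note: in both fast paths above, the "SHL ...,5" tagged "TO FIX" scales the
 * hash index by a hard-coded entry size of 32 bytes; the TO FIX presumably
 * marks this assumption about sizeof(mts64_entry_t)/sizeof(mts32_entry_t).
 * The dispatcher below simply selects the 32-bit or 64-bit variant from
 * cpu->addr_mode.
 */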
/* Fast memory operation */
static void mips64_emit_memop_fast(cpu_mips_t *cpu,mips64_jit_tcb_t *b,
                                   int write_op,int opcode,
                                   int base,int offset,
                                   int target,int keep_ll_bit,
                                   memop_fast_access op_handler)
{
   switch(cpu->addr_mode) {
      case 32:
         mips64_emit_memop_fast32(b,write_op,opcode,base,offset,target,
                                  keep_ll_bit,op_handler);
         break;
      case 64:
         mips64_emit_memop_fast64(b,write_op,opcode,base,offset,target,
                                  keep_ll_bit,op_handler);
         break;
   }
}

/* Memory operation */
static void mips64_emit_memop(mips64_jit_tcb_t *b,int op,int base,int offset,
                              int target,int keep_ll_bit)
{
   m_uint64_t val = sign_extend(offset,16);

   /* Save PC for exception handling */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* RDI = CPU instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   if (!keep_ll_bit) {
      amd64_clear_reg(b->jit_ptr,AMD64_RCX);
      amd64_mov_membase_reg(b->jit_ptr,AMD64_RDI,OFFSET(cpu_mips_t,ll_bit),
                            X86_ECX,4);
   }

   /* RSI = GPR[base] + sign-extended offset */
   mips64_load_imm(b,AMD64_RSI,val);
   amd64_alu_reg_membase(b->jit_ptr,X86_ADD,
                         AMD64_RSI,AMD64_RDI,REG_OFFSET(base));

   /* RDX = target register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,target);

   /* Call memory access function */
   amd64_call_membase(b->jit_ptr,AMD64_RDI,MEMOP_OFFSET(op));
}

/* Coprocessor Register transfer operation */
static void mips64_emit_cp_xfr_op(mips64_jit_tcb_t *b,int rt,int rd,void *f)
{
   /* update pc */
   mips64_set_pc(b,b->start_pc+((b->mips_trans_pos-1)<<2));

   /* cp0 register */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RDX,rd);

   /* gpr */
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RSI,rt);

   /* cpu instance */
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);

   mips64_emit_basic_c_call(b,f);
}

/* Virtual Breakpoint */
void mips64_emit_breakpoint(mips64_jit_tcb_t *b)
{
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_run_breakpoint);
}

/* Unknown opcode handler */
static fastcall void mips64_unknown_opcode(cpu_mips_t *cpu,m_uint32_t opcode)
{
   printf("CPU = %p\n",cpu);
   printf("MIPS64: unhandled opcode 0x%8.8x at 0x%llx (ra=0x%llx)\n",
          opcode,cpu->pc,cpu->gpr[MIPS_GPR_RA]);
   mips64_dump_regs(cpu->gen);
}

/* Emit unhandled instruction code */
static int mips64_emit_unknown(cpu_mips_t *cpu,mips64_jit_tcb_t *b,
                               mips_insn_t opcode)
{
   amd64_mov_reg_imm(b->jit_ptr,AMD64_RSI,opcode);
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_unknown_opcode);
   return(0);
}

/* Invalid delay slot handler */
static fastcall void mips64_invalid_delay_slot(cpu_mips_t *cpu)
{
   printf("MIPS64: invalid instruction in delay slot at 0x%llx (ra=0x%llx)\n",
          cpu->pc,cpu->gpr[MIPS_GPR_RA]);

   mips64_dump_regs(cpu->gen);

   /* Halt the virtual CPU */
   cpu->pc = 0;
}

/* Emit invalid delay slot handler code */
int mips64_emit_invalid_delay_slot(mips64_jit_tcb_t *b)
{
   amd64_mov_reg_reg(b->jit_ptr,AMD64_RDI,AMD64_R15,8);
   mips64_emit_c_call(b,mips64_invalid_delay_slot);
   return(0);
}
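/*
 * Usage sketch: a per-opcode emitter elsewhere in this translator typically
 * wires the helpers above together as shown below (illustrative only;
 * bits(), MIPS_MEMOP_LW and cpu->fast_memop are assumed from the rest of
 * the Dynamips sources, not from this excerpt):
 *
 *    DECLARE_INSN(LW)
 *    {
 *       int base   = bits(insn,21,25);
 *       int rt     = bits(insn,16,20);
 *       int offset = bits(insn,0,15);
 *
 *       if (cpu->fast_memop)
 *          mips64_emit_memop_fast(cpu,b,0,MIPS_MEMOP_LW,base,offset,rt,TRUE,
 *                                 mips64_memop_fast_lw);
 *       else
 *          mips64_emit_memop(b,MIPS_MEMOP_LW,base,offset,rt,TRUE);
 *       return(0);
 *    }
 */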