translate.c

From the "Xen virtual machine source code package" · C source · 2,286 lines total · page 1 of 5

C
2,286
字号
/* *  ARM translation * *  Copyright (c) 2003 Fabrice Bellard *  Copyright (c) 2005-2007 CodeSourcery *  Copyright (c) 2007 OpenedHand, Ltd. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */#include <stdarg.h>#include <stdlib.h>#include <stdio.h>#include <string.h>#include <inttypes.h>#include "cpu.h"#include "exec-all.h"#include "disas.h"#include "tcg-op.h"#define GEN_HELPER 1#include "helpers.h"#define ENABLE_ARCH_5J    0#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;/* internal defines */typedef struct DisasContext {    target_ulong pc;    int is_jmp;    /* Nonzero if this instruction has been conditionally skipped.  */    int condjmp;    /* The label that will be jumped to when the instruction is skipped.  */    int condlabel;    /* Thumb-2 condtional execution bits.  
*/    int condexec_mask;    int condexec_cond;    struct TranslationBlock *tb;    int singlestep_enabled;    int thumb;    int is_mem;#if !defined(CONFIG_USER_ONLY)    int user;#endif} DisasContext;#if defined(CONFIG_USER_ONLY)#define IS_USER(s) 1#else#define IS_USER(s) (s->user)#endif/* These instructions trap after executing, so defer them until after the   conditional executions state has been updated.  */#define DISAS_WFI 4#define DISAS_SWI 5/* XXX: move that elsewhere */extern FILE *logfile;extern int loglevel;static TCGv cpu_env;/* We reuse the same 64-bit temporaries for efficiency.  */static TCGv cpu_V0, cpu_V1, cpu_M0;/* FIXME:  These should be removed.  */static TCGv cpu_T[2];static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;/* initialize TCG globals.  */void arm_translate_init(void){    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");}/* The code generator doesn't like lots of temporaries, so maintain our own   cache for reuse within a function.  */#define MAX_TEMPS 8static int num_temps;static TCGv temps[MAX_TEMPS];/* Allocate a temporary variable.  */static TCGv new_tmp(void){    TCGv tmp;    if (num_temps == MAX_TEMPS)        abort();    if (GET_TCGV(temps[num_temps]))      return temps[num_temps++];    tmp = tcg_temp_new(TCG_TYPE_I32);    temps[num_temps++] = tmp;    return tmp;}/* Release a temporary variable.  */static void dead_tmp(TCGv tmp){    int i;    num_temps--;    i = num_temps;    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))        return;    /* Shuffle this temp to the last slot.  
*/    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))        i--;    while (i < num_temps) {        temps[i] = temps[i + 1];        i++;    }    temps[i] = tmp;}static inline TCGv load_cpu_offset(int offset){    TCGv tmp = new_tmp();    tcg_gen_ld_i32(tmp, cpu_env, offset);    return tmp;}#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))static inline void store_cpu_offset(TCGv var, int offset){    tcg_gen_st_i32(var, cpu_env, offset);    dead_tmp(var);}#define store_cpu_field(var, name) \    store_cpu_offset(var, offsetof(CPUState, name))/* Set a variable to the value of a CPU register.  */static void load_reg_var(DisasContext *s, TCGv var, int reg){    if (reg == 15) {        uint32_t addr;        /* normaly, since we updated PC, we need only to add one insn */        if (s->thumb)            addr = (long)s->pc + 2;        else            addr = (long)s->pc + 4;        tcg_gen_movi_i32(var, addr);    } else {        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));    }}/* Create a new temporary and set it to the value of a CPU register.  */static inline TCGv load_reg(DisasContext *s, int reg){    TCGv tmp = new_tmp();    load_reg_var(s, tmp, reg);    return tmp;}/* Set a CPU register.  The source must be a temporary and will be   marked as dead.  */static void store_reg(DisasContext *s, int reg, TCGv var){    if (reg == 15) {        tcg_gen_andi_i32(var, var, ~1);        s->is_jmp = DISAS_JUMP;    }    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));    dead_tmp(var);}/* Basic operations.  
*/#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)/* Value extensions.  
*/#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)#define gen_sxtb16(var) gen_helper_sxtb16(var, var)#define gen_uxtb16(var) gen_helper_uxtb16(var, var)#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))/* Set NZCV flags from the high 4 bits of var.  */#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)static void gen_exception(int excp){    TCGv tmp = new_tmp();    tcg_gen_movi_i32(tmp, excp);    gen_helper_exception(tmp);    dead_tmp(tmp);}static void gen_smul_dual(TCGv a, TCGv b){    TCGv tmp1 = new_tmp();    TCGv tmp2 = new_tmp();    tcg_gen_ext8s_i32(tmp1, a);    tcg_gen_ext8s_i32(tmp2, b);    tcg_gen_mul_i32(tmp1, tmp1, tmp2);    dead_tmp(tmp2);    tcg_gen_sari_i32(a, a, 16);    tcg_gen_sari_i32(b, b, 16);    tcg_gen_mul_i32(b, b, a);    tcg_gen_mov_i32(a, tmp1);    dead_tmp(tmp1);}/* Byteswap each halfword.  */static void gen_rev16(TCGv var){    TCGv tmp = new_tmp();    tcg_gen_shri_i32(tmp, var, 8);    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);    tcg_gen_shli_i32(var, var, 8);    tcg_gen_andi_i32(var, var, 0xff00ff00);    tcg_gen_or_i32(var, var, tmp);    dead_tmp(tmp);}/* Byteswap low halfword and sign extend.  */static void gen_revsh(TCGv var){    TCGv tmp = new_tmp();    tcg_gen_shri_i32(tmp, var, 8);    tcg_gen_andi_i32(tmp, tmp, 0x00ff);    tcg_gen_shli_i32(var, var, 8);    tcg_gen_ext8s_i32(var, var);    tcg_gen_or_i32(var, var, tmp);    dead_tmp(tmp);}/* Unsigned bitfield extract.  */static void gen_ubfx(TCGv var, int shift, uint32_t mask){    if (shift)        tcg_gen_shri_i32(var, var, shift);    tcg_gen_andi_i32(var, var, mask);}/* Signed bitfield extract.  
*/static void gen_sbfx(TCGv var, int shift, int width){    uint32_t signbit;    if (shift)        tcg_gen_sari_i32(var, var, shift);    if (shift + width < 32) {        signbit = 1u << (width - 1);        tcg_gen_andi_i32(var, var, (1u << width) - 1);        tcg_gen_xori_i32(var, var, signbit);        tcg_gen_subi_i32(var, var, signbit);    }}/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask){    tcg_gen_andi_i32(val, val, mask);    tcg_gen_shli_i32(val, val, shift);    tcg_gen_andi_i32(base, base, ~(mask << shift));    tcg_gen_or_i32(dest, base, val);}/* Round the top 32 bits of a 64-bit value.  */static void gen_roundqd(TCGv a, TCGv b){    tcg_gen_shri_i32(a, a, 31);    tcg_gen_add_i32(a, a, b);}/* FIXME: Most targets have native widening multiplication.   It would be good to use that instead of a full wide multiply.  *//* 32x32->64 multiply.  Marks inputs as dead.  */static TCGv gen_mulu_i64_i32(TCGv a, TCGv b){    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);    tcg_gen_extu_i32_i64(tmp1, a);    dead_tmp(a);    tcg_gen_extu_i32_i64(tmp2, b);    dead_tmp(b);    tcg_gen_mul_i64(tmp1, tmp1, tmp2);    return tmp1;}static TCGv gen_muls_i64_i32(TCGv a, TCGv b){    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);    tcg_gen_ext_i32_i64(tmp1, a);    dead_tmp(a);    tcg_gen_ext_i32_i64(tmp2, b);    dead_tmp(b);    tcg_gen_mul_i64(tmp1, tmp1, tmp2);    return tmp1;}/* Unsigned 32x32->64 multiply.  */static void gen_op_mull_T0_T1(void){    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);    tcg_gen_mul_i64(tmp1, tmp1, tmp2);    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);    tcg_gen_shri_i64(tmp1, tmp1, 32);    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);}/* Signed 32x32->64 multiply.  
*/static void gen_imull(TCGv a, TCGv b){    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);    tcg_gen_ext_i32_i64(tmp1, a);    tcg_gen_ext_i32_i64(tmp2, b);    tcg_gen_mul_i64(tmp1, tmp1, tmp2);    tcg_gen_trunc_i64_i32(a, tmp1);    tcg_gen_shri_i64(tmp1, tmp1, 32);    tcg_gen_trunc_i64_i32(b, tmp1);}#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])/* Swap low and high halfwords.  */static void gen_swap_half(TCGv var){    TCGv tmp = new_tmp();    tcg_gen_shri_i32(tmp, var, 16);    tcg_gen_shli_i32(var, var, 16);    tcg_gen_or_i32(var, var, tmp);    dead_tmp(tmp);}/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.    tmp = (t0 ^ t1) & 0x8000;    t0 &= ~0x8000;    t1 &= ~0x8000;    t0 = (t0 + t1) ^ tmp; */static void gen_add16(TCGv t0, TCGv t1){    TCGv tmp = new_tmp();    tcg_gen_xor_i32(tmp, t0, t1);    tcg_gen_andi_i32(tmp, tmp, 0x8000);    tcg_gen_andi_i32(t0, t0, ~0x8000);    tcg_gen_andi_i32(t1, t1, ~0x8000);    tcg_gen_add_i32(t0, t0, t1);    tcg_gen_xor_i32(t0, t0, tmp);    dead_tmp(tmp);    dead_tmp(t1);}#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))/* Set CF to the top bit of var.  */static void gen_set_CF_bit31(TCGv var){    TCGv tmp = new_tmp();    tcg_gen_shri_i32(tmp, var, 31);    gen_set_CF(var);    dead_tmp(tmp);}/* Set N and Z flags from var.  */static inline void gen_logic_CC(TCGv var){    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));}/* T0 += T1 + CF.  */static void gen_adc_T0_T1(void){    TCGv tmp;    gen_op_addl_T0_T1();    tmp = load_cpu_field(CF);    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);    dead_tmp(tmp);}/* dest = T0 - T1 + CF - 1.  
*/static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1){    TCGv tmp;    tcg_gen_sub_i32(dest, t0, t1);    tmp = load_cpu_field(CF);    tcg_gen_add_i32(dest, dest, tmp);    tcg_gen_subi_i32(dest, dest, 1);    dead_tmp(tmp);}#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])/* FIXME:  Implement this natively.  */static inline void tcg_gen_not_i32(TCGv t0, TCGv t1){    tcg_gen_xori_i32(t0, t1, ~0);}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?