ghelpers.c
//      case AMD64G_CC_OP_SUBW:
//         return ((UInt)(cc_dep1 & 0xFFFF)) < ((UInt)(cc_dep2 & 0xFFFF))
//                   ? AMD64G_CC_MASK_C : 0;
//      case AMD64G_CC_OP_SUBB:
//         return ((UInt)(cc_dep1 & 0xFF)) < ((UInt)(cc_dep2 & 0xFF))
//                   ? AMD64G_CC_MASK_C : 0;
//      case AMD64G_CC_OP_INCL:
//      case AMD64G_CC_OP_DECL:
//         return cc_ndep & AMD64G_CC_MASK_C;
      default:
         break;
   }

#  if PROFILE_RFLAGS
   tabc_fast[cc_op]--;
   tabc_slow[cc_op]++;
#  endif

   return amd64g_calculate_rflags_all_WRK(cc_op, cc_dep1, cc_dep2, cc_ndep)
          & AMD64G_CC_MASK_C;
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* returns 1 or 0 */
ULong amd64g_calculate_condition ( ULong/*AMD64Condcode*/ cond,
                                   ULong cc_op,
                                   ULong cc_dep1,
                                   ULong cc_dep2,
                                   ULong cc_ndep )
{
   ULong rflags = amd64g_calculate_rflags_all_WRK(cc_op, cc_dep1,
                                                  cc_dep2, cc_ndep);
   ULong of, sf, zf, cf, pf;
   ULong inv = cond & 1;

#  if PROFILE_RFLAGS
   if (!initted) initCounts();
   tab_cond[cc_op][cond]++;
   n_calc_cond++;
   if (SHOW_COUNTS_NOW) showCounts();
#  endif

   switch (cond) {
      case AMD64CondNO:
      case AMD64CondO: /* OF == 1 */
         of = rflags >> AMD64G_CC_SHIFT_O;
         return 1 & (inv ^ of);

      case AMD64CondNZ:
      case AMD64CondZ: /* ZF == 1 */
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ zf);

      case AMD64CondNB:
      case AMD64CondB: /* CF == 1 */
         cf = rflags >> AMD64G_CC_SHIFT_C;
         return 1 & (inv ^ cf);

      case AMD64CondNBE:
      case AMD64CondBE: /* (CF or ZF) == 1 */
         cf = rflags >> AMD64G_CC_SHIFT_C;
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ (cf | zf));

      case AMD64CondNS:
      case AMD64CondS: /* SF == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         return 1 & (inv ^ sf);

      case AMD64CondNP:
      case AMD64CondP: /* PF == 1 */
         pf = rflags >> AMD64G_CC_SHIFT_P;
         return 1 & (inv ^ pf);

      case AMD64CondNL:
      case AMD64CondL: /* (SF xor OF) == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         of = rflags >> AMD64G_CC_SHIFT_O;
         return 1 & (inv ^ (sf ^ of));

      case AMD64CondNLE:
      case AMD64CondLE: /* ((SF xor OF) or ZF) == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         of = rflags >> AMD64G_CC_SHIFT_O;
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ ((sf ^ of) | zf));

      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("amd64g_calculate_condition"
                    "( %llu, %llu, 0x%llx, 0x%llx, 0x%llx )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_ndep );
         vpanic("amd64g_calculate_condition");
   }
}


/* VISIBLE TO LIBVEX CLIENT */
ULong LibVEX_GuestAMD64_get_rflags ( /*IN*/VexGuestAMD64State* vex_state )
{
   ULong rflags = amd64g_calculate_rflags_all_WRK(
                     vex_state->guest_CC_OP,
                     vex_state->guest_CC_DEP1,
                     vex_state->guest_CC_DEP2,
                     vex_state->guest_CC_NDEP
                  );
   Long dflag = vex_state->guest_DFLAG;
   vassert(dflag == 1 || dflag == -1);
   if (dflag == -1)
      rflags |= (1<<10);   /* DF (direction flag) */
   if (vex_state->guest_IDFLAG == 1)
      rflags |= (1<<21);   /* ID flag */
   return rflags;
}
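
/* Illustrative sketch (hypothetical helper, not from the VEX sources):
   how the thunk values left by generated code map onto a condition
   query.  After "cmpq %rsi, %rdi" the thunk holds
   cc_op = AMD64G_CC_OP_SUBQ, cc_dep1 = %rdi and cc_dep2 = %rsi; a
   following "jl" then asks for AMD64CondL.  Note also how the low bit
   of the condition number selects the negated form ("inv = cond & 1"
   above), which is why each switch case handles a Cond/CondN pair. */
static ULong example_cmpq_jl ( ULong rdi, ULong rsi )
{
   /* 1 iff rdi <s rsi, which is exactly what cmp/jl tests.
      cc_ndep is unused by the SUBQ case, so 0 is passed. */
   return amd64g_calculate_condition( AMD64CondL, AMD64G_CC_OP_SUBQ,
                                      rdi, rsi, 0 );
}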

/*---------------------------------------------------------------*/
/*--- %rflags translation-time function specialisers.         ---*/
/*--- These help iropt specialise calls to the above run-time ---*/
/*--- %rflags functions.                                      ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */

static Bool isU64 ( IRExpr* e, ULong n )
{
   return toBool( e->tag == Iex_Const
                  && e->Iex.Const.con->tag == Ico_U64
                  && e->Iex.Const.con->Ico.U64 == n );
}

IRExpr* guest_amd64_spechelper ( HChar* function_name, IRExpr** args )
{
#  define unop(_op,_a1)       IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2)  IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU64(_n)           IRExpr_Const(IRConst_U64(_n))
#  define mkU8(_n)            IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;

#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif

   /* --------- specialising "amd64g_calculate_condition" --------- */

   if (vex_streq(function_name, "amd64g_calculate_condition")) {
      /* specialise calls to the above "calculate condition" function */
      IRExpr *cond, *cc_op, *cc_dep1, *cc_dep2;
      vassert(arity == 5);
      cond    = args[0];
      cc_op   = args[1];
      cc_dep1 = args[2];
      cc_dep2 = args[3];

      /*---------------- ADDQ ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_ADDQ) && isU64(cond, AMD64CondZ)) {
         /* long long add, then Z --> test (dst+src == 0) */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64,
                           binop(Iop_Add64, cc_dep1, cc_dep2),
                           mkU64(0)));
      }

      /*---------------- SUBQ ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondZ)) {
         /* long long sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNZ)) {
         /* long long sub/cmp, then NZ --> test dst!=src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpNE64, cc_dep1, cc_dep2));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondL)) {
         /* long long sub/cmp, then L (signed less than)
            --> test dst <s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64S, cc_dep1, cc_dep2));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondB)) {
         /* long long sub/cmp, then B (unsigned less than)
            --> test dst <u src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNB)) {
         /* long long sub/cmp, then NB (unsigned greater than or equal)
            --> test src <=u dst */
         /* Note, args are opposite way round from the usual */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondBE)) {
         /* long long sub/cmp, then BE (unsigned less than or equal)
            --> test dst <=u src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
      }

      /*---------------- SUBL ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondZ)) {
         /* long sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64,
                           binop(Iop_Shl64, cc_dep1, mkU8(32)),
                           binop(Iop_Shl64, cc_dep2, mkU8(32))));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNZ)) {
         /* long sub/cmp, then NZ --> test dst!=src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpNE64,
                           binop(Iop_Shl64, cc_dep1, mkU8(32)),
                           binop(Iop_Shl64, cc_dep2, mkU8(32))));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondL)) {
         /* long sub/cmp, then L (signed less than)
            --> test dst <s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64S,
                           binop(Iop_Shl64, cc_dep1, mkU8(32)),
                           binop(Iop_Shl64, cc_dep2, mkU8(32))));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondLE)) {
         /* long sub/cmp, then LE (signed less than or equal)
            --> test dst <=s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64, cc_dep1, mkU8(32)),
                           binop(Iop_Shl64, cc_dep2, mkU8(32))));
      }
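
      /* Illustrative note on why the Shl64-by-32 trick in the SUBL
         cases is sound: shifting both 32-bit operands into the upper
         half of a 64-bit value preserves equality and both signed and
         unsigned order.  Worked example: for dst = 0xFFFFFFFF (-1 as a
         32-bit int) and src = 1,
            dst << 32 = 0xFFFFFFFF00000000
            src << 32 = 0x0000000100000000
         so CmpLT64S reports -1 <s 1 and CmpLT64U reports
         0xFFFFFFFF >u 1, both correct, without needing separate
         32-bit comparison primitives. */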
      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondBE)) {
         /* long sub/cmp, then BE (unsigned less than or equal)
            --> test dst <=u src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64U,
                           binop(Iop_Shl64, cc_dep1, mkU8(32)),
                           binop(Iop_Shl64, cc_dep2, mkU8(32))));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNBE)) {
         /* long sub/cmp, then NBE (unsigned greater than)
            --> test src <u dst */
         /* Note, args are opposite way round from the usual */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64U,
                           binop(Iop_Shl64, cc_dep2, mkU8(32)),
                           binop(Iop_Shl64, cc_dep1, mkU8(32))));
      }

      /*---------------- SUBW ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondZ)) {
         /* word sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ16,
                           unop(Iop_64to16, cc_dep1),
                           unop(Iop_64to16, cc_dep2)));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondLE)) {
         /* word sub/cmp, then LE (signed less than or equal)
            --> test dst <=s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64, cc_dep1, mkU8(48)),
                           binop(Iop_Shl64, cc_dep2, mkU8(48))));
      }

      /*---------------- SUBB ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondZ)) {
         /* byte sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ8,
                           unop(Iop_64to8, cc_dep1),
                           unop(Iop_64to8, cc_dep2)));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
         /* byte sub/cmp, then NZ --> test dst!=src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpNE8,
                           unop(Iop_64to8, cc_dep1),
                           unop(Iop_64to8, cc_dep2)));
      }
      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondS)
          && isU64(cc_dep2, 0)) {
         /* byte sub/cmp of zero, then S --> test (dst-0 <s 0)
                                         --> test dst <s 0
                                         --> (ULong)dst[7]
            This is yet another scheme by which gcc figures out if the
            top bit of a byte is 1 or 0.  See also LOGICB/CondS below. */
         /* Note: isU64(cc_dep2, 0) is correct, even though this is
            for an 8-bit comparison, since the args to the helper
            function are always U64s. */
         return binop(Iop_And64,
                      binop(Iop_Shr64, cc_dep1, mkU8(7)),
                      mkU64(1));
      }

//      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
//         /* byte sub/cmp, then NZ --> test dst!=src */
//         return unop(Iop_32Uto64,
//                     unop(Iop_1Uto32,
//                          binop(Iop_CmpNE8,
//                                unop(Iop_32to8, unop(Iop_64to32, cc_dep1)),
//                                unop(Iop_32to8, unop(Iop_64to32, cc_dep2)))));
//      }

//..       if (isU32(cc_op, AMD64G_CC_OP_SUBB) && isU32(cond, X86CondNBE)) {
//..          /* long sub/cmp, then NBE (unsigned greater than)
//..             --> test src <u dst */
//..          /* Note, args are opposite way round from the usual */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpLT32U,
//..                            binop(Iop_And32, cc_dep2, mkU32(0xFF)),
//..                            binop(Iop_And32, cc_dep1, mkU32(0xFF))));
//..       }

      /*---------------- LOGICQ ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondZ)) {
         /* long long and/or/xor, then Z --> test dst==0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
      }
      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondL)) {
         /* long long and/or/xor, then L
            logic ops set OF to 0, so L reduces to SF, i.e. dst <s 0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64S, cc_dep1, mkU64(0)));
      }
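
      /* Illustrative worked instance of the LOGICQ rules (hypothetical
         guest code): "testq %rax, %rax ; jz" leaves cc_op =
         AMD64G_CC_OP_LOGICQ with cc_dep1 = %rax (the AND result), so
         the CondZ rule above rewrites the condition query to
            1Uto64(CmpEQ64(rax, 0))
         and the full rflags computation never needs to run. */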