ghelpers.c
}

/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* returns 1 or 0 */
ULong amd64g_calculate_condition ( ULong/*AMD64Condcode*/ cond,
                                   ULong cc_op,
                                   ULong cc_dep1,
                                   ULong cc_dep2,
                                   ULong cc_ndep )
{
   ULong rflags = amd64g_calculate_rflags_all_WRK(cc_op, cc_dep1,
                                                  cc_dep2, cc_ndep);
   ULong of, sf, zf, cf, pf;
   ULong inv = cond & 1;

#  if PROFILE_RFLAGS
   if (!initted) initCounts();
   tab_cond[cc_op][cond]++;
   n_calc_cond++;
   if (SHOW_COUNTS_NOW) showCounts();
#  endif

   switch (cond) {
      case AMD64CondNO:
      case AMD64CondO: /* OF == 1 */
         of = rflags >> AMD64G_CC_SHIFT_O;
         return 1 & (inv ^ of);

      case AMD64CondNZ:
      case AMD64CondZ: /* ZF == 1 */
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ zf);

      case AMD64CondNB:
      case AMD64CondB: /* CF == 1 */
         cf = rflags >> AMD64G_CC_SHIFT_C;
         return 1 & (inv ^ cf);
         break;

      case AMD64CondNBE:
      case AMD64CondBE: /* (CF or ZF) == 1 */
         cf = rflags >> AMD64G_CC_SHIFT_C;
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ (cf | zf));
         break;

      case AMD64CondNS:
      case AMD64CondS: /* SF == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         return 1 & (inv ^ sf);

      case AMD64CondNP:
      case AMD64CondP: /* PF == 1 */
         pf = rflags >> AMD64G_CC_SHIFT_P;
         return 1 & (inv ^ pf);

      case AMD64CondNL:
      case AMD64CondL: /* (SF xor OF) == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         of = rflags >> AMD64G_CC_SHIFT_O;
         return 1 & (inv ^ (sf ^ of));
         break;

      case AMD64CondNLE:
      case AMD64CondLE: /* ((SF xor OF) or ZF) == 1 */
         sf = rflags >> AMD64G_CC_SHIFT_S;
         of = rflags >> AMD64G_CC_SHIFT_O;
         zf = rflags >> AMD64G_CC_SHIFT_Z;
         return 1 & (inv ^ ((sf ^ of) | zf));
         break;

      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("amd64g_calculate_condition"
                    "( %llu, %llu, 0x%llx, 0x%llx, 0x%llx )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_ndep );
         vpanic("amd64g_calculate_condition");
   }
}

/* VISIBLE TO LIBVEX CLIENT */
ULong LibVEX_GuestAMD64_get_rflags ( /*IN*/VexGuestAMD64State* vex_state )
{
   ULong rflags = amd64g_calculate_rflags_all_WRK(
                     vex_state->guest_CC_OP,
                     vex_state->guest_CC_DEP1,
                     vex_state->guest_CC_DEP2,
                     vex_state->guest_CC_NDEP
                  );
   Long dflag = vex_state->guest_DFLAG;
   vassert(dflag == 1 || dflag == -1);
   if (dflag == -1)
      rflags |= (1<<10);
   if (vex_state->guest_IDFLAG == 1)
      rflags |= (1<<21);
   return rflags;
}
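/* Aside (not in the original file): the "inv = cond & 1" trick above
   works because each condition code sits next to its complement at
   adjacent even/odd encodings, so a single flag extraction serves both
   of the fall-through case labels.  A minimal standalone sketch of the
   idea -- the DEMO_* constant and the codes 4/5 for Z/NZ are assumed
   here, mirroring the hardware RFLAGS layout (ZF at bit 6) and the
   usual x86 condition encodings. */
#if 0   /* illustrative demo only; not part of ghelpers.c */
#include <stdio.h>

#define DEMO_CC_SHIFT_Z 6   /* assumed: ZF bit position, as in RFLAGS */

/* Even code tests ZF == 1; the odd code of the pair tests ZF == 0. */
static unsigned long eval_z_pair ( unsigned long cond,
                                   unsigned long rflags )
{
   unsigned long inv = cond & 1;
   unsigned long zf  = rflags >> DEMO_CC_SHIFT_Z;
   return 1 & (inv ^ zf);
}

int main ( void )
{
   unsigned long rflags = 1UL << DEMO_CC_SHIFT_Z;  /* ZF set */
   printf("Z:  %lu\n", eval_z_pair(4, rflags));    /* prints 1 */
   printf("NZ: %lu\n", eval_z_pair(5, rflags));    /* prints 0 */
   return 0;
}
#endif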
/*---------------------------------------------------------------*/
/*--- %rflags translation-time function specialisers.         ---*/
/*--- These help iropt specialise calls the above run-time    ---*/
/*--- %rflags functions.                                      ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */

static Bool isU64 ( IRExpr* e, ULong n )
{
   return toBool( e->tag == Iex_Const
                  && e->Iex.Const.con->tag == Ico_U64
                  && e->Iex.Const.con->Ico.U64 == n );
}

IRExpr* guest_amd64_spechelper ( HChar* function_name,
                                 IRExpr** args )
{
#  define unop(_op,_a1)      IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU64(_n)          IRExpr_Const(IRConst_U64(_n))
#  define mkU8(_n)           IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;
#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif

   /* --------- specialising "amd64g_calculate_condition" --------- */

   if (vex_streq(function_name, "amd64g_calculate_condition")) {
      /* specialise calls to above "calculate condition" function */
      IRExpr *cond, *cc_op, *cc_dep1, *cc_dep2;
      vassert(arity == 5);
      cond    = args[0];
      cc_op   = args[1];
      cc_dep1 = args[2];
      cc_dep2 = args[3];

      /*---------------- ADDQ ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_ADDQ) && isU64(cond, AMD64CondZ)) {
         /* long long add, then Z --> test (dst+src == 0) */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64,
                           binop(Iop_Add64, cc_dep1, cc_dep2),
                           mkU64(0)));
      }

      /*---------------- SUBL ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondZ)) {
         /* long sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ32,
                           unop(Iop_64to32,cc_dep1),
                           unop(Iop_64to32,cc_dep2)));
      }

//..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondNZ)) {
//..          /* long sub/cmp, then NZ --> test dst!=src */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpNE32, cc_dep1, cc_dep2));
//..       }

      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondL)) {
         /* long sub/cmp, then L (signed less than)
            --> test dst <s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLT64S,
                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
                           binop(Iop_Shl64,cc_dep2,mkU8(32))));
      }

      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondLE)) {
         /* long sub/cmp, then LE (signed less than or equal)
            --> test dst <=s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
                           binop(Iop_Shl64,cc_dep2,mkU8(32))));
      }

//..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondBE)) {
//..          /* long sub/cmp, then BE (unsigned less than or equal)
//..             --> test dst <=u src */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
//..       }
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondB)) {
//..          /* long sub/cmp, then B (unsigned less than)
//..             --> test dst <u src */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
//..       }
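/* Aside (not in the original file): the SUBL/CondL and SUBL/CondLE
   specialisations above get a signed 32-bit compare out of 64-bit IR
   by shifting both operands into the top half of a 64-bit word: after
   "x << 32", a signed 64-bit compare orders values exactly as a signed
   32-bit compare of the low halves would, and any rubbish in the upper
   32 bits of the inputs is shifted out.  A standalone sketch checking
   that identity in plain C (the helper name is invented; the real
   transformation is emitted as IR, not executed like this): */
#if 0   /* illustrative demo only; not part of ghelpers.c */
#include <assert.h>
#include <stdint.h>

/* CmpLT64S(Shl64(x,32), Shl64(y,32)) == signed 32-bit (x < y). */
static int lt32s_via_shl64 ( uint64_t x, uint64_t y )
{
   return (int64_t)(x << 32) < (int64_t)(y << 32);
}

int main ( void )
{
   uint64_t cases[][2] = {
      { 1, 2 }, { 2, 1 },
      { 0xFFFFFFFFu /* -1 */, 0 },
      { 0x80000000u /* INT32_MIN */, 0x7FFFFFFFu /* INT32_MAX */ },
      /* junk in the upper halves must not affect the result */
      { 0xDEADBEEF00000005ull, 0xCAFEBABE00000007ull },
   };
   for (unsigned i = 0; i < sizeof(cases)/sizeof(cases[0]); i++) {
      uint64_t x = cases[i][0], y = cases[i][1];
      assert( lt32s_via_shl64(x, y) == ((int32_t)x < (int32_t)y) );
   }
   return 0;
}
#endif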
      /*---------------- SUBW ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondZ)) {
         /* word sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ16,
                           unop(Iop_64to16,cc_dep1),
                           unop(Iop_64to16,cc_dep2)));
      }

      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondLE)) {
         /* 16-bit sub/cmp, then LE (signed less than or equal)
            --> test dst <=s src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64,cc_dep1,mkU8(48)),
                           binop(Iop_Shl64,cc_dep2,mkU8(48))));
      }

      /*---------------- SUBB ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondZ)) {
         /* byte sub/cmp, then Z --> test dst==src */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ8,
                           unop(Iop_64to8,cc_dep1),
                           unop(Iop_64to8,cc_dep2)));
      }

//       if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
//          /* byte sub/cmp, then NZ --> test dst!=src */
//          return unop(Iop_32Uto64,
//                      unop(Iop_1Uto32,
//                           binop(Iop_CmpNE8,
//                                 unop(Iop_32to8,unop(Iop_64to32,cc_dep1)),
//                                 unop(Iop_32to8,unop(Iop_64to32,cc_dep2)))));
//       }

//..       if (isU32(cc_op, AMD64G_CC_OP_SUBB) && isU32(cond, X86CondNBE)) {
//..          /* byte sub/cmp, then NBE (unsigned greater than)
//..             --> test src <=u dst */
//..          /* Note, args are opposite way round from the usual */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpLT32U,
//..                            binop(Iop_And32,cc_dep2,mkU32(0xFF)),
//..                            binop(Iop_And32,cc_dep1,mkU32(0xFF))));
//..       }

      /*---------------- LOGICL ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondZ)) {
         /* long and/or/xor, then Z --> test dst==0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpEQ64,
                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
                           mkU64(0)));
      }

      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondNZ)) {
         /* long and/or/xor, then NZ --> test dst!=0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpNE64,
                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
                           mkU64(0)));
      }

//..       if (isU32(cc_op, AMD64G_CC_OP_LOGICL) && isU32(cond, X86CondS)) {
//..          /* long and/or/xor, then S --> test dst <s 0 */
//..          return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(0)));
//..       }

      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondLE)) {
         /* long and/or/xor, then LE
            This is pretty subtle.  LOGIC sets SF and ZF according to the
            result and makes OF be zero.  LE computes (SF ^ OF) | ZF, but
            OF is zero, so this reduces to SF | ZF -- which will be 1 iff
            the result is <=signed 0.  Hence ... */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
                           mkU64(0)));
      }

//..       if (isU32(cc_op, AMD64G_CC_OP_LOGICL) && isU32(cond, X86CondBE)) {
//..          /* long and/or/xor, then BE
//..             LOGIC sets ZF according to the result and makes CF be zero.
//..             BE computes (CF | ZF), but CF is zero, so this reduces to ZF
//..             -- which will be 1 iff the result is zero.  Hence ... */
//..          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
//..       }
//..
//..       /*---------------- LOGICW ----------------*/
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_LOGICW) && isU32(cond, X86CondZ)) {
//..          /* word and/or/xor, then Z --> test dst==0 */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(0xFFFF)),
//..                            mkU32(0)));
//..       }
//..
//..       /*---------------- LOGICB ----------------*/
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_LOGICB) && isU32(cond, X86CondZ)) {
//..          /* byte and/or/xor, then Z --> test dst==0 */
//..          return unop(Iop_1Uto32,
//..                      binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(255)),
//..                            mkU32(0)));
//..       }
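/* Aside (not in the original file): the LOGICL/CondLE comment above
   argues that, because the logical ops force OF to zero, the LE
   predicate (SF ^ OF) | ZF collapses to SF | ZF, which is 1 exactly
   when the result is signed <= 0.  A standalone sketch checking that
   reduction directly, with the flags spelled out by hand purely for
   the demo: */
#if 0   /* illustrative demo only; not part of ghelpers.c */
#include <assert.h>
#include <stdint.h>

/* For a logical-op result r: SF = sign bit, ZF = (r == 0), OF = 0. */
static void check_logic_le ( int32_t r )
{
   int sf = (r < 0);
   int zf = (r == 0);
   int of = 0;                        /* and/or/xor clear OF */
   assert( ((sf ^ of) | zf) == (sf | zf) );
   assert( (sf | zf) == (r <= 0) );
}

int main ( void )
{
   int32_t samples[] = { 0, 1, -1, 42, INT32_MIN, INT32_MAX };
   for (unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
      check_logic_le(samples[i]);
   return 0;
}
#endif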
      /*---------------- INCB ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_INCB) && isU64(cond, AMD64CondLE)) {
         /* 8-bit inc, then LE --> test result <=s 0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpLE64S,
                           binop(Iop_Shl64,cc_dep1,mkU8(56)),
                           mkU64(0)));
      }

      /*---------------- DECW ----------------*/

      if (isU64(cc_op, AMD64G_CC_OP_DECW) && isU64(cond, AMD64CondNZ)) {
         /* 16-bit dec, then NZ --> test dst != 0 */
         return unop(Iop_1Uto64,
                     binop(Iop_CmpNE64,
                           binop(Iop_Shl64,cc_dep1,mkU8(48)),
                           mkU64(0)));
      }

//..       /*---------------- DECL ----------------*/
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_DECL) && isU32(cond, X86CondZ)) {
//..          /* dec L, then Z --> test dst == 0 */
//..          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
//..       }
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_DECL) && isU32(cond, X86CondS)) {
//..          /* dec L, then S --> compare DST <s 0 */
//..          return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(0)));
//..       }
//..
//..       /*---------------- SHRL ----------------*/
//..
//..       if (isU32(cc_op, AMD64G_CC_OP_SHRL) && isU32(cond, X86CondZ)) {
//..          /* SHRL, then Z --> test dep1 == 0 */
//..          return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
//..       }

      /*---------------- COPY ----------------*/
      /* This can happen, as a result of amd64 FP compares: "comisd ... ;
         jbe" for example. */

      if (isU64(cc_op, AMD64G_CC_OP_COPY)
          && (isU64(cond, AMD64CondBE) || isU64(cond, AMD64CondNBE))) {
         /* COPY, then BE --> extract C and Z from dep1, and test (C
            or Z == 1). */
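/* (The listing is truncated here, mid-way through the COPY case.)

   Aside (not in the original file): the comment above describes the
   intended computation -- for AMD64G_CC_OP_COPY, cc_dep1 already holds
   literal rflags, so BE is decided by pulling out the C and Z bits and
   OR-ing them, with NBE as the inverse.  A plain-C sketch of the
   predicate the generated IR would compute; the DEMO_* shift values
   are assumed stand-ins for the real AMD64G_CC_SHIFT_* constants
   (hardware RFLAGS puts CF at bit 0 and ZF at bit 6): */
#if 0   /* illustrative demo only; not part of ghelpers.c */
#include <assert.h>

#define DEMO_CC_SHIFT_C 0   /* assumed CF bit position */
#define DEMO_CC_SHIFT_Z 6   /* assumed ZF bit position */

static unsigned long copy_be ( unsigned long dep1, int is_nbe )
{
   unsigned long cf = (dep1 >> DEMO_CC_SHIFT_C) & 1;
   unsigned long zf = (dep1 >> DEMO_CC_SHIFT_Z) & 1;
   unsigned long be = cf | zf;        /* BE: taken if CF or ZF set */
   return is_nbe ? (be ^ 1) : be;     /* NBE is the complement */
}

int main ( void )
{
   assert( copy_be(1UL << DEMO_CC_SHIFT_C, 0) == 1 );  /* CF set -> BE */
   assert( copy_be(1UL << DEMO_CC_SHIFT_Z, 0) == 1 );  /* ZF set -> BE */
   assert( copy_be(0, 0) == 0 && copy_be(0, 1) == 1 );
   return 0;
}
#endif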