/* xlcore.c */
/* NOTE(review): the statements below are the tail of an emitter function
   whose head lies above this chunk (its opening brace is not visible
   here); byte values kept exactly as found. */
  /* movzwl %cx,%ecx */
  bb_emit_byte(M, 0x0Fu); // 0F B7 /R
  bb_emit_byte(M, 0xB7u);
  bb_emit_byte(M, 0xC9u); // 11 001 001

  /* lea M->hash_table(,%ecx,2),%ecx */
  bb_emit_byte(M, 0x8Du); // 8D /r
  bb_emit_byte(M, 0x0Cu); // 00 001 100
  bb_emit_byte(M, 0x4du); // 01 001 101
  bb_emit_w32(M, (unsigned long)M->hash_table);
#endif

  //50: ff e1   jmp    *%ecx
  bb_emit_byte(M, 0xffu);
  bb_emit_byte(M, 0xe1u);

  /* /\* jmp *M->hash_table(,%ecx,4) *\/ */
  /* bb_emit_byte(M, 0xFFu); // FF /4 */
  /* bb_emit_byte(M, 0x24u); // 00 100 100 */
  /* bb_emit_byte(M, 0x8Du); // 10 001 101 */
  /* bb_emit_w32(M, (unsigned long) M->hash_table); */
}

#ifdef SIEVE_WITHOUT_PPF
/*
 * bb_setup_fast_dispatch_bb (variant used when the sieve works without
 * PUSHF/POPF): emits the hand-constructed stub that transfers control from
 * one translated basic block to the next through the sieve hash table.
 *
 * Emitted x86 sequence:
 *   push  %ecx                        ; save a scratch register
 *   mov   0x4(%esp),%ecx             ; load the guest target eip
 *   lea   0x0(,%ecx,2|4),%ecx        ; scale eip into a table offset
 *   movzwl %cx,%ecx                  ; truncate the offset to 16 bits
 *   lea   M->hash_table(,%ecx,4|2),%ecx ; address of the sieve bucket
 *   jmp   *%ecx                      ; jump into the bucket chain
 *
 * NOTE(review): the stub leaves %ecx pushed; presumably the sieve bucket
 * code emitted by bb_setup_hash_table performs the matching pop — confirm
 * against that emitter (not visible in this chunk).
 */
INLINE void
bb_setup_fast_dispatch_bb(machine_t *M)
{
  /* Emit the special BB that transfers control from one basic-block to
     another within the basic block cache. */

  /* push %ecx */
  bb_emit_byte(M, 0x51u);

  /* mov 0x4(%esp),%ecx */
  bb_emit_byte(M, 0x8bu); // 8b /r
  bb_emit_byte(M, 0x4cu); // 01 001 100
  bb_emit_byte(M, 0x24u); // 00 100 100
  bb_emit_byte(M, 0x4u);

#ifndef SMALL_HASH
  /* lea 0x0(,%ecx,2),%ecx */
  bb_emit_byte(M, 0x8Du); // 8D /r
  bb_emit_byte(M, 0x0Cu); // 00 001 100
  bb_emit_byte(M, 0x4du); // 01 001 101
  bb_emit_w32(M, 0x0u);   // This 0 word is needed.
                          // There is no other addressing mode.

  /* movzwl %cx,%ecx */
  bb_emit_byte(M, 0x0Fu); // 0F B7 /R
  bb_emit_byte(M, 0xB7u);
  bb_emit_byte(M, 0xC9u); // 11 001 001

  /* lea M->hash_table(,%ecx,4),%ecx */
  bb_emit_byte(M, 0x8Du); // 8D /r
  bb_emit_byte(M, 0x0Cu); // 00 001 100
  bb_emit_byte(M, 0x8du); // 10 001 101
  bb_emit_w32(M, (unsigned long)M->hash_table);
#else
  /* lea 0x0(,%ecx,4),%ecx */
  bb_emit_byte(M, 0x8Du); // 8D /r
  bb_emit_byte(M, 0x0Cu); // 00 001 100
  bb_emit_byte(M, 0x8du); // 10 001 101
  bb_emit_w32(M, 0x0u);   // This 0 word is needed.
                          // There is no other addressing mode.

  /* movzwl %cx,%ecx */
  bb_emit_byte(M, 0x0Fu); // 0F B7 /R
  bb_emit_byte(M, 0xB7u);
  bb_emit_byte(M, 0xC9u); // 11 001 001

  /* lea M->hash_table(,%ecx,2),%ecx */
  bb_emit_byte(M, 0x8Du); // 8D /r
  bb_emit_byte(M, 0x0Cu); // 00 001 100
  bb_emit_byte(M, 0x4du); // 01 001 101
  bb_emit_w32(M, (unsigned long)M->hash_table);
#endif

  //50: ff e1   jmp    *%ecx
  bb_emit_byte(M, 0xffu);
  bb_emit_byte(M, 0xe1u);

  /* /\* jmp *M->hash_table(,%ecx,4) *\/ */
  /* bb_emit_byte(M, 0xFFu); // FF /4 */
  /* bb_emit_byte(M, 0x24u); // 00 100 100 */
  /* bb_emit_byte(M, 0x8Du); // 10 001 101 */
  /* bb_emit_w32(M, (unsigned long) M->hash_table); */
}
#else
/*
 * bb_setup_fast_dispatch_bb (PUSHF variant): same purpose as above, but
 * implemented by computing the bucket address in the stack slot itself:
 *   pushf                            ; preserve guest flags
 *   pushl 4(%esp)                    ; duplicate the guest target eip
 *   andl  $SIEVE_HASH_MASK,(%esp)    ; hash: mask eip down to a table offset
 *   addl  $M->hash_table,(%esp)      ; turn offset into a bucket address
 *   ret                              ; pop + jump into the sieve bucket
 * The final RET is an indirect jump to the address just computed on the
 * stack; no general-purpose register is clobbered.
 */
INLINE void
bb_setup_fast_dispatch_bb(machine_t *M)
{
  /* Emit the special BB that transfers control from one basic-block to
     another within the basic block cache. */

  /* 0. PUSHF */
  bb_emit_byte (M, 0x9Cu);

  /* Pushl 4(%esp) */
  bb_emit_byte(M, 0xFFu); // FF /6
  bb_emit_byte(M, 0x74u); // 01 110 100
  bb_emit_byte(M, 0x24u); // 00 100 100
  bb_emit_byte(M, 0x4u);

  /* andl $(SIEVE_HASH_MASK), (%esp) */
  bb_emit_byte(M, 0x81u); // 81/4
  bb_emit_byte(M, 0x24u); // 00 100 100
  bb_emit_byte(M, 0x24u); // 00 100 100
  bb_emit_w32(M, SIEVE_HASH_MASK);

  /* add $(M->hash_table), (%esp) */
  bb_emit_byte(M, 0x81u); // 81/0
  bb_emit_byte(M, 0x04u); // 00 000 100
  bb_emit_byte(M, 0x24u); // 00 100 100
  bb_emit_w32(M, (unsigned long) M->hash_table);

  /* ret */
  bb_emit_byte(M, 0xC3u);
}
#endif /* SIEVE_WITHOUT_PPF */
#endif /* USE_SIEVE */

#ifdef PROFILE_BB_STATS
/* SPECIAL_BB(bb): emit one hand-constructed special block by calling
   bb_setup_<bb>(M); when profiling BB stats, also record a bb_entry
   covering the emitted byte range (source eip 0, flagged as
   hand-constructed). */
#define SPECIAL_BB(bb) do { \
  curr_bb_entry = make_bb_entry(M, 0, (unsigned long) M->bbOut, (unsigned long) COLD_PROC_ENTRY); \
  curr_bb_entry->flags = IS_HAND_CONSTRUCTED | IS_START_OF_TRACE | IS_END_OF_TRACE | NEEDS_RELOC; \
  bb_setup_##bb(M); \
  curr_bb_entry->src_bb_end_eip = 0; \
  curr_bb_entry->trans_bb_end_eip = (unsigned long)M->bbOut; \
} while(0)
#else
#define SPECIAL_BB(bb) do { \
  bb_setup_##bb(M); \
} while(0)
#endif

/*
 * bb_cache_init: lay out the basic-block cache from scratch.
 * In order: reserve space for the sieve hash table(s) at the start of the
 * cache, emit each hand-constructed dispatch stub (recording its address in
 * M first, since the stubs are emitted at M->bbOut), clear the BB directory
 * and the call hash table, and finally mark M->bbCache_main — the point
 * after the persistent stubs — where translated guest code will begin.
 */
static void
bb_cache_init(machine_t *M)
{
#ifdef PROFILE_BB_STATS
  bb_entry *curr_bb_entry;  /* used by the profiling SPECIAL_BB expansion */
#endif
  int i;

  M->bbOut = M->bbCache;
  M->bbLimit = M->bbCache + BBCACHE_SIZE;

#ifdef USE_SIEVE
#ifdef SEPARATE_SIEVES
  M->slow_dispatch_bb = M->bbOut + (NBUCKETS * sizeof(bucket_entry)) + (CNBUCKETS * sizeof(bucket_entry));
#else
  M->slow_dispatch_bb = M->bbOut + (NBUCKETS * sizeof(bucket_entry));
#endif

#ifdef SEPARATE_SIEVES
  /*** WARNING: sensitive to size of SLOW_DISPATCH_BB ***/
  /* NOTE(review): 59 appears to be the hand-counted byte size of the
     slow-dispatch stub — re-verify if that emitter ever changes. */
  M->cslow_dispatch_bb = M->slow_dispatch_bb + 59;
#endif

  /* Set up Sieve */
  M->hash_table = M->bbOut;
  bb_setup_hash_table(M);

#ifdef SEPARATE_SIEVES
  /* Set up Call Sieve */
  M->chash_table = M->bbOut;
  bb_setup_chash_table(M);
#endif
#else /* USE_SIEVE */
  M->slow_dispatch_bb = M->bbOut;
#endif /* USE_SIEVE */

  /* Set up BB-Directory */
  M->no_of_bbs = 0;
  for (i=0 ; i<LOOKUP_TABLE_SIZE ; i++)
    M->lookup_table[i] = NULL;

  SPECIAL_BB(slow_dispatch_bb);

#ifdef SEPARATE_SIEVES
  SPECIAL_BB(cslow_dispatch_bb);
#endif

  M->backpatch_and_dispatch_bb = M->bbOut;
  SPECIAL_BB(backpatch_and_dispatch_bb);

#ifdef USE_SIEVE
  M->fast_dispatch_bb = M->bbOut;
  SPECIAL_BB(fast_dispatch_bb);

#ifdef SEPARATE_SIEVES
  M->cfast_dispatch_bb = M->bbOut;
  SPECIAL_BB(cfast_dispatch_bb);
#endif /* SEPARATE_SIEVES */

#ifdef PROFILE_RET_MISS
  M->call_calls_fast_dispatch_bb = M->bbOut;
  SPECIAL_BB(call_calls_fast_dispatch_bb);
  M->ret_calls_fast_dispatch_bb = M->bbOut;
  SPECIAL_BB(ret_calls_fast_dispatch_bb);
#else /* PROFILE_RET_MISS */
#ifdef SIEVE_WITHOUT_PPF
  M->call_calls_fast_dispatch_bb = M->fast_dispatch_bb + 1; // Past the push %ecx
#else
  M->call_calls_fast_dispatch_bb = M->fast_dispatch_bb;
#endif
  M->ret_calls_fast_dispatch_bb = M->fast_dispatch_bb;
#endif /* PROFILE_RET_MISS */
#else /* USE_SIEVE */
  /* No sieve: every dispatch path funnels through the slow stub. */
  M->call_calls_fast_dispatch_bb = M->slow_dispatch_bb;
  M->ret_calls_fast_dispatch_bb = M->slow_dispatch_bb;
  M->fast_dispatch_bb = M->slow_dispatch_bb;
#endif /* USE_SIEVE */

  M->startup_slow_dispatch_bb = M->bbOut;
  SPECIAL_BB(startup_slow_dispatch_bb);

  /* Translated guest code starts here; everything before this point
     survives a cache wipe (see bb_cache_reinit). */
  M->bbCache_main = M->bbOut;

  for(i=0; i<CALL_TABLE_SIZE; i++)
    M->call_hash_table[i] = (unsigned long) M->ret_calls_fast_dispatch_bb;
}

static
/*
 * bb_cache_reinit: wipe the translated-code portion of the BB cache and
 * reset the lookup structures, keeping the sieve tables and the special
 * dispatch stubs laid down by bb_cache_init in place (M->bbOut is rewound
 * only to M->bbCache_main).  Under PROFILE_BB_STATS a full re-init is done
 * instead so fresh bb_entry records are created for the stubs.
 * (The 'static' storage-class keyword of this definition sits at the end
 * of the previous source line.)
 */
void
bb_cache_reinit(machine_t *M)
{
  int i;

#ifdef PROFILE_BB_STATS
  bb_cache_init(M);
  return;
#endif

  M->bbOut = M->bbCache;

#ifdef USE_SIEVE
  /* Re-emit the sieve buckets over the old ones at the cache start. */
  bb_setup_hash_table(M);
#ifdef SEPARATE_SIEVES
  bb_setup_chash_table(M);
#endif
#endif /* USE_SIEVE */

  /* Set up BB-Directory */
  M->no_of_bbs = 0;
  for (i=0 ; i<LOOKUP_TABLE_SIZE ; i++)
    M->lookup_table[i] = NULL;

  /* Skip past the persistent special stubs; translation restarts here. */
  M->bbOut = M->bbCache_main;

  for(i=0; i<CALL_TABLE_SIZE; i++)
    M->call_hash_table[i] = (unsigned long) M->ret_calls_fast_dispatch_bb;
}

/*
 * translate_instr: emit translated code for the single decoded instruction
 * in *ds, dispatching to its emitter function.  When INLINE_EMITTERS is on,
 * the common emit_normal case is routed to inline_emit_normal instead of
 * going through the function pointer.  Returns the emitter's result
 * (true when the instruction ends the basic block, judging by the caller's
 * use of isEndOfBB — confirm against the emitters' contract).
 */
INLINE bool
translate_instr(machine_t *M, decode_t *ds)
{
#ifdef INLINE_EMITTERS
  if ((void *)ds->emitfn == (void *)emit_normal){
    DEBUG(inline_emits)
      fprintf(DBG, "Tanslation Path - inline_emit_normal\n"); /* (sic: "Tanslation" typo is in the emitted log text) */
    return inline_emit_normal(M, ds);
  }
  else
#endif /* INLINE_EMITTERS */
    {
      DEBUG(inline_emits)
        fprintf(DBG, "Tanslation Path - NOT inline_emit_normal\n"); /* (sic) */
      return (ds->emitfn)(M, ds);
    }
}

/*
 * panic_decode_fail: target of the call emitted when eager decode-panic is
 * disabled; aborts with the guest eip and the offending byte address.
 */
static void
panic_decode_fail(unsigned long eip, unsigned long bAddr)
{
  panic("Illegal Instruction -- Decode had failed! eip = %lx byte = %lx\n", eip, bAddr);
}

#ifdef STATIC_PASS
/*
 * update_mem_next_eip: (static-pass mode) translate the guest address
 * M->next_eip into its in-memory location M->mem_next_eip by finding the
 * ELF section containing it, checking the cached section first.  Caches
 * the section index in M->curr_sec_index.
 * Returns true when the address lies OUTSIDE every known section (note the
 * inverted sense: the caller treats true as "jumping out of this ELF").
 */
static bool
update_mem_next_eip(machine_t *M)
{
  int i;
  i = M->curr_sec_index;
  bool found = false;

  /* Usually in the same section */
  if((M->sec_info[i].start <= M->next_eip) && (M->sec_info[i].end >= M->next_eip)) {
    found = true;
  }
  else {
    /* If not, find it the hard way */
    for(i=0;(!found) && (i<M->nsections); i++)
      if((M->sec_info[i].start <= M->next_eip) && (M->sec_info[i].end >= M->next_eip)) {
        found = true;
        break;
      }
  }

  if(found) {
    DEBUG(static_pass_addr_trans)
      fprintf(DBG, "\t#%lx: [%s]\n",M->next_eip, M->sec_info[i].name);
    M->mem_next_eip = (unsigned long) (M->sec_info[i].inmem + (M->next_eip - (unsigned long)M->sec_info[i].start));
    M->curr_sec_index = i;
  }
  else {
    DEBUG(static_pass_addr_trans)
      fprintf(DBG, "\t@%lx: Not Found\n",M->next_eip);
  }
  fflush(DBG);
  return !found;
}

/*
 * simple_patch: overwrite the 4 bytes at 'at' with the PC-relative
 * displacement to 'addr' (addr - (at + 4), i.e. relative to the byte after
 * the displacement).  Temporarily repoints M->bbOut at the patch site so
 * bb_emit_w32 can be reused, then restores it.
 */
static void
simple_patch(machine_t *M, unsigned long at, unsigned long addr)
{
  unsigned char *tmp = M->bbOut;
  M->bbOut = (unsigned char *)at;
  bb_emit_w32(M, addr - (at + 4));
  M->bbOut = tmp;
}
#endif

#define PATCH_BLOCK_LEN 13 /* WARNING: Sensitive to size of Patch-block */
#define MAX_PATCH_BLOCK_BYTES (PATCH_ARRAY_LEN * PATCH_BLOCK_LEN)

/* Conservative estimate used to determine if there is more room
   for this BB. Space needed for all patch_blocks + space for at
   least one instruction.
   I guess no emitted sequence of instructions per single instruction
   currently exceeds 64 bytes. If it does, fix the next line */
#define BYTES_NEEDED_AT_THE_END (MAX_PATCH_BLOCK_BYTES + 64)
#define ROOM_FOR_BB(M) ((M->bbLimit - M->bbOut) > BYTES_NEEDED_AT_THE_END)
#define MORE_FREE_PATCH_BLOCKS(M) (M->patch_count <= (PATCH_ARRAY_LEN - 4)) /* Leave some extra room for cases like call that need multiple patch_blocks */

/* THE Translator --
   Returns:
   - a pointer to the bb_entry of the required destination
   - M->jmp_target holds the bb address of the destination
   (There is no peculiar reason to have this convention, it is just
   a hack to avoid emitting code to get the jmp destination from
   the bb_entry.
The translator has to be locked prior to the backpatcher anyway, so this does not present any new problems for locking */bb_entry * xlate_bb(machine_t *M){ decode_t ds; /* Decode Structure - filled up by do_decode and used by translate_instr */ bool isEndOfBB = true; /* Flag to indicate encountering of basic-block terminating instruction */ bool goingOutofElf = true; int i, j; unsigned char * tmp; bb_entry *prev_bb_entry = NULL; bb_entry *curr_bb_entry = lookup_bb_eip(M, M->fixregs.eip), *temp_entry; unsigned long long start_time; unsigned long long end_time;#ifdef PROFILE_BB_CNT bool inc_emitted = false;#endif //if(M->trigger) { // fprintf(DBG, "Enter xlate_bb: %lx\n", M->fixregs.eip); // fflush(DBG); // M->trigger = false; //} /* If the required bb is already found, just return */ if((curr_bb_entry != NULL) && (curr_bb_entry->trans_bb_eip != NOT_YET_TRANSLATED)) { DEBUG(xlate) { fprintf(DBG, "Entering New translation in Proc %lx\n", curr_bb_entry->proc_entry); fflush(DBG); } M->jmp_target = (unsigned char *)curr_bb_entry->trans_bb_eip; return curr_bb_entry; } #ifdef PROFILE_TRANSLATION start_time = read_timer();#endif if(curr_bb_entry == NULL) { if (M->comming_from_call_indirect) { curr_bb_entry = make_bb_entry(M, M->fixregs.eip, (unsigned long) M->bbOut, (unsigned long) CALL_HASH_BUCKET(M->call_hash_table, M->fixregs.eip)); } else { curr_bb_entry = make_bb_entry(M, M->fixregs.eip, (unsigned long) M->bbOut, (unsigned long) COLD_PROC_ENTRY); DEBUG(xlate) { fprintf(DBG, "Entering New translation $#COLD$# on %lx::%lx\n",M->fixregs.eip, curr_bb_entry->trans_bb_eip); fflush(DBG); } } } else { curr_bb_entry->trans_bb_eip = (unsigned long)M->bbOut; } #ifdef DEBUG_ON M->nTrInstr = 0;#endif //fprintf(DBG, "Enter xlate_bb: %lx\n", M->fixregs.eip); //fflush(DBG); /* Did not find the translated Basic Block, So need to Translate */ M->comming_from_call_indirect = false; M->curr_bb_entry = curr_bb_entry;#if 0 struct bb_mapping_entry { unsigned short src_eip_offset; unsigned 
short bb_eip_offset; } bb_mappings[MAX_TRACE_INSTRS]; /* src_eip_offset to bb_eip_offset mappings array that will be filled up after translating each instruction */#endif M->jmp_target = M->bbOut; M->next_eip = M->fixregs.eip; M->patch_count = 0; /* If basic block cache is full, wipe it and start over. */ /* This is not done inside the loop on purpose. Dont wipe the BBCache Unless utterly necessary. We translate as far as possible and return with the hope that the guest executes upto completion without need for for translation */ if ((!ROOM_FOR_BB(M)) || (M->no_of_bbs >= MAX_BBS)) {#ifdef SIGNALS sigset_t oldSet; sigprocmask(SIG_SETMASK, &allSignals, &oldSet); #endif DEBUG(xlate) { fprintf(DBG, "Wiping basic block cache\n"); } bb_cache_reinit(M);#ifdef SIGNALS sigprocmask(SIG_SETMASK, &allSignals, &oldSet); #endif return xlate_bb(M); }#ifdef PROFILE_BB_STATS bb_entry *this_bb_entry = M->curr_bb_entry; this_bb_entry->flags = IS_START_OF_TRACE; this_bb_entry->src_bb_end_eip = M->next_eip; this_bb_entry->trans_bb_end_eip = (unsigned long) M->bbOut;#endif /* This loop executes once per instruction */ while (ROOM_FOR_BB(M) && MORE_FREE_PATCH_BLOCKS(M)) { /*If it is necessary to limit the trace length (I don't know why) use : M->nTrInstr < MAX_TRACE_INSTRS */ /* See if the instruction needs to be decoded */#ifdef STATIC_PASS goingOutofElf = update_mem_next_eip(M); /* Jump to a library etc, outside of the current elf file */ if (goingOutofElf) break;#endif /* Decode the Instruction */ if (do_decode(M, &ds) == false) { DEBUG(decode_eager_panic) panic ("do_decode failed at instr: %lx byte: %lx\n", ds.decode_eip, ds.pInstr); // Emit a call to panic ... 
bb_emit_byte(M, 0x68u); bb_emit_w32(M, (unsigned long) ds.pInstr); bb_emit_byte(M, 0x68u); bb_emit_w32(M, (unsigned long) M->next_eip); bb_emit_call(M, (unsigned char *) panic_decode_fail); break; } DEBUG(show_each_instr_trans) { unsigned long bbno = (M->curr_bb_entry - M->bb_entry_nodes); fprintf(DBG, "bb# %lu, ", bbno); do_disasm(&ds, DBG); fflush(DBG); } #ifdef PROFILE_BB_CNT if(prev_bb_entry != M->curr_bb_entry) { prev_bb_entry = M->curr_bb_entry; inc_emitted = false; } if(!inc_emitted) { OpCode *p = (OpCode *) ds.pEntry; if((SOURCES_FLAGS(p) == 0) && (MODIFIES_OSZAPF(p))) { bb_emit_lw_inc(M, (unsigned long)&(M->ptState->bb_cnt)); inc_emitted = true; } else if(p->attr & DF_BRANCH) { // BB is about to end bb_emit_inc(M, (unsigned long)&(M->ptState->bb_cnt)); inc_emitted = true; } }#endif unsigned char *begin_bbout = M->bbOut;#ifdef DEBUG_ON M->nTrInstr++;#endif#ifdef PROFILE_BB_STATS this_bb_entry->nInstr++; /* Note: this_bb_entry->src_bb_end_eip MUST be updated before emitting. This is because, some emitters do change M->next_eip */ this_bb_entry->src_bb_end_eip = M->next_eip;#endif /* Emit the Instruction using the appropriate emitter */ isEndOfBB = translate_instr(M, &ds); DEBUG(show_each_trans_instr) { unsigned long saved_Meip = M->next_eip; M->next_eip = (unsigned long)begin_bbout; while(M->next_eip < (unsigned long)M->bbOut) { decode_t dd; do_decode(M, &dd); do_disasm(&dd, DBG); fflush(DBG); } M->next_eip = saved_Meip; fprintf(DBG, "\n"); } #ifdef PROFILE_BB_STATS /* Note: this_bb_entry->trans_bb_end_eip MUST be updated after emitting, for obvious reasons. */ this_bb_entry->trans_bb_end_eip = (unsigned long) M->bbOut;