x86_emulate.c
来自「linux 内核源代码」· C语言 代码 · 共 1,663 行 · 第 1/3 页
C
1,663 行
/*
 * NOTE(review): fragment of the KVM x86 instruction emulator's decode
 * path (presumably x86_emulate_memop()/x86_emulate_insn() from
 * x86_emulate.c -- TODO confirm against the full file).  The enclosing
 * function begins before and ends after this excerpt, and the web
 * viewer it was captured from collapsed all line breaks, so the code
 * below is annotated in place rather than restructured.
 *
 * Visible stages, in order:
 *   1. Legacy prefix scan (up to 8 bytes): 0x66/0x67 size overrides,
 *      segment overrides (sets override_base), LOCK, REP/REPNE.
 *   2. REX prefix decode in 64-bit mode (W widens op_bytes to 8;
 *      R/X/B extend modrm_reg/index_reg/modrm_rm|base_reg).
 *   3. Opcode lookup in opcode_table[], escaping to twobyte_table[]
 *      on 0x0f; undecodable opcodes go to cannot_emulate.
 *   4. ModRM/SIB decode: register operand when mod == 3, otherwise a
 *      16-bit or 32/64-bit effective-address computation (including
 *      RIP-relative addressing and segment-base adjustment); the
 *      result lands in modrm_ea / cr2.
 *   5. Source then destination operand fetch per the SrcMask/DstMask
 *      bits of the opcode descriptor d (register, memory via
 *      ops->read_emulated, or immediate via insn_fetch).
 *   6. One-byte opcode execution switch (ALU group, movsxd, Grp1,
 *      test/xchg/mov, pop, Grp2 shifts/rotates, start of Grp3) --
 *      cut off mid-Grp3 at the end of this excerpt.
 */
#ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: op_bytes = 4; ad_bytes = 8; break;#endif default: return -1; } /* Legacy prefixes. */ for (i = 0; i < 8; i++) { switch (b = insn_fetch(u8, 1, _eip)) { case 0x66: /* operand-size override */ op_bytes ^= 6; /* switch between 2/4 bytes */ break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) ad_bytes ^= 12; /* switch between 4/8 bytes */ else ad_bytes ^= 6; /* switch between 2/4 bytes */ break; case 0x2e: /* CS override */ override_base = &ctxt->cs_base; break; case 0x3e: /* DS override */ override_base = &ctxt->ds_base; break; case 0x26: /* ES override */ override_base = &ctxt->es_base; break; case 0x64: /* FS override */ override_base = &ctxt->fs_base; break; case 0x65: /* GS override */ override_base = &ctxt->gs_base; break; case 0x36: /* SS override */ override_base = &ctxt->ss_base; break; case 0xf0: /* LOCK */ lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ rep_prefix = 1; break; default: goto done_prefixes; } }done_prefixes: /* REX prefix. */ if ((mode == X86EMUL_MODE_PROT64) && ((b & 0xf0) == 0x40)) { rex_prefix = b; if (b & 8) op_bytes = 8; /* REX.W */ modrm_reg = (b & 4) << 1; /* REX.R */ index_reg = (b & 2) << 2; /* REX.X */ modrm_rm = base_reg = (b & 1) << 3; /* REG.B */ b = insn_fetch(u8, 1, _eip); } /* Opcode byte(s). */ d = opcode_table[b]; if (d == 0) { /* Two-byte opcode? */ if (b == 0x0f) { twobyte = 1; b = insn_fetch(u8, 1, _eip); d = twobyte_table[b]; } /* Unrecognised? */ if (d == 0) goto cannot_emulate; } /* ModRM and SIB bytes. 
*/ if (d & ModRM) { modrm = insn_fetch(u8, 1, _eip); modrm_mod |= (modrm & 0xc0) >> 6; modrm_reg |= (modrm & 0x38) >> 3; modrm_rm |= (modrm & 0x07); modrm_ea = 0; use_modrm_ea = 1; if (modrm_mod == 3) { modrm_val = *(unsigned long *) decode_register(modrm_rm, _regs, d & ByteOp); goto modrm_done; } if (ad_bytes == 2) { unsigned bx = _regs[VCPU_REGS_RBX]; unsigned bp = _regs[VCPU_REGS_RBP]; unsigned si = _regs[VCPU_REGS_RSI]; unsigned di = _regs[VCPU_REGS_RDI]; /* 16-bit ModR/M decode. */ switch (modrm_mod) { case 0: if (modrm_rm == 6) modrm_ea += insn_fetch(u16, 2, _eip); break; case 1: modrm_ea += insn_fetch(s8, 1, _eip); break; case 2: modrm_ea += insn_fetch(u16, 2, _eip); break; } switch (modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (modrm_rm == 2 || modrm_rm == 3 || (modrm_rm == 6 && modrm_mod != 0)) if (!override_base) override_base = &ctxt->ss_base; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
*/ switch (modrm_rm) { case 4: case 12: sib = insn_fetch(u8, 1, _eip); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; switch (base_reg) { case 5: if (modrm_mod != 0) modrm_ea += _regs[base_reg]; else modrm_ea += insn_fetch(s32, 4, _eip); break; default: modrm_ea += _regs[base_reg]; } switch (index_reg) { case 4: break; default: modrm_ea += _regs[index_reg] << scale; } break; case 5: if (modrm_mod != 0) modrm_ea += _regs[modrm_rm]; else if (mode == X86EMUL_MODE_PROT64) rip_relative = 1; break; default: modrm_ea += _regs[modrm_rm]; break; } switch (modrm_mod) { case 0: if (modrm_rm == 5) modrm_ea += insn_fetch(s32, 4, _eip); break; case 1: modrm_ea += insn_fetch(s8, 1, _eip); break; case 2: modrm_ea += insn_fetch(s32, 4, _eip); break; } } if (!override_base) override_base = &ctxt->ds_base; if (mode == X86EMUL_MODE_PROT64 && override_base != &ctxt->fs_base && override_base != &ctxt->gs_base) override_base = NULL; if (override_base) modrm_ea += *override_base; if (rip_relative) { modrm_ea += _eip; switch (d & SrcMask) { case SrcImmByte: modrm_ea += 1; break; case SrcImm: if (d & ByteOp) modrm_ea += 1; else if (op_bytes == 8) modrm_ea += 4; else modrm_ea += op_bytes; } } if (ad_bytes != 8) modrm_ea = (u32)modrm_ea; cr2 = modrm_ea; modrm_done: ; } /* * Decode and fetch the source operand: register, memory * or immediate. */ switch (d & SrcMask) { case SrcNone: break; case SrcReg: src.type = OP_REG; if (d & ByteOp) { src.ptr = decode_register(modrm_reg, _regs, (rex_prefix == 0)); src.val = src.orig_val = *(u8 *) src.ptr; src.bytes = 1; } else { src.ptr = decode_register(modrm_reg, _regs, 0); switch ((src.bytes = op_bytes)) { case 2: src.val = src.orig_val = *(u16 *) src.ptr; break; case 4: src.val = src.orig_val = *(u32 *) src.ptr; break; case 8: src.val = src.orig_val = *(u64 *) src.ptr; break; } } break; case SrcMem16: src.bytes = 2; goto srcmem_common; case SrcMem32: src.bytes = 4; goto srcmem_common; case SrcMem: src.bytes = (d & ByteOp) ? 
1 : op_bytes; /* Don't fetch the address for invlpg: it could be unmapped. */ if (twobyte && b == 0x01 && modrm_reg == 7) break; srcmem_common: /* * For instructions with a ModR/M byte, switch to register * access if Mod = 3. */ if ((d & ModRM) && modrm_mod == 3) { src.type = OP_REG; break; } src.type = OP_MEM; src.ptr = (unsigned long *)cr2; src.val = 0; if ((rc = ops->read_emulated((unsigned long)src.ptr, &src.val, src.bytes, ctxt->vcpu)) != 0) goto done; src.orig_val = src.val; break; case SrcImm: src.type = OP_IMM; src.ptr = (unsigned long *)_eip; src.bytes = (d & ByteOp) ? 1 : op_bytes; if (src.bytes == 8) src.bytes = 4; /* NB. Immediates are sign-extended as necessary. */ switch (src.bytes) { case 1: src.val = insn_fetch(s8, 1, _eip); break; case 2: src.val = insn_fetch(s16, 2, _eip); break; case 4: src.val = insn_fetch(s32, 4, _eip); break; } break; case SrcImmByte: src.type = OP_IMM; src.ptr = (unsigned long *)_eip; src.bytes = 1; src.val = insn_fetch(s8, 1, _eip); break; } /* Decode and fetch the destination operand: register or memory. */ switch (d & DstMask) { case ImplicitOps: /* Special instructions do their own operand decoding. */ goto special_insn; case DstReg: dst.type = OP_REG; if ((d & ByteOp) && !(twobyte && (b == 0xb6 || b == 0xb7))) { dst.ptr = decode_register(modrm_reg, _regs, (rex_prefix == 0)); dst.val = *(u8 *) dst.ptr; dst.bytes = 1; } else { dst.ptr = decode_register(modrm_reg, _regs, 0); switch ((dst.bytes = op_bytes)) { case 2: dst.val = *(u16 *)dst.ptr; break; case 4: dst.val = *(u32 *)dst.ptr; break; case 8: dst.val = *(u64 *)dst.ptr; break; } } break; case DstMem: dst.type = OP_MEM; dst.ptr = (unsigned long *)cr2; dst.bytes = (d & ByteOp) ? 1 : op_bytes; dst.val = 0; /* * For instructions with a ModR/M byte, switch to register * access if Mod = 3. 
*/ if ((d & ModRM) && modrm_mod == 3) { dst.type = OP_REG; break; } if (d & BitOp) { unsigned long mask = ~(dst.bytes * 8 - 1); dst.ptr = (void *)dst.ptr + (src.val & mask) / 8; } if (!(d & Mov) && /* optimisation - avoid slow emulated read */ ((rc = ops->read_emulated((unsigned long)dst.ptr, &dst.val, dst.bytes, ctxt->vcpu)) != 0)) goto done; break; } dst.orig_val = dst.val; if (twobyte) goto twobyte_insn; switch (b) { case 0x00 ... 0x05: add: /* add */ emulate_2op_SrcV("add", src, dst, _eflags); break; case 0x08 ... 0x0d: or: /* or */ emulate_2op_SrcV("or", src, dst, _eflags); break; case 0x10 ... 0x15: adc: /* adc */ emulate_2op_SrcV("adc", src, dst, _eflags); break; case 0x18 ... 0x1d: sbb: /* sbb */ emulate_2op_SrcV("sbb", src, dst, _eflags); break; case 0x20 ... 0x23: and: /* and */ emulate_2op_SrcV("and", src, dst, _eflags); break; case 0x24: /* and al imm8 */ dst.type = OP_REG; dst.ptr = &_regs[VCPU_REGS_RAX]; dst.val = *(u8 *)dst.ptr; dst.bytes = 1; dst.orig_val = dst.val; goto and; case 0x25: /* and ax imm16, or eax imm32 */ dst.type = OP_REG; dst.bytes = op_bytes; dst.ptr = &_regs[VCPU_REGS_RAX]; if (op_bytes == 2) dst.val = *(u16 *)dst.ptr; else dst.val = *(u32 *)dst.ptr; dst.orig_val = dst.val; goto and; case 0x28 ... 0x2d: sub: /* sub */ emulate_2op_SrcV("sub", src, dst, _eflags); break; case 0x30 ... 0x35: xor: /* xor */ emulate_2op_SrcV("xor", src, dst, _eflags); break; case 0x38 ... 0x3d: cmp: /* cmp */ emulate_2op_SrcV("cmp", src, dst, _eflags); break; case 0x63: /* movsxd */ if (mode != X86EMUL_MODE_PROT64) goto cannot_emulate; dst.val = (s32) src.val; break; case 0x80 ... 0x83: /* Grp1 */ switch (modrm_reg) { case 0: goto add; case 1: goto or; case 2: goto adc; case 3: goto sbb; case 4: goto and; case 5: goto sub; case 6: goto xor; case 7: goto cmp; } break; case 0x84 ... 0x85: test: /* test */ emulate_2op_SrcV("test", src, dst, _eflags); break; case 0x86 ... 0x87: /* xchg */ /* Write back the register source. 
*/ switch (dst.bytes) { case 1: *(u8 *) src.ptr = (u8) dst.val; break; case 2: *(u16 *) src.ptr = (u16) dst.val; break; case 4: *src.ptr = (u32) dst.val; break; /* 64b reg: zero-extend */ case 8: *src.ptr = dst.val; break; } /* * Write back the memory destination with implicit LOCK * prefix. */ dst.val = src.val; lock_prefix = 1; break; case 0x88 ... 0x8b: /* mov */ goto mov; case 0x8d: /* lea r16/r32, m */ dst.val = modrm_val; break; case 0x8f: /* pop (sole member of Grp1a) */ /* 64-bit mode: POP always pops a 64-bit operand. */ if (mode == X86EMUL_MODE_PROT64) dst.bytes = 8; if ((rc = ops->read_std(register_address(ctxt->ss_base, _regs[VCPU_REGS_RSP]), &dst.val, dst.bytes, ctxt->vcpu)) != 0) goto done; register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes); break; case 0xa0 ... 0xa1: /* mov */ dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX]; dst.val = src.val; _eip += ad_bytes; /* skip src displacement */ break; case 0xa2 ... 0xa3: /* mov */ dst.val = (unsigned long)_regs[VCPU_REGS_RAX]; _eip += ad_bytes; /* skip dst displacement */ break; case 0xc0 ... 0xc1: grp2: /* Grp2 */ switch (modrm_reg) { case 0: /* rol */ emulate_2op_SrcB("rol", src, dst, _eflags); break; case 1: /* ror */ emulate_2op_SrcB("ror", src, dst, _eflags); break; case 2: /* rcl */ emulate_2op_SrcB("rcl", src, dst, _eflags); break; case 3: /* rcr */ emulate_2op_SrcB("rcr", src, dst, _eflags); break; case 4: /* sal/shl */ case 6: /* sal/shl */ emulate_2op_SrcB("sal", src, dst, _eflags); break; case 5: /* shr */ emulate_2op_SrcB("shr", src, dst, _eflags); break; case 7: /* sar */ emulate_2op_SrcB("sar", src, dst, _eflags); break; } break; case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ mov: dst.val = src.val; break; case 0xd0 ... 0xd1: /* Grp2 */ src.val = 1; goto grp2; case 0xd2 ... 0xd3: /* Grp2 */ src.val = _regs[VCPU_REGS_RCX]; goto grp2; case 0xf6 ... 0xf7: /* Grp3 */ switch (modrm_reg) { case 0 ... 1: /* test */ /* * Special case in Grp3: test has an immediate * source operand. 
*/ src.type = OP_IMM; src.ptr = (unsigned long *)_eip; src.bytes = (d & ByteOp) ? 1 : op_bytes; if (src.bytes == 8) src.bytes = 4; switch (src.bytes) { case 1: src.val = insn_fetch(s8, 1, _eip); break; case 2: src.val = insn_fetch(s16, 2, _eip); break; case 4: src.val = insn_fetch(s32, 4, _eip);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?