📄 macro-assembler-ia32.cc.svn-base
字号:
// expectation.
// NOTE(review): the lines below are the tail of a function whose opening
// lines precede this chunk (presumably CallRuntime — verify against the
// full file). It validates the argument count and dispatches to a
// runtime stub.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation();
    return;
  }

  Runtime::FunctionId function_id =
      static_cast<Runtime::FunctionId>(f->stub_id);
  RuntimeStub stub(function_id, num_arguments);
  CallStub(&stub);
}


// Tail-call a runtime routine: set up the argument count in eax and
// jump (never return) to the C entry stub for |ext|.
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                     int num_arguments) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(Operand(eax), Immediate(num_arguments));
  JumpToBuiltin(ext);
}


// Jump to the C entry runtime stub with |ext| as the entry point.
// Clobbers ebx (used by CEntryStub to find the C function).
void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
  // Set the entry point and jump to the C entry runtime stub.
  mov(Operand(ebx), Immediate(ext));
  CEntryStub ces;
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


// Emit the argument-count check that precedes a function invocation.
// If the expected and actual counts are known (or proven) to match,
// falls through; otherwise calls/jumps to the arguments adaptor
// trampoline. |done| is bound by the caller after the invoke sequence.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    const Operand& code_operand,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(eax, actual.immediate());
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it
        // look like we have a match between expected and actual number
        // of arguments.
        definitely_matches = true;
      } else {
        mov(ebx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmp(expected.reg(), actual.immediate());
      j(equal, &invoke);
      ASSERT(expected.reg().is(ebx));
      mov(eax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmp(expected.reg(), Operand(actual.reg()));
      j(equal, &invoke);
      ASSERT(actual.reg().is(eax));
      ASSERT(expected.reg().is(ebx));
    }
  }

  if (!definitely_matches) {
    // Counts may differ: route the invocation through the arguments
    // adaptor trampoline, making sure edx holds the code to invoke.
    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (!code_constant.is_null()) {
      mov(Operand(edx), Immediate(code_constant));
      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_operand.is_reg(edx)) {
      mov(edx, code_operand);
    }

    if (flag == CALL_FUNCTION) {
      call(adaptor, RelocInfo::CODE_TARGET);
      jmp(done);
    } else {
      jmp(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}


// Invoke code held in an Operand, with argument-count adaptation.
void MacroAssembler::InvokeCode(const Operand& code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag) {
  Label done;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
  if (flag == CALL_FUNCTION) {
    call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    jmp(code);
  }
  bind(&done);
}


// Invoke a Code handle directly, with argument-count adaptation.
void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag) {
  Label done;
  // The operand is unused when a code constant is supplied; see
  // InvokePrologue, which prefers |code_constant| when non-null.
  Operand dummy(eax);
  InvokePrologue(expected, actual, code, dummy, &done, flag);
  if (flag == CALL_FUNCTION) {
    call(code, rmode);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    jmp(code, rmode);
  }
  bind(&done);
}


// Invoke the JSFunction in |fun| (must be edi): load its context into
// esi, its formal parameter count into ebx, and its code entry into
// edx, then dispatch through InvokeCode.
void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(fun.is(edi));
  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  // Skip past the Code object header to the first instruction.
  lea(edx, FieldOperand(edx, Code::kHeaderSize));

  ParameterCount expected(ebx);
  InvokeCode(Operand(edx), expected, actual, flag);
}


// Invoke a JavaScript builtin by id. If the builtin code is not yet
// resolved, records a fixup entry so the bootstrapper can patch the
// (PC-relative) call site later.
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  // Calls are not allowed in some stubs.
  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());

  // Rely on the assertion to check that the number of provided
  // arguments match the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  InvokeCode(Handle<Code>(code), expected, expected,
             RelocInfo::CODE_TARGET, flag);

  const char* name = Builtins::GetName(id);
  int argc = Builtins::GetArgumentsCount(id);

  if (!resolved) {
    // Remember where the 32-bit target was emitted so it can be
    // patched once the builtin is compiled.
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsIsPCRelative::encode(true);
    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
    unresolved_.Add(entry);
  }
}


// Load the (untagged) entry address of builtin |id| into |target|,
// recording a non-PC-relative fixup if the builtin is unresolved.
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  const char* name = Builtins::GetName(id);
  int argc = Builtins::GetArgumentsCount(id);

  mov(Operand(target), Immediate(code));
  if (!resolved) {
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsIsPCRelative::encode(false);
    Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
    unresolved_.Add(entry);
  }
  // Convert the tagged Code pointer into the instruction-start address.
  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
}


// Emit code that loads the builtin JSFunction |id| into edi (via edx),
// and return its Code handle, setting *resolved accordingly.
// Clobbers edx and edi.
Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                            bool* resolved) {
  // Move the builtin function into the temporary function slot by
  // reading it from the builtins object. NOTE: We should be able to
  // reduce this to two instructions by putting the function table in
  // the global object instead of the "builtins" object and by using a
  // real register for the function.
  mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
  int builtins_offset =
      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
  mov(edi, FieldOperand(edx, builtins_offset));

  return Builtins::GetCode(id, resolved);
}


// Emit a return that pops no arguments.
void MacroAssembler::Ret() {
  ret(0);
}


// Emit code to set a stats counter to |value| (only when native code
// counters are enabled and the counter is active).
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
  }
}


// Emit code to add |value| (> 0) to a stats counter; uses inc for the
// common value == 1 case.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      inc(operand);
    } else {
      add(operand, Immediate(value));
    }
  }
}


// Emit code to subtract |value| (> 0) from a stats counter; uses dec
// for the common value == 1 case.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Operand operand = Operand::StaticVariable(ExternalReference(counter));
    if (value == 1) {
      dec(operand);
    } else {
      sub(operand, Immediate(value));
    }
  }
}


// Emit a runtime check that aborts with |msg| unless |cc| holds —
// only in debug-code builds.
void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (FLAG_debug_code) Check(cc, msg);
}


// Unconditionally emit a check: abort with |msg| unless |cc| holds.
void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L, taken);
  Abort(msg);
  // will not return here
  bind(&L);
}


// Emit code that aborts execution, passing |msg| to Runtime::kAbort.
void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  push(eax);
  push(Immediate(p0));
  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}

} }  // namespace v8::internal
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -