macro-assembler-arm.cc.svn-base
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"

namespace v8 { namespace internal {

// Give alias names to registers
Register cp = { 8 };  // JavaScript context pointer
Register pp = { 10 };  // parameter pointer


MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true) {
}


// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#error "flag -mthumb-interwork missing"
#endif


// We do not support thumb inter-working with an arm architecture not
// supporting the blx instruction (below v5t)
#if defined(__THUMB_INTERWORK__)
#if !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__)
// add tests for other versions above v5t as required
#error "for thumb inter-working we require architecture v5t or above"
#endif
#endif


// Using blx may yield better code, so use it when required or when available
#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__)
#define USE_BLX 1
#endif

// Using bx does not yield better code, so use it only when required
#if defined(__THUMB_INTERWORK__)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
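

// A note on the non-BLX call sequences below: in ARM (non-Thumb) state,
// reading pc yields the address of the current instruction plus 8, so the
// pair
//   mov lr, pc        @ lr = address of this instruction + 8
//   mov pc, target    @ branch; lr now holds the return address
// leaves lr pointing just past the branch. This is what the comments in
// Call() mean by "set lr for return at current pc + 8".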


void MacroAssembler::Call(Register target, Condition cond) {
#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if !defined(__arm__)
  if (rmode == RelocInfo::RUNTIME_ENTRY) {
    mov(r2, Operand(target, rmode), LeaveCC, cond);
    // Set lr for return at current pc + 8.
    mov(lr, Operand(pc), LeaveCC, cond);
    // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
    // Notify the simulator of the transition to C code.
    swi(assembler::arm::call_rt_r2);
  } else {
    // set lr for return at current pc + 8
    mov(lr, Operand(pc), LeaveCC, cond);
    // emit a ldr<cond> pc, [pc + offset of target in constant pool]
    mov(pc, Operand(target, rmode), LeaveCC, cond);
  }
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif  // !defined(__arm__)
  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
  // 'blx ip'; however, the code would not be shorter than the above sequence
  // and the target address of the call would be referenced by the first
  // instruction rather than the second one, which would make it harder to
  // patch (two instructions before the return address, instead of one).
  ASSERT(kTargetAddrToReturnAddrDist == sizeof(Instr));
}


void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Ret() {
#if USE_BX
  bx(lr);
#else
  mov(pc, Operand(lr));
#endif
}


// Will clobber 4 registers: object, offset, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  // This is how much we shift the remembered set bit offset to get the
  // offset of the word in the remembered set.  We divide by kBitsPerInt (32,
  // shift right 5) and then multiply by kIntSize (4, shift left 2).
  const int kRSetWordShift = 3;

  Label fast, done;

  // First, test that the object is not in the new space.  We cannot set
  // remembered set bits in the new space.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  and_(scratch, object, Operand(Heap::NewSpaceMask()));
  cmp(scratch, Operand(ExternalReference::new_space_start()));
  b(eq, &done);

  // Compute the bit offset in the remembered set.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  mov(ip, Operand(Page::kPageAlignmentMask));  // load mask only once
  and_(scratch, object, Operand(ip));  // offset into page of the object
  add(offset, scratch, Operand(offset));  // add offset into the object
  mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

  // Compute the page address from the heap object pointer.
  // object: heap object pointer (with tag)
  // offset: bit offset of store position in the remembered set
  bic(object, object, Operand(ip));

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  // object: page start
  // offset: bit offset of store position in the remembered set
  cmp(offset, Operand(Page::kPageSize / kPointerSize));
  b(lt, &fast);

  // Adjust the bit offset to be relative to the start of the extra
  // remembered set and the start address to be the address of the extra
  // remembered set.
  sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
  // Load the array length into 'scratch' and multiply by four to get the
  // size in bytes of the elements.
  ldr(scratch, MemOperand(object, Page::kObjectStartOffset
                                  + FixedArray::kLengthOffset));
  mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
  // Add the page header (including remembered set), array header, and array
  // body size to the page address.
  add(object, object, Operand(Page::kObjectStartOffset + Array::kHeaderSize));
  add(object, object, Operand(scratch));

  bind(&fast);
  // Get address of the rset word.
  // object: start of the remembered set (page start for the fast case)
  // offset: bit offset of store position in the remembered set
  bic(scratch, offset, Operand(kBitsPerInt - 1));  // clear the bit offset
  add(object, object, Operand(scratch, LSR, kRSetWordShift));
  // Get bit offset in the rset word.
  // object: address of remembered set word
  // offset: bit offset of store position
  and_(offset, offset, Operand(kBitsPerInt - 1));

  ldr(scratch, MemOperand(object));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, offset));
  str(scratch, MemOperand(object));

  bind(&done);
}
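

// Worked example of the rset word computation above, using only the
// constants named in RecordWrite's comments (kBitsPerInt == 32,
// kIntSize == 4, hence kRSetWordShift == 3). For a store whose bit offset
// in the remembered set is 75:
//   bic scratch, offset, #31   @ scratch = 75 & ~31 = 64
//   add object, object, scratch, LSR #3
//                              @ object += 64 >> 3 = 8, i.e. word
//                              @ 75 / 32 = 2 of the rset, 4 bytes per word
//   and offset, offset, #31    @ offset = 75 & 31 = 11, the bit to set
//                              @ within that word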


void MacroAssembler::EnterInternalFrame() {
  // r0-r3: preserved
  int type = StackFrame::INTERNAL;

  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(0));
  push(ip);  // Push an empty code cache slot.
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveInternalFrame() {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore the caller
  // frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
  // Compute parameter pointer before making changes and save it as ip
  // register so that it is restored as sp register on exit, thereby
  // popping the args.
  // ip = sp + kPointerSize * #args;
  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));

  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
  mov(fp, Operand(sp));  // set up new frame pointer

  // Push debug marker.
  mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
  push(ip);

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  str(cp, MemOperand(ip));

  // Set up argc and the builtin function in callee-saved registers.
  mov(r4, Operand(r0));
  mov(r5, Operand(r1));

  // Compute the argv pointer and keep it in a callee-saved register.
  add(r6, fp, Operand(r4, LSL, kPointerSizeLog2));
  add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize));

  // Save the state of all registers to the stack from the memory
  // location. This is needed to allow nested break points.
  if (type == StackFrame::EXIT_DEBUG) {
    // Use sp as base to push.
    CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
  }
}
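

// Sketch of the frame built by EnterExitFrame above (higher addresses
// first; slot names follow the comments in the code):
//   [ caller's args ]    <- popped on exit via the saved sp_on_exit
//   caller_pc  (lr)
//   sp_on_exit (ip = old sp + argc * kPointerSize)
//   caller_fp            <- fp after the stm/mov pair
//   debug marker         <- sp (1 for EXIT_DEBUG, 0 otherwise)
// LeaveExitFrame below relies on this layout: it moves fp into sp, then a
// single ldm restores fp, sp (popping the args) and pc in one step.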


void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
  // Restore the memory copy of the registers by digging them out from
  // the stack. This is needed to allow nested break points.
  if (type == StackFrame::EXIT_DEBUG) {
    // This code intentionally clobbers r2 and r3.
    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
    const int kOffset =
        ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
    add(r3, fp, Operand(kOffset));
    CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
  }

  // Clear top frame.
  mov(r3, Operand(0));
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  ldr(cp, MemOperand(ip));
  if (kDebug) {
    str(r3, MemOperand(ip));
  }

  // Pop the arguments, restore registers, and return.
  mov(sp, Operand(fp));  // respect ABI stack constraint
  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry
  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      b(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }