// codegen-ia32.cc.svn-base
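//
// This fragment picks up in the middle of a binary-operation stub generator
// (its non-smi slow paths), followed by the FloatingPointHelper routines for
// heap number allocation and FPU operand loading/checking, UnarySubStub, and
// the beginning of ArgumentsAccessStub. Earlier and later parts of the file
// are not included here.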
          // Fall through!
        case NO_OVERWRITE:
          FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
                                                  ecx, edx);
          __ bind(&skip_allocation);
          break;
        default: UNREACHABLE();
      }
      FloatingPointHelper::LoadFloatOperands(masm, ecx);

      switch (op_) {
        case Token::ADD: __ faddp(1); break;
        case Token::SUB: __ fsubp(1); break;
        case Token::MUL: __ fmulp(1); break;
        case Token::DIV: __ fdivp(1); break;
        default: UNREACHABLE();
      }
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
      __ ret(2 * kPointerSize);
    }
    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
      FloatingPointHelper::LoadFloatOperands(masm, ecx);

      Label non_int32_operands, non_smi_result, skip_allocation;
      // Reserve space for converted numbers.
      __ sub(Operand(esp), Immediate(2 * kPointerSize));

      // Check if right operand is int32.
      __ fist_s(Operand(esp, 1 * kPointerSize));
      __ fild_s(Operand(esp, 1 * kPointerSize));
      __ fucompp();
      __ fnstsw_ax();
      __ sahf();
      __ j(not_zero, &non_int32_operands);
      __ j(parity_even, &non_int32_operands);

      // Check if left operand is int32.
      __ fist_s(Operand(esp, 0 * kPointerSize));
      __ fild_s(Operand(esp, 0 * kPointerSize));
      __ fucompp();
      __ fnstsw_ax();
      __ sahf();
      __ j(not_zero, &non_int32_operands);
      __ j(parity_even, &non_int32_operands);

      // Get int32 operands and perform bitop.
      __ pop(eax);
      __ pop(ecx);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar(eax); break;
        case Token::SHL: __ shl(eax); break;
        case Token::SHR: __ shr(eax); break;
        default: UNREACHABLE();
      }

      // Check if result is non-negative and fits in a smi.
      __ test(eax, Immediate(0xc0000000));
      __ j(not_zero, &non_smi_result);

      // Tag smi result and return.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(eax, Operand(eax, times_2, kSmiTag));
      __ ret(2 * kPointerSize);

      // All ops except SHR return a signed int32 that we load in a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ test(eax, Immediate(kSmiTagMask));
            __ j(not_zero, &skip_allocation, not_taken);
            // Fall through!
          case NO_OVERWRITE:
            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
                                                    ecx, edx);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        __ mov(Operand(esp, 1 * kPointerSize), ebx);
        __ fild_s(Operand(esp, 1 * kPointerSize));
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(2 * kPointerSize);
      }

      __ bind(&non_int32_operands);
      // Restore stacks and operands before calling runtime.
      __ ffree(0);
      __ add(Operand(esp), Immediate(2 * kPointerSize));

      // SHR should return uint32 - go to runtime for non-smi/negative result.
      if (op_ == Token::SHR) __ bind(&non_smi_result);
      __ mov(eax, Operand(esp, 1 * kPointerSize));
      __ mov(edx, Operand(esp, 2 * kPointerSize));
      break;
    }
    default: UNREACHABLE(); break;
  }

  // 3. If all else fails, use the runtime system to get the correct result.
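  // At this point the two stack arguments passed to the stub have not been
  // popped (the fast paths above return with __ ret(2 * kPointerSize)), so
  // each generic builtin below can be entered with a tail jump
  // (JUMP_FUNCTION) and will return directly to the stub's caller.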
  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default: UNREACHABLE();
  }
}


void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                             Label* need_gc,
                                             Register scratch1,
                                             Register scratch2) {
  ExternalReference allocation_top =
      ExternalReference::new_space_allocation_top_address();
  ExternalReference allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  __ mov(Operand(scratch1), Immediate(allocation_top));
  __ mov(eax, Operand(scratch1, 0));
  __ lea(scratch2, Operand(eax, HeapNumber::kSize));  // scratch2: new top
  __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
  __ j(above, need_gc, not_taken);
  __ mov(Operand(scratch1, 0), scratch2);  // store new top
  __ mov(Operand(eax, HeapObject::kMapOffset),
         Immediate(Factory::heap_number_map()));
  // Tag old top and use as result.
  __ add(Operand(eax), Immediate(kHeapObjectTag));
}


void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                            Register scratch) {
  Label load_smi_1, load_smi_2, done_load_1, done;
  __ mov(scratch, Operand(esp, 2 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_1, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ bind(&done_load_1);

  __ mov(scratch, Operand(esp, 1 * kPointerSize));
  __ test(scratch, Immediate(kSmiTagMask));
  __ j(zero, &load_smi_2, not_taken);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_1);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
  __ jmp(&done_load_1);

  __ bind(&load_smi_2);
  __ sar(scratch, kSmiTagSize);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);

  __ bind(&done);
}


void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float,
                                             Register scratch) {
  Label test_other, done;
  // Test if both operands are floats or smi -> scratch=k_is_float;
  // Otherwise scratch = k_not_float.
  __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &test_other, not_taken);  // argument in edx is OK
  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN

  __ bind(&test_other);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done);  // argument in eax is OK
  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(scratch, Factory::heap_number_map());
  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
  // Fall-through: Both operands are numbers.
  __ bind(&done);
}


#undef __
#define __ masm->

void UnarySubStub::Generate(MacroAssembler* masm) {
  Label undo;
  Label slow;
  Label done;
  Label try_float;

  // Check whether the value is a smi.
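  // Smis are stored as tagged integers, value << kSmiTagSize with a zero tag
  // bit (see the ASSERT(kSmiTagSize == 1 && kSmiTag == 0) further down), so
  // testing the low bit against kSmiTagMask distinguishes smis from heap
  // object pointers.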
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, &try_float, not_taken);

  // Enter runtime system if the value of the expression is zero
  // to make sure that we switch between 0 and -0.
  __ test(eax, Operand(eax));
  __ j(zero, &slow, not_taken);

  // The value of the expression is a smi that is not zero. Try
  // optimistic subtraction '0 - value'.
  __ mov(edx, Operand(eax));
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
  __ j(overflow, &undo, not_taken);

  // If result is a smi we are done.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &done, taken);

  // Restore eax and enter runtime system.
  __ bind(&undo);
  __ mov(eax, Operand(edx));

  // Enter runtime system.
  __ bind(&slow);
  __ pop(ecx);  // pop return address
  __ push(eax);
  __ push(ecx);  // push return address
  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);

  // Try floating point case.
  __ bind(&try_float);
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, Factory::heap_number_map());
  __ j(not_equal, &slow);
  __ mov(edx, Operand(eax));  // edx: operand
  FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
  // eax: allocated 'empty' number
  __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
  __ fchs();
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));

  __ bind(&done);

  masm->StubReturn(1);
}


class ArgumentsAccessStub: public CodeStub {
 public:
  explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }

 private:
  bool is_length_;

  Major MajorKey() { return ArgumentsAccess; }
  int MinorKey() { return is_length_ ? 1 : 0; }
  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ArgumentsAccessStub"; }

#ifdef DEBUG
  void Print() {
    PrintF("ArgumentsAccessStub (is_length %s)\n",
           is_length_ ? "true" : "false");
  }
#endif
};


void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
  // Check that the key is a smi for non-length access.
  Label slow;
  if (!is_length_) {
    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // skip return address
    __ test(ebx, Immediate(kSmiTagMask));
    __ j(not_zero, &slow, not_taken);
  }

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
  __ j(equal, &adaptor);

  // The displacement is used for skipping the return address on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
  if (is_length_) {
    // Do nothing. The length is already in register eax.
  } else {
    // Check index against formal parameters count limit passed in
    // through register eax. Use unsigned comparison to get negative
    // check for free.
    __ cmp(ebx, Operand(eax));
    __ j(above_equal, &slow, not_taken);

    // Read the argument from the stack.
    __ lea(edx, Operand(ebp, eax, times_2, 0));
    __ neg(ebx);
    __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
  }

  // Return the length or the argument.
  __ ret(0);

  // Arguments adaptor case: Find the length or the actual argument in
  // the calling frame.
  __ bind(&adaptor);
  if (is_length_) {
    // Read the arguments length from the adaptor frame.
    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  } else {
    // Check index against actual arguments limit found in the
    // arguments adaptor frame. Use unsigned comparison to get
    // negative check for free.
    __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ cmp(ebx, Operand(ecx));
    __ j(above_equal, &slow, not_taken);

    // Read the argument from the stack.
    __ lea(edx, Operand(edx, ecx, times_2, 0));
    __ neg(ebx);
    __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));