📄 jit.cpp
字号:
// ---------------------------------------------------------------------------
// NOTE(review): this fragment of JavaScriptCore's JIT.cpp was extracted from a
// web code viewer that collapsed many source lines into a few very long ones.
// In particular the preprocessor directives below (#ifndef NDEBUG / #endif)
// are fused mid-line, so this text will NOT compile as-is until the original
// line breaks are restored. The code tokens are kept byte-identical here;
// only standalone comment lines have been added.
//
// Below: the tail of JIT::privateCompileMainPass. op_enter and
// op_enter_with_activation zero-initialize the local/constant registers
// (the comment in the code says: to zap stale pointers and reduce GC
// pressure); op_convert_this checks the NeedsThisConversion type-info flag
// and defers to a slow case; the two profiler opcodes skip the stub call
// when the profiler-reference slot holds null.
size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters(); for (size_t j = 0; j < count; ++j) emitInitRegister(j); NEXT_OPCODE(op_enter); } case op_enter_with_activation: { // Even though CTI doesn't use them, we initialize our constant // registers to zap stale pointers, to avoid unnecessarily prolonging // object lifetime and increasing GC pressure. size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters(); for (size_t j = 0; j < count; ++j) emitInitRegister(j); emitCTICall(JITStubs::cti_op_push_activation); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_enter_with_activation); } case op_create_arguments: { if (m_codeBlock->m_numParameters == 1) emitCTICall(JITStubs::cti_op_create_arguments_no_params); else emitCTICall(JITStubs::cti_op_create_arguments); NEXT_OPCODE(op_create_arguments); } case op_convert_this: { emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); emitJumpSlowCaseIfNotJSCell(regT0); loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1); addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); NEXT_OPCODE(op_convert_this); } case op_profile_will_call: { emitGetCTIParam(STUB_ARGS_profilerReference, regT0); Jump noProfiler = branchTestPtr(Zero, Address(regT0)); emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0); emitCTICall(JITStubs::cti_op_profile_will_call); noProfiler.link(this); NEXT_OPCODE(op_profile_will_call); } case op_profile_did_call: { emitGetCTIParam(STUB_ARGS_profilerReference, regT0); Jump noProfiler = branchTestPtr(Zero, Address(regT0)); emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0); emitCTICall(JITStubs::cti_op_profile_did_call); noProfiler.link(this); NEXT_OPCODE(op_profile_did_call); } case op_get_array_length: case op_get_by_id_chain: case op_get_by_id_generic: case op_get_by_id_proto: case 
// The specialized get/put-by-id variants below are asserted unreachable in
// this pass — presumably they are only ever materialized later by the
// property-access repatching machinery, never emitted from raw bytecode
// (TODO confirm against the stub-repatch code elsewhere in the file).
// After the switch: the asserts check that exactly the number of
// structure-stub-info and call-link-info slots recorded by the CodeBlock
// were consumed, and m_bytecodeIndex is poisoned to (unsigned)-1 in debug
// builds so stray use is caught by asserts.
//
// JIT::privateCompileLinkPass then links every jump recorded in m_jmpTable
// to the label of its target bytecode index and clears the table.
//
// JIT::privateCompileSlowCases (which begins here and continues past the end
// of this fragment) re-walks the bytecode, and for each SlowCaseEntry emits
// the out-of-line slow path: link the recorded slow-case jump(s), call the
// matching cti_op_* stub, then jump back into the fast path
// (emitJumpSlowToHot). killLastResultRegister() is called each iteration —
// the FIXME notes peephole optimization is disabled for slow cases.
op_get_by_id_proto_list: case op_get_by_id_self: case op_get_by_id_self_list: case op_get_string_length: case op_put_by_id_generic: case op_put_by_id_replace: case op_put_by_id_transition: ASSERT_NOT_REACHED(); } } ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos()); ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());#ifndef NDEBUG // reset this, in order to guard it's use with asserts m_bytecodeIndex = (unsigned)-1;#endif}void JIT::privateCompileLinkPass(){ unsigned jmpTableCount = m_jmpTable.size(); for (unsigned i = 0; i < jmpTableCount; ++i) m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this); m_jmpTable.clear();}void JIT::privateCompileSlowCases(){ Instruction* instructionsBegin = m_codeBlock->instructions().begin(); unsigned propertyAccessInstructionIndex = 0; unsigned callLinkInfoIndex = 0; for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { // FIXME: enable peephole optimizations for slow cases when applicable killLastResultRegister(); m_bytecodeIndex = iter->to;#ifndef NDEBUG unsigned firstTo = m_bytecodeIndex;#endif Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex; switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { case op_convert_this: { linkSlowCase(iter); linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitCTICall(JITStubs::cti_op_convert_this); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_convert_this); } case op_add: { compileFastArithSlow_op_add(currentInstruction, iter); NEXT_OPCODE(op_add); } case op_construct_verify: { linkSlowCase(iter); linkSlowCase(iter); emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_construct_verify); } case op_get_by_val: { // The slow case that handles accesses to arrays (below) may jump back up to here. 
// op_get_by_val slow path: first the fully generic path (non-immediate
// index, or not an array) calling cti_op_get_by_val, then a second linked
// slow case for array reads above the fast cut-off — it re-checks the index
// against ArrayStorage::m_vectorLength and loads from m_vector; a zero slot
// (a hole, needing undefined) branches back up to beginGetByValSlow.
Label beginGetByValSlow(this); Jump notImm = getSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); emitFastArithIntToImmNoCheck(regT1, regT1); notImm.link(this); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); emitCTICall(JITStubs::cti_op_get_by_val); emitPutVirtualRegister(currentInstruction[1].u.operand); emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val)); // This is slow case that handles accesses to arrays above the fast cut-off. // First, check if this is an access to the vector linkSlowCase(iter); branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow); // okay, missed the fast region, but it is still in the vector. Get the value. loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2); // Check whether the value loaded is zero; if so we need to return undefined. branchTestPtr(Zero, regT2, beginGetByValSlow); move(regT2, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand, regT0); NEXT_OPCODE(op_get_by_val); } case op_sub: { compileFastArithSlow_op_sub(currentInstruction, iter); NEXT_OPCODE(op_sub); } case op_rshift: { compileFastArithSlow_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter); NEXT_OPCODE(op_rshift); } case op_lshift: { compileFastArithSlow_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter); NEXT_OPCODE(op_lshift); } case op_loop_if_less: { unsigned op2 = currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); emitCTICall(JITStubs::cti_op_loop_if_less); emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); 
// op_loop_if_less (continued, non-constant-operand branch): call the stub
// and branch back into the fast path when it returns non-zero. Note the
// branch cases use one linked slow case for the constant-operand form and
// two for the general form — mirroring how many slow-case jumps the main
// pass recorded for each shape.
emitCTICall(JITStubs::cti_op_loop_if_less); emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } NEXT_OPCODE(op_loop_if_less); } case op_put_by_id: { compilePutByIdSlowCase(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, iter, propertyAccessInstructionIndex++); NEXT_OPCODE(op_put_by_id); } case op_get_by_id: { compileGetByIdSlowCase(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), iter, propertyAccessInstructionIndex++); NEXT_OPCODE(op_get_by_id); } case op_loop_if_lesseq: { unsigned op2 = currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2); emitCTICall(JITStubs::cti_op_loop_if_lesseq); emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); emitCTICall(JITStubs::cti_op_loop_if_lesseq); emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } NEXT_OPCODE(op_loop_if_lesseq); } case op_pre_inc: { compileFastArithSlow_op_pre_inc(currentInstruction[1].u.operand, iter); NEXT_OPCODE(op_pre_inc); } case op_put_by_val: { // Normal slow cases - either is not an immediate imm, or is an array. 
// op_put_by_val slow path: the generic path calls cti_op_put_by_val; the
// second pair of linked slow cases (immediate-int index into an array)
// calls cti_op_put_by_val_array instead.
Jump notImm = getSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); emitFastArithIntToImmNoCheck(regT1, regT1); notImm.link(this); emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); emitPutJITStubArg(regT2, 3); emitCTICall(JITStubs::cti_op_put_by_val); emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val)); // slow cases for immediate int accesses to arrays linkSlowCase(iter); linkSlowCase(iter); emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); emitPutJITStubArg(regT2, 3); emitCTICall(JITStubs::cti_op_put_by_val_array); NEXT_OPCODE(op_put_by_val); } case op_loop_if_true: { linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitCTICall(JITStubs::cti_op_jtrue); unsigned target = currentInstruction[2].u.operand; emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2); NEXT_OPCODE(op_loop_if_true); } case op_pre_dec: { compileFastArithSlow_op_pre_dec(currentInstruction[1].u.operand, iter); NEXT_OPCODE(op_pre_dec); } case op_jnless: { unsigned op2 = currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2); emitCTICall(JITStubs::cti_op_jless); emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitPutJITStubArg(regT1, 2); emitCTICall(JITStubs::cti_op_jless); emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3); } NEXT_OPCODE(op_jnless); } case op_not: { linkSlowCase(iter); xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0); emitPutJITStubArg(regT0, 1); emitCTICall(JITStubs::cti_op_not); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_not); } case op_jfalse: { linkSlowCase(iter); 
// op_jfalse slow path (continued): reuses cti_op_jtrue and inverts the
// branch sense (branchTest32(Zero, ...) — the code's own comment says
// "inverted!"). The fragment is cut off mid-way through the op_bitand
// case; the remainder of privateCompileSlowCases is not visible here.
emitPutJITStubArg(regT0, 1); emitCTICall(JITStubs::cti_op_jtrue); unsigned target = currentInstruction[2].u.operand; emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // inverted! NEXT_OPCODE(op_jfalse); } case op_post_inc: { compileFastArithSlow_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter); NEXT_OPCODE(op_post_inc); } case op_bitnot: { linkSlowCase(iter); emitPutJITStubArg(regT0, 1); emitCTICall(JITStubs::cti_op_bitnot); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitnot); } case op_bitand: {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -