/* jitemitter.c */
} } else {
    /*
     * Non-synchronized method prologue: no invoker glue routine is
     * needed (invokerFunc/invokerName stay NULL) and the invocation is
     * finished inline below via emitRestOfSimpleInvocation().
     */
    CVMUint32 offset = jspOffset * 4 + prevOffset;
    invokerFunc = NULL;
    invokerName = NULL;
    /* Emit non-synchronized prologue: */
    CVMJITprintCodegenComment(("Set up frame for method"));
    if (CVMCPUmemspecIsEncodableAsImmediate(offset)) {
        /* The offset fits the store's immediate field, so the current
           JFP can be saved directly into the new frame. */
        CVMJITaddCodegenComment((con, "Store curr JFP into new frame"));
        CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_STR32_OPCODE,
                                           CVMCPU_JFP_REG, CVMCPU_JSP_REG,
                                           offset);
    } else {
        /* Not encodable. Stash JFP in PREV. We'll flush it explicitly */
        /* MOV PREV, JFP */
        needFlushPREV = CVM_TRUE;
        CVMJITaddCodegenComment((con, "Set up PREV to be curr JFP"));
        CVMCPUemitMoveRegister(con, CVMCPU_MOV_OPCODE,
                               CVMCPU_PROLOGUE_PREVFRAME_REG,
                               CVMCPU_JFP_REG, CVMJIT_NOSETCC);
    }
    jfpReg = CVMCPU_JFP_REG;
    jfpStr = "JFP";
    simpleInvoke = CVM_TRUE;
}

    /* Common tail of above two prologues is */
    /* add JFP_VARIABLE, TOS, maxLocalsX * 4 */
    CVMJITaddCodegenComment((con, "%s = JSP + (maxLocals - argsSize) * 4",
                             jfpStr));
    CVMCPUemitALUConstant16Scaled(con, CVMCPU_ADD_OPCODE,
                                  jfpReg, CVMCPU_JSP_REG, jspOffset, 2);

    if (simpleInvoke) {
        CVMassert(invokerFunc == NULL);
        /* Simple invocations don't need to call out to glue code */
        emitRestOfSimpleInvocation(con, needFlushPREV);
    } else {
        /* Call helper */
        CVMJITaddCodegenComment((con, "call %s", invokerName));
        CVMJITsetSymbolName((con, invokerName));
        CVMCPUemitAbsoluteCall(con, invokerFunc, CVMJIT_NOCPDUMP,
                               CVMJIT_NOCPBRANCH);
    }

    /* And finally the spill instruction. This is also the interpreted
       to compiled entry point. */
    CVMJITprintCodegenComment(("Interpreted -> compiled entry point"));
    CVMJITaddCodegenComment((con, "spill adjust goes here"));
    /* This is an estimate. Will be patched by method prologue patch. */
    spillAdjust = (con->maxTempWords << 2) +
                  CVMoffsetof(CVMCompiledFrame, opstackX);
    spillPC = CVMJITcbufGetLogicalPC(con);
    CVMJITcsSetEmitInPlace(con);
    /* Record start logicalPC for spill adjustment */
    rec->spillStartPC = spillPC;
    CVMCPUemitALUConstant16Scaled(con, CVMCPU_ADD_OPCODE,
                                  CVMCPU_JSP_REG, CVMCPU_JFP_REG,
                                  spillAdjust, 0);
    CVMJITcsClearEmitInPlace(con);
    /* Record end logicalPC for spill adjustment */
    rec->spillEndPC = CVMJITcbufGetLogicalPC(con);

#ifdef CVMCPU_HAS_CP_REG
    {
        int i;
        /* reserve space for setting up the constant pool base register */
        CVMJITprintCodegenComment(("%d words for setting up cp base register",
                                   CVMCPU_RESERVED_CP_REG_INSTRUCTIONS));
        for (i = 0; i < CVMCPU_RESERVED_CP_REG_INSTRUCTIONS; i++) {
            CVMCPUemitNop(con);
        }
    }
#endif
    /* Offset of the interpreted->compiled entry point (the spill
       instruction above) from the start of the prologue. */
    rec->intToCompOffset = spillPC - prologueStart;
}

#ifdef CVMCPU_HAS_DELAY_SLOT
/* With a delay slot the branch sits two instructions back from the
   current logical PC; otherwise only one. */
#define FIXUP_PC_OFFSET (2 * CVMCPU_INSTRUCTION_SIZE)
#else
#define FIXUP_PC_OFFSET (1 * CVMCPU_INSTRUCTION_SIZE)
#endif

/*
 * Makes 'conditionalInstructions' execute only when condCode holds:
 * unless condCode is CVMCPU_COND_AL (always), a branch on the opposite
 * condition is emitted around them and its target address is fixed up
 * after they are emitted.  con->inConditionalCode is set for the
 * duration so nested emitters know they are on a conditional path.
 */
#define emitConditionalInstructions(con, condCode, conditionalInstructions) \
{                                                                       \
    int fixupPC;                                                        \
                                                                        \
    /* emit branch around conditional instruction(s) */                 \
    if (condCode != CVMCPU_COND_AL) {                                   \
        CVMCodegenComment *comment;                                     \
        CVMJITpopCodegenComment(con, comment);                          \
        CVMJITaddCodegenComment((con, "br .skip"));                     \
        CVMCPUemitBranch(con, 0, CVMCPUoppositeCondCode[condCode]);     \
        CVMJITpushCodegenComment(con, comment);                         \
    }                                                                   \
    fixupPC = CVMJITcbufGetLogicalPC(con) - FIXUP_PC_OFFSET;            \
                                                                        \
    /* emit conditional instruction(s) */                               \
    con->inConditionalCode = CVM_TRUE;                                  \
    conditionalInstructions                                             \
    con->inConditionalCode = CVM_FALSE;                                 \
                                                                        \
    /* fixup target address of branch */                                \
    if (condCode != CVMCPU_COND_AL) {                                   \
        CVMtraceJITCodegen(("\t\t.skip\n"));                            \
        CVMJITfixupAddress(con, fixupPC, CVMJITcbufGetLogicalPC(con),   \
                           CVMJIT_COND_BRANCH_ADDRESS_MODE);            \
    }                                                                   \
}

#ifndef CVMCPU_HAS_CONDITIONAL_LOADSTORE_INSTRUCTIONS
/* Conditional variant of CVMCPUemitMemoryReference for CPUs without
   conditional load/store instructions; see emitConditionalInstructions. */
void
CVMCPUemitMemoryReferenceConditional(CVMJITCompilationContext* con,
                                     int opcode, int destreg, int basereg,
                                     CVMCPUMemSpecToken memSpecToken,
                                     CVMCPUCondCode
                                     condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitMemoryReference(con, opcode, destreg, basereg,
                                  memSpecToken);
    });
}

/* Conditional variant of CVMCPUemitMemoryReferenceImmediate. */
extern void
CVMCPUemitMemoryReferenceImmediateConditional(CVMJITCompilationContext* con,
                                              int opcode, int destreg,
                                              int basereg, CVMInt32 immOffset,
                                              CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitMemoryReferenceImmediate(con, opcode, destreg, basereg,
                                           immOffset);
    });
}
#endif /* !CVMCPU_HAS_CONDITIONAL_LOADSTORE_INSTRUCTIONS */

#ifndef CVMCPU_HAS_CONDITIONAL_CALL_INSTRUCTIONS
/* Conditional variant of CVMCPUemitAbsoluteCall. */
extern void
CVMCPUemitAbsoluteCallConditional(CVMJITCompilationContext* con,
                                  const void* target,
                                  CVMBool okToDumpCp,
                                  CVMBool okToBranchAroundCpDump,
                                  CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitAbsoluteCall(con, target, okToDumpCp,
                               okToBranchAroundCpDump);
    });
}
#endif /* !CVMCPU_HAS_CONDITIONAL_CALL_INSTRUCTIONS */

#ifndef CVMCPU_HAS_CONDITIONAL_ALU_INSTRUCTIONS
/* Conditional variant of CVMCPUemitUnaryALU. */
void
CVMCPUemitUnaryALUConditional(CVMJITCompilationContext *con, int opcode,
                              int destRegID, int srcRegID, CVMBool setcc,
                              CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitUnaryALU(con, opcode, destRegID, srcRegID, setcc);
    });
}

/* Conditional variant of CVMCPUemitBinaryALU. */
void
CVMCPUemitBinaryALUConditional(CVMJITCompilationContext* con,
                               int opcode, int destRegID, int lhsRegID,
                               CVMCPUALURhsToken rhsToken, CVMBool setcc,
                               CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitBinaryALU(con, opcode, destRegID, lhsRegID, rhsToken,
                            setcc);
    });
}

/* Conditional variant of CVMCPUemitBinaryALUConstant. */
void
CVMCPUemitBinaryALUConstantConditional(CVMJITCompilationContext* con,
                                       int opcode, int destRegID,
                                       int lhsRegID, CVMInt32 rhsConstValue,
                                       CVMBool setcc, CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitBinaryALUConstant(con, opcode, destRegID, lhsRegID,
                                    rhsConstValue, setcc);
    });
}

/* Conditional variant of CVMCPUemitBinaryALURegister. */
void
CVMCPUemitBinaryALURegisterConditional(CVMJITCompilationContext* con,
                                       int opcode, int destRegID,
                                       int lhsRegID, int rhsRegID,
                                       CVMBool setcc, CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitBinaryALURegister(con, opcode, destRegID, lhsRegID,
                                    rhsRegID, setcc);
    });
}

/* Conditional variant of CVMCPUemitLoadConstant. */
void
CVMCPUemitLoadConstantConditional(CVMJITCompilationContext *con, int regID,
                                  CVMInt32 v, CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitLoadConstant(con, regID, v);
    });
}

/* Conditional variant of CVMCPUemitMove. */
void
CVMCPUemitMoveConditional(CVMJITCompilationContext* con, int opcode,
                          int destRegID, CVMCPUALURhsToken srcToken,
                          CVMBool setcc, CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitMove(con, opcode, destRegID, srcToken, setcc);
    });
}

/* Conditional variant of CVMCPUemitMoveRegister. */
void
CVMCPUemitMoveRegisterConditional(CVMJITCompilationContext* con, int opcode,
                                  int destRegID, int srcRegID, CVMBool setcc,
                                  CVMCPUCondCode condCode)
{
    emitConditionalInstructions(con, condCode, {
        CVMCPUemitMoveRegister(con, opcode, destRegID, srcRegID, setcc);
    });
}
#endif /* !CVMCPU_HAS_CONDITIONAL_ALU_INSTRUCTIONS */

#if !defined(CVMCPU_HAVE_PLATFORM_SPECIFIC_C_CALL_CONVENTION) && \
    !defined(CVMCPU_ALLOW_C_ARGS_BEYOND_MAX_ARG_REGS)
/* Purpose: Pins an argument to the appropriate register or stores it into
   the appropriate stack location.  NOTE(review): this default version only
   places arguments in integer argument registers; the assert below rules
   out arguments beyond CVMCPU_MAX_ARG_REGS. */
CVMRMResource *
CVMCPUCCALLpinArg(CVMJITCompilationContext *con,
                  CVMCPUCallContext *callContext, CVMRMResource *arg,
                  int argType, int argNo, int argWordIndex,
                  CVMRMregset *outgoingRegs, CVMBool useRegArgs)
{
    int regno = CVMCPU_ARG1_REG + argWordIndex;
    /*
     * In future, we would like to deal with arguments in fp regs, too.
     * But for now, just passing arguments in int registers will have
     * to do.
     */
    arg = CVMRMpinResourceSpecific(CVMRM_INT_REGS(con), arg, regno);
    CVMassert(argWordIndex + arg->size <= CVMCPU_MAX_ARG_REGS);
    /* Mark the pinned register(s) as live outgoing argument registers. */
    *outgoingRegs |= arg->rmask;
    return arg;
}
#endif /* !CVMCPU_HAVE_PLATFORM_SPECIFIC_C_CALL_CONVENTION &&
          !CVMCPU_ALLOW_C_ARGS_BEYOND_MAX_ARG_REGS */

/* Purpose: Emits a constantpool dump with a branch around.
*/
void CVMRISCemitConstantPoolDumpWithBranchAround(
    CVMJITCompilationContext* con)
{
    if (CVMJITcpoolNeedDump(con)) {
        CVMInt32 startPC = CVMJITcbufGetLogicalPC(con);
        CVMInt32 endPC;
        /* Emit a placeholder branch first, then the constant pool dump,
           then go back (via the code-buffer fixup stack) and rewrite the
           branch so it targets the instruction after the dump. */
        CVMJITaddCodegenComment((con, "branch over constant pool dump"));
        CVMCPUemitBranch(con, startPC, CVMCPU_COND_AL);
        CVMJITdumpRuntimeConstantPool(con, CVM_TRUE);
        endPC = CVMJITcbufGetLogicalPC(con);
        /* Emit branch around the constant pool dump: */
        CVMJITcbufPushFixup(con, startPC);
        CVMJITaddCodegenComment((con, "branch over constant pool dump"));
        CVMCPUemitBranch(con, endPC, CVMCPU_COND_AL);
        CVMJITcbufPop(con);
    }
}

/* Purpose: Emits a constantpool dump with a branch around it if needed. */
void
CVMRISCemitConstantPoolDumpWithBranchAroundIfNeeded(
    CVMJITCompilationContext* con)
{
    /* The CVMJITcpoolNeedDump() check here is redundant with the one
       inside CVMRISCemitConstantPoolDumpWithBranchAround(), but it is
       harmless and keeps this entry point cheap when no dump is due. */
    if (CVMJITcpoolNeedDump(con)) {
        CVMRISCemitConstantPoolDumpWithBranchAround(con);
    }
}