jitir.c
来自「This is a resource based on j2me embedde」· C语言 代码 · 共 2,041 行 · 第 1/5 页
C
2,041 行
    /* NOTE(review): this is the tail of a function whose signature begins
     * before this chunk (it reads mc/localNo/typeTag and returns the cached
     * LOCAL IR node for localNo, creating one on first use). */
    CVMJITIRNode* loc;
    CVMassert(localNo < mc->localsSize);
    loc = mc->locals[localNo];
    if (loc == NULL) {
        /* First use of this local: build a LOCAL node for the mapped
         * (inlining-adjusted) local word. */
        CVMInt32 mappedLocalNo = mc->firstLocal + localNo;
        CVMassert(mappedLocalNo < mc->currLocalWords);
        CVMassert(mappedLocalNo >= mc->currLocalWords - mc->nOwnLocals);
#if 1 /* These are the same, so one of them should probably be removed */
        CVMassert(mc->nOwnLocals == mc->localsSize);
#endif
        loc = CVMJITirnodeNewLocal(con, CVMJIT_ENCODE_LOCAL(typeTag),
                                   mappedLocalNo);
        CVMassert(mc->compilationDepth <= mc->currBlock->inDepth);
        CVMassert(mappedLocalNo < mc->currBlock->numInitialLocals);
        /* Record the node as the block's initial value for this slot and
         * cache it in both local maps so later reads reuse the same node. */
        mc->currBlock->initialLocals[mappedLocalNo] = loc;
        CVMJITirnodeSetIsInitialLocal(loc);
        mc->locals[localNo] = loc;
        mc->physLocals[localNo] = loc;
        CVMJITstatsRecordInc(con, CVMJIT_STATS_NUMBER_OF_LOCAL_NODES);
#ifdef CVM_JIT_REGISTER_LOCALS
        addIncomingLocal(con, mc->currBlock, loc);
        /* Add node decoration for better register targeting in backend.
         * Note that successorBlocksIdx is set to the *next* block we will
         * branch to, not the previous block. It is used by the backend
         * so it can find out what register the local is expected to be in
         * in the next block we may branch to. */
        loc->decorationType = CVMJIT_LOCAL_DECORATION;
        loc->decorationData.successorBlocksIdx =
            mc->currBlock->successorBlocksCount;
#endif
    }
#ifdef CVM_DEBUG_ASSERTS
    {
        /* Debug-only sanity check: an OBJ-typed local must be flagged as a
         * reference in the current ref-locals map. */
        CVMJITIRNode* value = CVMJITirnodeValueOf(loc);
        if (CVMJITirnodeIsLocalNode(value) && typeTag == CVM_TYPEID_OBJ) {
            CVMInt32 l = CVMJITirnodeGetLocal(value)->localNo;
            CVMassert(CVMJITlocalrefsIsRef(&con->curRefLocals, l));
        }
    }
#endif
    return loc;
}

/*
 * Push the LOCAL node for localNo (of type typeTag) onto the operand stack.
 */
static void
pushLocal(CVMJITCompilationContext* con, CVMJITMethodContext* mc,
          CVMUint16 localNo, CVMUint16 typeTag)
{
    CVMJITirnodeStackPush(con, getLocal(con, mc, localNo, typeTag));
}

/*
 * Create various flavors of CVMJITConstant32 and CVMJITConstant64 nodes
 * and push them on the stack.
 */

/* Push a float constant; its bit pattern is carried as a 32-bit numeric. */
static void
pushConstantJavaFloat(CVMJITCompilationContext* con, CVMJavaFloat f)
{
    CVMJavaVal32 val32;
    CVMJITIRNode* node;
    val32.f = f; /* reinterpret the float bits through the value union */
    node = CVMJITirnodeNewConstantJavaNumeric32(con, val32.i,
                                                CVM_TYPEID_FLOAT);
    CVMJITirnodeStackPush(con, node);
}

/* Push a Java int constant node. */
static void
pushConstantJavaInt(CVMJITCompilationContext* con, CVMJavaInt i)
{
    CVMJITIRNode* node =
        CVMJITirnodeNewConstantJavaNumeric32(con, i, CVM_TYPEID_INT);
    CVMJITirnodeStackPush(con, node);
}

/* Push a raw (untyped) 32-bit constant node. */
static void
pushConstant32(CVMJITCompilationContext* con, CVMJavaInt i)
{
    CVMJITIRNode* node =
        CVMJITirnodeNewConstantJavaNumeric32(con, i, CVMJIT_TYPEID_32BITS);
    CVMJITirnodeStackPush(con, node);
}

/* Push a constant referring to a String ICell (indirect cell). */
static void
pushConstantStringICell(CVMJITCompilationContext* con, CVMStringICell* str)
{
    CVMJITIRNode* node = CVMJITirnodeNewConstantStringICell(con, str);
    CVMJITirnodeStackPush(con, node);
}

/* Push a constant referring to a String object directly. */
static void
pushConstantStringObject(CVMJITCompilationContext* con, CVMStringObject* str)
{
    CVMJITIRNode* node = CVMJITirnodeNewConstantStringObject(con, str);
    CVMJITirnodeStackPush(con, node);
}

/* Push a 64-bit constant (long/double, selected by typeTag). */
static void
pushConstantJavaVal64(CVMJITCompilationContext* con, CVMJavaVal64* val64,
                      CVMUint8 typeTag)
{
    CVMJITIRNode* node =
        CVMJITirnodeNewConstantJavaNumeric64(con, val64, typeTag);
    CVMJITirnodeStackPush(con, node);
}

/*
 * Force evaluation of expressions on the operand stack if they
 * have side effects, but leave them on the stack instead of
 * popping them.
 */
void
CVMJITirDoSideEffectOperator(CVMJITCompilationContext* con,
                             CVMJITIRBlock* curbk)
{
    CVMJITStack* operandStack = con->operandStack;
    CVMUint32 stkCnt;
    CVMwithAssertsOnly({ con->operandStackIsBeingEvaluated = CVM_TRUE; });
    /* Walk every stack element; only unevaluated nodes with side effects
     * need to be forced. */
    for (stkCnt = 0; stkCnt < CVMJITstackCnt(con, operandStack); stkCnt++) {
        CVMJITIRNode* stkNode =
            CVMJITstackGetElementAtIdx(con, operandStack, stkCnt);
        if (!CVMJITirnodeHasBeenEvaluated(stkNode) &&
            CVMJITirnodeHasSideEffects(stkNode)) {
            /* Make sure this node gets evaluated. */
            CVMJITirForceEvaluation(con, curbk, stkNode);
        }
    } /* end of for loop */
    CVMwithAssertsOnly({ con->operandStackIsBeingEvaluated = CVM_FALSE; });
}

/*
 * We are about to do something that might throw an exception,
 * so we "flow" into our exception handlers.
 *
 * Return CVM_TRUE if the merge causes any changes, CVM_FALSE
 * otherwise.
 */
static CVMBool
connectFlowToExcHandlers(CVMJITCompilationContext* con,
                         CVMJITMethodContext* mc,
                         CVMUint16 curPC, CVMBool flushLocals)
{
    CVMBool change = CVM_FALSE;
    CVMBool done = CVM_FALSE;
    CVMJITIRBlock *curbk = con->mc->currBlock;
    /* We begin at the current translation context and work our way
       back through our callers. This is a general solution for the
       time when we allow inlined methods to have exception
       handlers. */
    do {
        CVMJavaMethodDescriptor* jmd = mc->jmd;
        CVMExceptionHandler *eh = CVMjmdExceptionTable(jmd);
        CVMExceptionHandler *ehEnd = eh + CVMjmdExceptionTableLength(jmd);
        /* Check each exception handler to see if the pc is in its range */
        for (; eh < ehEnd; eh++) {
            if (curPC >= eh->startpc && curPC < eh->endpc) {
                /* Merge into exception handlers in range */
                CVMJITIRBlock* handlerBlock = mc->pcToBlock[eh->handlerpc];
                CVMassert(handlerBlock != NULL);
                /* Flush locals during translation, but not during
                   refinement pass */
                if (flushLocals) {
                    CVMJITirFlushOutBoundLocals(con, curbk, mc, CVM_TRUE);
                    /* We only need to flush locals once, at the
                       "innermost" context. The flush operation deals
                       intelligently with caller contexts. */
                    flushLocals = CVM_FALSE;
                }
                /* Connect flow, which also merges local refs. Keep track
                   if the local refs have changed in any target blocks. */
                change |= CVMJITirblockConnectFlow(con, curbk, handlerBlock);
                /* If the handler catches everything, then we are done.
                   No need to do flow for enclosing contexts. */
                if (eh->catchtype == 0) {
                    done = CVM_TRUE;
                }
            }
        }
        /* For exception purposes, the PC for the caller context is
           where the invocation opcode was located. */
        curPC = mc->invokePC;
        mc = mc->caller;
    } while (mc != NULL && !done);
    return change;
}

static CVMBool
gotoBranchToTargetBlock(CVMJITCompilationContext* con, CVMJITIRBlock* curbk,
                        CVMJITIRBlock* targetBlock, CVMUint16 opcodeTag,
                        CVMBool fallthroughOK);

/*
 * conditional branch
 *
 * Emits a BCOND to targetbk, after trying two strength reductions:
 * a null-check branch whose operand is known non-null is dropped, and
 * an int compare of two 32-bit constants is folded into either a GOTO
 * or nothing. Returns CVM_FALSE only via gotoBranchToTargetBlock.
 */
static CVMBool
condBranchToTargetBlock(CVMJITCompilationContext* con, CVMJITIRBlock* curbk,
                        CVMUint16 typeTag, CVMJITCondition condition,
                        CVMJITIRNode* rhsNode, CVMJITIRNode* lhsNode,
                        CVMJITIRBlock* targetbk, CVMUint8 flags)
{
    CVMJITIRNode *lhs = CVMJITirnodeValueOf(lhsNode);
    CVMJITIRNode *rhs = CVMJITirnodeValueOf(rhsNode);
    /* Check to see if the condition for this branch is in fact a NULL
       check.  nullCheck will be TRUE if the condition is a NULL check: */
    CVMBool nullCheck =
        (condition == CVMJIT_EQ &&
         CVMJITirnodeIsReferenceType(lhs) &&
         CVMJITirnodeIsConstantAddrNode(rhs) &&
         CVMJITirnodeGetConstantAddr(rhs)->vAddr == (CVMAddr)NULL);

    /* If the condition is a NULL check and we know that the lhs node is
       never NULL, then the branch will never happen and we don't need to
       emit code for it.  However, we still need to make sure that the
       operands for the condition are fully evaluated if they can have
       noticeable side effects. */
    if (nullCheck && !nullCheckNeeded(con, lhs)) {
        if (CVMJITirnodeHasSideEffects(lhsNode) ||
            CVMJITirnodeHasSideEffects(rhsNode)) {
            CVMJITirDoSideEffectOperator(con, curbk);
            if (CVMJITirnodeHasSideEffects(lhsNode)) {
                CVMJITirForceEvaluation(con, curbk, lhsNode);
            }
            if (CVMJITirnodeHasSideEffects(rhsNode)) {
                CVMJITirForceEvaluation(con, curbk, rhsNode);
            }
        }
        /* No need to check again! */
        CVMJITstatsRecordInc(con, CVMJIT_STATS_NULL_CHECKS_ELIMINATED);
        return CVM_TRUE;
    }

    /* We only handle integer compares for now */
    if (typeTag == CVM_TYPEID_INT &&
        CVMJITirnodeIsConstant32Node(lhs) &&
        CVMJITirnodeIsConstant32Node(rhs)) {
        /* Both operands are int constants: fold the comparison at
           compile time. */
        CVMBool pass;
        CVMassert(!CVMJITirnodeHasSideEffects(lhsNode));
        CVMassert(!CVMJITirnodeHasSideEffects(rhsNode));
        switch (condition) {
        case CVMJIT_LT:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i <
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        case CVMJIT_LE:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i <=
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        case CVMJIT_EQ:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i ==
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        case CVMJIT_GE:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i >=
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        case CVMJIT_GT:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i >
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        case CVMJIT_NE:
            pass = CVMJITirnodeGetConstant32(lhs)->j.i !=
                   CVMJITirnodeGetConstant32(rhs)->j.i;
            break;
        default:
            CVMassert(CVM_FALSE);
            pass = CVM_FALSE; /* resolve compiler warning. */
        }
        if (pass) {
            /* Always taken: degrade to an unconditional goto. */
            return gotoBranchToTargetBlock(con, curbk, targetbk,
                                           CVMJIT_GOTO, CVM_TRUE);
        } else {
            /* do nothing */
        }
        return CVM_TRUE;
    }

    {
        /* build BCOND node */
        CVMJITIRNode* bcondNode =
            CVMJITirnodeNewCondBranchOp(con, lhsNode, rhsNode, typeTag,
                                        condition, targetbk, flags);
        /* Push target block onto block stack
           connect control flow arc between curbk and target block
           phiMerge is needed. The stack items are needed to finish up
           translating the rest of opcodes in the extended basic block */
        CVMJITirblockAtBranch(con, curbk, targetbk,
                              lhsNode, rhsNode, bcondNode, CVM_FALSE);
        /*
         * Add target block to list of blocks we flow incoming locals from.
         *
         * We must do this after calling CVMJITirblockAtBranch so ASSIGN
         * nodes have been flushed.
         */
#ifdef CVM_JIT_REGISTER_LOCALS
        addIncomingLocalsSuccessorBlock(con, curbk, targetbk, CVM_FALSE);
#endif
#ifdef CVMJIT_PATCH_BASED_GC_CHECKS
        /* Backward branches are GC points under patch-based checks. */
        if (CVMJITirblockIsBackwardBranchTarget(targetbk)) {
            con->gcCheckPcsSize++;
        }
#endif
    }
    if (nullCheck) {
        /* Remember that lhs has now been null-checked on the fall-through
           path. */
        nullCheckMark(con, lhsNode);
    }
    return CVM_TRUE;
}

/*
 * conditional branch
 *
 * Decodes the signed 16-bit branch offset at curPC+1 and forwards to
 * condBranchToTargetBlock with the resolved target block.
 */
static CVMBool
condBranch(CVMJITCompilationContext* con, CVMJITIRBlock* curbk,
           CVMUint16 typeTag, CVMJITCondition condition,
           CVMJITIRNode* rhsNode, CVMJITIRNode* lhsNode,
           CVMUint16 curPC, CVMUint8 flags)
{
    /* Build target block */
    CVMJITMethodContext* mc = con->mc;
    CVMUint8* codeBegin = CVMjmdCode(mc->jmd);
    /* Branch target = opcode pc + signed 16-bit offset operand. */
    CVMUint16 targetPC = CVMgetInt16(codeBegin+curPC+1) + curPC;
    CVMJITIRBlock* targetbk = mc->pcToBlock[targetPC];
    return condBranchToTargetBlock(con, curbk, typeTag, condition,
                                   rhsNode, lhsNode, targetbk, flags);
}

/*
 * goto and jsr handling
 *
 * Returns CVM_TRUE if execution falls through to the next block (so
 * translation of the target may continue inline), CVM_FALSE if an
 * explicit branch node was emitted.  Either way translation of the
 * current block is aborted.
 */
static CVMBool
gotoBranchToTargetBlock(CVMJITCompilationContext* con, CVMJITIRBlock* curbk,
                        CVMJITIRBlock* targetBlock, CVMUint16 opcodeTag,
                        CVMBool fallthroughOK)
{
    con->mc->abortTranslation = CVM_TRUE;
    if (fallthroughOK && CVMJITirblockGetNext(curbk) == targetBlock) {
        /* If branching to next block, just fall through */
        /* If this is the only branch to the target block, we should
           remove the IsBranchTarget flag so that merging can happen. */
        return CVM_TRUE;
    } else {
        CVMJITIRNode* branchNode;
        CVMJITIRBlock* thisBlock = curbk;
#ifdef CVMJIT_PATCH_BASED_GC_CHECKS
        /* Backward branches are GC points under patch-based checks. */
        if (CVMJITirblockIsBackwardBranchTarget(targetBlock)) {
            con->gcCheckPcsSize++;
        }
#endif
        CVMJITirblockAtBranch(con, thisBlock, targetBlock,
                              NULL, NULL, NULL, fallthroughOK);
        /* Append branch node to the current root list */
        branchNode = CVMJITirnodeNewBranchOp(con,
                         CVMJIT_ENCODE_BRANCH(opcodeTag), targetBlock);
        CVMJITirnodeNewRoot(con, thisBlock, branchNode);
        /*
         * Add target block to list of blocks we flow incoming locals from.
         * A goto is treated like a fallthrough in that we always want to add
         * the target block.
         *
         * We must do this after calling CVMJITirblockAtBranch so ASSIGN
         * nodes have been flushed.
         */
#ifdef CVM_JIT_REGISTER_LOCALS
        addIncomingLocalsSuccessorBlock(con, curbk, targetBlock, CVM_TRUE);
#endif
        return CVM_FALSE;
    }
}

/*
 * goto and jsr handling
 */
static CVMBool
gotoBranch(CVMJITCompilationContext* con, CVMJITIRBlock* curbk,
           CVMUint16 targetPC, CVMUint16 opcodeTag)
{
    CVMJITIRBlock* targetBlock = con->mc->pcToBlock[targetPC];
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?