stackmaps.c
来自「This is a resource based on j2me embedde」· C语言 代码 · 共 1,928 行 · 第 1/5 页
C
1,928 行
/* NOTE(review): this span begins inside the definition of the context struct
   (typedef'd below as CVMStackmapContext); the opening of the struct is
   outside the visible chunk. */
    CVMCellTypeState* refVarsToInitialize;
    CVMUint32 numRefVarsToInitialize;
    /*
     * For variable splitting, ref-pc and ref-val conflicts
     */
    CVMUint32** localRefsWithAddrAtTOS;
    CVMUint32 nlocalRefsWithAddrAtTOS;
    CVMUint32 maxlocalRefsWithAddrAtTOS;
    int nRefValConflicts;
    int nRefPCConflicts;
    int varMapSize;
    CVMUint32* newVarMap; /* for re-mapping conflicting variables */
    int newVars;          /* The number of new variables */
    /*
     * Whether this method may need to be re-written to eliminate conflicts.
     */
    CVMBool mayNeedRewriting;
    /*
     * A buffer of the right size to print variable and stack state
     */
    char* printBuffer;
    /*
     * A flag indicating whether we do consider "conditional GC-points",
     * encountered during quickening of an opcode. If there is a GC at that
     * point, we have to re-generate stackmaps to include entries for these
     * conditional GC points, since two threads racing to quicken an
     * instruction may cause a conditional GC point to be visible to GC.
     */
    CVMBool doConditionalGcPoints;
    /* Error code set by throwError() before the longjmp back to the
       error handler. */
    CVMMapError error;
} CVMStackmapContext;

/* Basic block macros: */
#ifdef CVM_JIT
#define CVMbbEndsWithRet(bb)          ((bb)->endsWithRet)
#define CVMbbStartTopOfStackCount(bb) ((bb)->topOfStack)
#define CVMbbEndTopOfStackCount(bb)   ((bb)->endTopOfStack)
#endif

/*
 * Forward declarations
 */
static void
CVMstackmapRewriteMethodForConflicts(CVMStackmapContext* con);

static CVMBool
isJsrTarget(CVMStackmapContext* con, CVMUint8* pc);

static void
remapConflictVarno(CVMStackmapContext* con, int varNo);

static int
mapVarno(CVMStackmapContext * con, int varNo, CVMUint8 * pc );

static CVMBool
CVMstackmapAnalyze( CVMStackmapContext *con );

#ifdef CVM_JVMTI
/*
 * Return the length in bytes of the instruction at 'pc'.
 *
 * When JVMTI is enabled, the byte at 'pc' may be opc_breakpoint; in that
 * case the original opcode is temporarily written back so that
 * CVMopcodeGetLength() measures the real instruction, and the breakpoint
 * opcode is then restored.
 */
static CVMUint32
getOpcodeLength(CVMStackmapContext* con, CVMUint8* pc)
{
    CVMUint32 opLen;
    /* Make sure we get the underlying instruction length, and not
       that of opc_breakpoint */
    if (*pc == opc_breakpoint) {
        /* Find the length of the original opcode, so we can skip over it
           by the appropriate amount */
        CVMOpcode instr = CVMjvmtiGetBreakpointOpcode(con->ee, pc, CVM_FALSE);
        *pc = instr;
        opLen = CVMopcodeGetLength(pc);
        *pc = opc_breakpoint;
#ifdef CVM_HW
        /* NOTE(review): presumably needed because the code byte was patched
           in place and the instruction cache must be kept coherent on
           hardware-accelerated targets — confirm against CVM_HW docs. */
        CVMhwFlushCache(pc, pc + 1);
#endif
    } else {
        opLen = CVMopcodeGetLength(pc);
    }
    return opLen;
}
#else
/* Without JVMTI no breakpoint patching can occur, so the plain opcode
   length suffices. */
#define getOpcodeLength(con, pc) (CVMopcodeGetLength(pc))
#endif

/*
 * Initialize the stackmap computation context 'con' for method 'mb'.
 * Zeroes the whole context, then caches the method's descriptor, code,
 * constant pool, and size parameters (locals, max stack, exception table
 * length) used throughout the analysis.
 */
static void
CVMstackmapInitializeContext(CVMExecEnv* ee, CVMStackmapContext* con,
                             CVMMethodBlock* mb,
                             CVMBool doConditionalGcPoints)
{
    CVMJavaMethodDescriptor* jmd;
    memset((void*)con, 0, sizeof(CVMStackmapContext));
    jmd = CVMmbJmd(mb);
    con->ee = ee;
    con->mb = mb;
    con->cb = CVMmbClassBlock(mb);
#ifdef CVM_JVMTI
    /* An obsolete (redefined) method may carry its own constant pool;
       fall back to the class's pool if it does not. */
    if (CVMjvmtiMbIsObsolete(mb)) {
        con->cp = CVMjvmtiMbConstantPool(mb);
        if (con->cp == NULL) {
            con->cp = CVMcbConstantPool(con->cb);
        }
    } else
#endif
    {   /* Matching else from above */
        con->cp = CVMcbConstantPool(con->cb);
    }
    con->jmd = jmd;
    con->code = CVMjmdCode(jmd);
    con->codeLen = CVMjmdCodeLength(jmd);
    con->nVars = CVMjmdMaxLocals(jmd);
    /*
     * This expression is obviously a rather small pointer
     * difference. So just cast it to the type of 'maxStack'.
     */
    con->maxStack = (CVMUint16)(CVMjmdMaxStack(jmd));
    con->stateSize = con->nVars + con->maxStack;
    con->nExceptionHandlers = CVMmbExceptionTableLength(con->mb);
    con->doConditionalGcPoints = doConditionalGcPoints;
}

/*
 * Release all heap memory owned by the context. Safe to call with fields
 * still NULL (free(NULL) is a no-op for the first two; the remaining
 * pointers are explicitly guarded).
 */
static void
CVMstackmapDestroyContext(CVMStackmapContext* con)
{
    free(con->mapsArea);
    free(con->basicBlocks);
    if (con->stackMaps != NULL) {
        /* con->stackMaps was not assigned to the method */
        free(con->stackMaps);
    }
    if ( con->localRefsWithAddrAtTOS != NULL )
        free( con->localRefsWithAddrAtTOS );
    if ( con->newVarMap != NULL )
        free( con->newVarMap );
}

#ifdef CVM_TRACE
/* Map a CVMMapError code to a human-readable string for tracing. */
static const char*
errorString(CVMMapError errorCode)
{
    switch(errorCode) {
    case CVM_MAP_SUCCESS:
        return "Success";
    case CVM_MAP_OUT_OF_MEMORY:
        return "Out of memory";
    case CVM_MAP_EXCEEDED_LIMITS:
        return "Exceeded limits";
    case CVM_MAP_CANNOT_MAP:
        return "Cannot compute maps";
    default:
        return "Unknown error code";
    }
}
#endif

/*
 * Record 'error' in the context and unwind non-locally to the handler
 * established (elsewhere) with setjmp on con->errorHandlerContext.
 * Does not return.
 */
static void
throwError(CVMStackmapContext* con, CVMMapError error)
{
    con->error = error;
    longjmp(con->errorHandlerContext, 1);
}

/* NOTE(review): '#if CVM_JIT' here vs '#ifdef CVM_JIT' above — works only
   if CVM_JIT is always defined to a non-zero value; confirm build config. */
#if CVM_JIT
/* Purpose: Filter out some uncompilable methods based on the stackmap
   data flow info. */
void
CVMstackmapFilterNonCompilableMethod(CVMExecEnv *ee, CVMStackmapContext *con)
{
    CVMUint32 i;
    for (i = 0; i < con->nBasicBlocks; i++) {
        CVMBasicBlock *bb = con->basicBlocks + i;
        /* We cannot compile methods which have:
           1. jsr opcodes with a non-empty stack at that point, which is
              the same as the start of a jsr target block with a non-empty
              stack.
           2. ret opcodes with a non-empty stack at that point.
           These kinds of scenarios are rare because javac does not
           generate code like this. So, mark these methods as never
           compile and let them run interpreted instead.

           NOTE(review): the '> 1' (rather than '> 0') presumably accounts
           for the return address that a jsr target always has on the
           stack — confirm against the basic-block construction code. */
        if ((isJsrTarget(con, bb->startPC) &&
             (CVMbbStartTopOfStackCount(bb) > 1)) ||
            (CVMbbEndsWithRet(bb) &&
             (CVMbbEndTopOfStackCount(bb) != 0))) {
            CVMJITneverCompileMethod(ee, con->mb);
            return;
        }
    }
}
#endif /* CVM_JIT */

/*
 * Set the bit for 'pc' in the GC-points bitmap (one bit per code offset,
 * packed 32 per word) and bump the GC-point count, but only on the first
 * sighting of this pc.
 */
static void
CVMstackmapMarkGCPoint(CVMStackmapContext* con,
                       CVMUint32* gcPointsBitmap, CVMUint16 pc)
{
    CVMUint32 idx = pc / 32;
    CVMUint32 bit = (1 << (pc % 32));
    /* Mark and count if it has not been seen before */
    if ((gcPointsBitmap[idx] & bit) == 0) {
        gcPointsBitmap[idx] |= bit;
        con->nGCPoints++;
    }
}

/* Test whether 'pc' was marked as a GC point in the bitmap. */
static CVMBool
CVMstackmapIsGCPoint(CVMUint32* gcPointsBitmap, CVMUint16 pc)
{
    CVMUint32 idx = pc / 32;
    CVMUint32 bit = (1 << (pc % 32));
    return ((gcPointsBitmap[idx] & bit) != 0);
}

/*
 * For maintaining the liveness analysis stack.
 * LIFO stack, to which we only add blocks that aren't already
 * marked as live. Mark them as live as we push on the stack
 * to prevent re-adding later.
 */
static void
CVMstackmapLivenessPush(CVMStackmapContext* con, CVMBasicBlock* bb)
{
    if (bb->isLive){
        return; /* already on list or analysed */
    }
    bb->isLive = CVM_TRUE;
    bb->liveNext = con->liveStack;
    con->liveStack = bb;
}

/* Pop the next block off the liveness work stack; NULL when empty. */
static CVMBasicBlock*
CVMstackmapLivenessPop(CVMStackmapContext* con)
{
    CVMBasicBlock* nextBlock = con->liveStack;
    if (nextBlock == NULL){
        return NULL;
    }
    con->liveStack = nextBlock->liveNext;
    nextBlock->liveNext = NULL;
    return nextBlock;
}

/*
 * Process one block from the liveness work stack: push all of its
 * successors (and, for a block ending in jsr, the fall-through block).
 * Returns CVM_FALSE when the stack is exhausted.
 */
static CVMBool
CVMstackmapLivenessProcessNext(CVMStackmapContext* con)
{
    CVMBasicBlock* bb = CVMstackmapLivenessPop(con);
    CVMBasicBlock** successor;
    CVMBasicBlock* successorBlock;
    if (bb == NULL){
        return CVM_FALSE;
    }
    CVMassert(bb->isLive);
    successor = bb->successors;
    /* If there is no list of successors, then there can be no JSR either */
    if (successor != NULL){
        while ((successorBlock = *successor++) != NULL){
            CVMstackmapLivenessPush(con, successorBlock);
        }
        if (bb->endsWithJsr){
            /*
             * the block immediately following this one
             * is live, too, as the jsr/ret combination
             * will end up there. The jsr is a "fall through"
             * instruction in this sense, unlike goto.
             */
            CVMstackmapLivenessPush(con, bb+1);
        }
    }
    return CVM_TRUE;
}

/*
 * Run the liveness fixpoint over the basic blocks, then scrub the jsr
 * caller tables of any return-point blocks that follow a *dead* jsr, so
 * that later ret-successor mapping does not resurrect them.
 */
static void
CVMstackmapLivenessAnalyze(CVMStackmapContext* con)
{
    CVMBasicBlock* bb;
    CVMUint32 nBasicBlocks;
    /*
     * First, do the flow to find all the live blocks
     * and thus detect the dead ones.
     */
    while (CVMstackmapLivenessProcessNext(con))
        ;
    /*
     * Now find any that are dead and which end
     * with a JSR. The following block is on a list
     * of jsr targets and must be removed.
     */
    for (bb = con->basicBlocks, nBasicBlocks = con->nBasicBlocks;
         nBasicBlocks > 0; bb++, nBasicBlocks--){
        CVMBasicBlock * nextBb;
        CVMJsrTableEntry* jsrTable;
        CVMInt32 jsrTableSize;
        CVMInt32 ncallers;
        if (bb->isLive){
            /* this block is fine */
            continue;
        }
        if (!(bb->endsWithJsr)){
            /* don't care - this is not the situation we're
               looking for */
            continue;
        }
        nextBb = bb+1; /* which will really appear in the tables. */
        /*
         * This may not be the most efficient search,
         * but it is certainly correct.
         * Since this never happens, it doesn't matter much.
         */
        for( jsrTable = con->jsrTable, jsrTableSize = con->jsrTableSize;
             jsrTableSize > 0; jsrTableSize--, jsrTable++){
            ncallers = jsrTable->jsrNoCalls;
            while (ncallers-- > 0){
                if (jsrTable->jsrCallers[ncallers] == nextBb){
                    /* expunge this reference */
                    jsrTable->jsrCallers[ncallers] = NULL;
                }
            }
        }
    }
}

/*
 * Find the CVMJsrTableEntry for a given targetPC. Return 0 if the
 * target does not occur in the JSR table. Linear search is good
 * enough, since this is so uncommon.
 */
static CVMJsrTableEntry*
CVMstackmapGetJsrTargetEntry(CVMStackmapContext* con, CVMUint32 targetPC)
{
    int i;
    for (i = 0; i < con->jsrTableSize; i++) {
        if (con->jsrTable[i].jsrPC == targetPC) {
            return &con->jsrTable[i];
        }
    }
    return 0;
}

/*
 * Add a new jsr call to the list of jsr's encountered. If this is the first
 * time this jsr target was encountered, make a new entry for it.
 */
/*
 * CVMstackmapAddJsrCall()
 * the argument returnPC is cast to type CVMBasicBlock*
 * therefore the type has to be CVMAddr which is 4 byte on
 * 32 bit platforms and 8 byte on 64 bit platforms
 */
static void
CVMstackmapAddJsrCall(CVMStackmapContext* con, CVMUint16 targetPC,
                      CVMAddr returnPC, CVMBasicBlock*** jsrCallersArea,
                      CVMUint32 nJsrs)
{
    CVMJsrTableEntry* targetEntry =
        CVMstackmapGetJsrTargetEntry(con, targetPC);
    /*
     * If we haven't seen this target yet, add a new
     * CVMJsrTableEntry to the list of known jsr's.
     */
    if (targetEntry == 0) {
        targetEntry = &con->jsrTable[con->jsrTableSize++];
        targetEntry->jsrPC = targetPC;
        targetEntry->jsrNoCalls = 0;
        targetEntry->jsrCallers = *jsrCallersArea;
        /*
         * Allocate the worst-case number of callers +
         * one word for zero-termination.
         *
         * This is uncommon enough and the number
         * of jsr's is few enough, so I am not
         * worried about the space waste here.
         */
        *jsrCallersArea += nJsrs;
    }
    /*
     * Store the return from this jsr as the caller
     * address. This address will be the successor
     * of a matching ret instruction.
     *
     * Note: We will be mapping the returnPC to a basic block with
     * returnPC as the header.
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?