stackmaps.c
From "This is a resource based on j2me embedded" · C source code · 1,928 lines total · page 1 of 5
C
1,928 lines
*/ targetEntry->jsrCallers[targetEntry->jsrNoCalls++] = (CVMBasicBlock*)returnPC;}/* * Make two passes over the code in order to build basic blocks. * * The first pass: * * 1) Find out where basic block headers are. * 2) Count the number of basic blocks * 3) Count the total number of "successors" to each basic block * * At the end of this pass, we are ready to create the basic blocks * themselves, as well as the data area for all successors. * * The second pass: * * 1) Make the basic block structures * 2) Set successors of basic blocks * 3) Do fast mappings of exception edges and return tables to basic blocks. * * %comment rt010 */static voidCVMstackmapFindBasicBlocks(CVMStackmapContext* con){ /* * Two maps. pcToBBMap indicates at first whether a bytecode is a * basic block header or not and later on holds a mapping of basic * block header PC's to basic block structures. gcPointsBitmap * tells us which PC's hold GC points (including backwards * branches, etc.) */ void* mapsArea; CVMBasicBlock** pcToBBMap; CVMUint32* gcPointsBitmap;#define CVM_MAP_MARK_BB_HDR(pc) \ pcToBBMap[(CVMUint32)(pc)] = (CVMBasicBlock*)1#define CVM_MAP_IS_BB_HDR(pc) \ (pcToBBMap[(CVMUint32)(pc)] != 0)#define CVM_MAP_SET_BB_FOR_PC(pc, bb) \ pcToBBMap[(CVMUint32)(pc)] = (bb)#define CVM_MAP_GET_BB_FOR_PC(pc) \ (pcToBBMap[(CVMUint32)(pc)]) CVMUint32 nSuccessors = 0; CVMUint32 nBasicBlocks = 0; CVMUint32 nInvocations = 0; CVMUint32 nJsrs = 0; /* No of 'jsr' calls */ CVMUint32 nExceptionHandlers = con->nExceptionHandlers; CVMBasicBlock** jsrCallersArea; CVMUint8* pc; CVMUint8* codeBegin; CVMUint8* codeEnd; void* bbAndSuccessorsArea; CVMBasicBlock* basicBlocks; CVMBasicBlock* currentBasicBlock; CVMBasicBlock** successorsArea; CVMBasicBlock** successorsAreaEnd; CVMBasicBlock** exceptionsArea; CVMCellTypeState* stateArea; CVMUint32 memSize; /* The size of allocated extra memory */ /* * mapsArea needs space to hold native pointers because * it is cast to CVMBasicBlock** * therefore use sizeof(CVMAddr) 
which is 4 byte on 32 bit * platforms and 8 byte on 64 bit platforms */ mapsArea = calloc(con->codeLen + (con->codeLen + 31) / 32, sizeof(CVMAddr)); if (mapsArea == NULL) { throwError(con, CVM_MAP_OUT_OF_MEMORY); } pcToBBMap = (CVMBasicBlock**)mapsArea; gcPointsBitmap = (CVMUint32*)(pcToBBMap + con->codeLen); con->mapsArea = mapsArea; /* Will free when done */ con->gcPointsBitmap = gcPointsBitmap; /* A summary of GC points seen */ /* * Start counting and marking */ codeBegin = con->code; codeEnd = &con->code[con->codeLen]; pc = codeBegin; CVM_MAP_MARK_BB_HDR(0); /* The first instruction is a basic block */ nBasicBlocks++; /* The first instruction is also a GC point */ CVMstackmapMarkGCPoint(con, gcPointsBitmap, 0); while(pc < codeEnd) { CVMOpcode instr = (CVMOpcode)*pc; CVMUint32 instrLen = getOpcodeLength(con, pc); if (CVMbcAttr(instr, BRANCH)) { /* * Set headers, and count successors for all branches. * We either have a goto or jsr, a tableswitch, a lookupswitch, * or one of the if's. */ switch(instr) { case opc_goto: case opc_jsr: { /* An unconditional goto, 2-byte offset */ CVMInt16 offset = CVMgetInt16(pc+1); /* * The one and only use for 'codeOffset' is to * store pointer differences. Even if we know that * they will always fit into 16 bits I can see no * sense in using a 16 bit variable since the code * will never be slower (but often faster) if we * use a variable matching the machine size * instead. 
*/ CVMAddr codeOffset = pc + offset - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); nSuccessors++; /* And mark the following instruction */ codeOffset = pc + instrLen - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } if (instr == opc_jsr) { nJsrs++; } break; } case opc_goto_w: case opc_jsr_w: { /* An unconditional goto, 4-byte offset */ CVMInt32 offset = CVMgetInt32(pc+1); /* * The one and only use for 'codeOffset' is to * store pointer differences. Even if we know that * they will always fit into 32 bits I can see no * sense in using a 32 bit variable since the code * will never be slower (but often faster) if we * use a variable matching the machine size * instead. */ CVMAddr codeOffset = pc + offset - codeBegin; /* We don't know how to deal with code size > 64k */ CVMassert(codeOffset <= 65535); if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } nSuccessors++; /* And mark the following instruction */ codeOffset = pc + instrLen - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); if (instr == opc_jsr_w) { nJsrs++; } break; } case opc_lookupswitch: { CVMInt32* lpc = (CVMInt32*)CVMalignWordUp(pc+1); CVMInt32 skip = CVMgetAlignedInt32(lpc); /* default */ CVMInt32 npairs = CVMgetAlignedInt32(&lpc[1]); /* First mark the default */ /* * The one and only use for 'codeOffset' is to * store pointer differences. Even if we know that * they will always fit into 16 bits I can see no * sense in using a 16 bit variable since the code * will never be slower (but often faster) if we * use a variable matching the machine size * instead. 
*/ CVMAddr codeOffset = pc + skip - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); nSuccessors += npairs + 1; /* And all the possible case arms */ lpc += 3; /* Go to the first offset */ while (--npairs >= 0) { skip = CVMgetAlignedInt32(lpc); codeOffset = pc + skip - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } lpc += 2; /* next offset */ } /* Mark the following instruction as block header */ codeOffset = pc + instrLen - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } break; } case opc_tableswitch: { CVMInt32* lpc = (CVMInt32*)CVMalignWordUp(pc+1); CVMInt32 skip = CVMgetAlignedInt32(lpc); /* default */ CVMInt32 low = CVMgetAlignedInt32(&lpc[1]); CVMInt32 high = CVMgetAlignedInt32(&lpc[2]); CVMInt32 noff = high - low + 1; /* First mark the default */ /* * The one and only use for 'codeOffset' is to * store pointer differences. Even if we know that * they will always fit into 16 bits I can see no * sense in using a 16 bit variable since the code * will never be slower (but often faster) if we * use a variable matching the machine size * instead. 
*/ CVMAddr codeOffset = pc + skip - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); nSuccessors += noff + 1; lpc += 3; /* Skip default, low, high */ while (--noff >= 0) { skip = CVMgetAlignedInt32(lpc); codeOffset = pc + skip - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } lpc++; } /* Mark the following instruction as block header */ codeOffset = pc + instrLen - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } break; } default: { CVMInt16 skip; /* * The one and only use for 'codeOffset' is to * store pointer differences. Even if we know that * they will always fit into 16 bits I can see no * sense in using a 16 bit variable since the code * will never be slower (but often faster) if we * use a variable matching the machine size * instead. */ CVMAddr codeOffset; /* This had better be one of the 'if' guys */ CVMassert(((instr >= opc_ifeq) && (instr <= opc_if_acmpne)) || (instr == opc_ifnull) || (instr == opc_ifnonnull)); CVMassert(instrLen == 3); skip = CVMgetInt16(pc+1); /* Mark the target of the 'if' */ codeOffset = pc + skip - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); /* And mark the following instruction */ codeOffset = pc + 3 - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } nSuccessors += 2; /* Target + fall-through */ } } } else if (CVMbcAttr(instr, NOCONTROLFLOW) || ((instr == opc_wide) && (pc[1] == opc_ret))) { if (pc + instrLen < codeEnd) { /* Don't unless there are more instructions */ /* * The one and only use for 'codeOffset' is to store a pointer * difference. 
Even if we know that they will always fit into * 16 bits I can see no sense in using a 16 bit variable since * the code will never be slower (but often faster) if we use * a variable matching the machine size instead. */ CVMAddr codeOffset; /* * Mark the next guy as a new basic block header */ codeOffset = pc + instrLen - codeBegin; if (!CVM_MAP_IS_BB_HDR(codeOffset)) { CVM_MAP_MARK_BB_HDR(codeOffset); nBasicBlocks++; } } } /* JVMPI needs this in order to support instruction tracing: */#if (!defined(CVM_JVMTI) && !defined(CVM_JVMPI_TRACE_INSTRUCTION)) /* * We counted the potential backwards branches. Now count the * other GC points. */ if (CVMbcAttr(instr, GCPOINT) || (con->doConditionalGcPoints && CVMbcAttr(instr, COND_GCPOINT)))#else /* JVMTI and/or CVM_JVMPI_TRACE_INSTRUCTION */ /* Mark all instructions as GC points because a thread can be suspended anywhere. */#endif /* (!defined(CVM_JVMPI_TRACE_INSTRUCTION)) */ CVMstackmapMarkGCPoint(con, gcPointsBitmap, (CVMUint16)(pc - codeBegin)); /* * Also count the number of invocations in this method. */ if (CVMbcAttr(instr, INVOCATION)) { nInvocations++; } pc += instrLen; } /* * And finally, make sure we mark exception handlers as basic blocks. * Instructions that may throw exceptions will propagate their states * to all possible exception handlers. */ if (nExceptionHandlers > 0) { CVMExceptionHandler* excTab = CVMmbExceptionTable(con->mb); CVMInt32 nEntries = nExceptionHandlers; while(--nEntries >= 0) { if (!CVM_MAP_IS_BB_HDR(excTab->handlerpc)) { CVM_MAP_MARK_BB_HDR(excTab->handlerpc); nBasicBlocks++; } /* The start PC of an exception handler is a GC point */ CVMstackmapMarkGCPoint(con, gcPointsBitmap, excTab->handlerpc); excTab++; } } /* * Do this just to make sure. There may be two consecutive basic blocks * with no branches from one to the other. In that case, we want to be * acting as if we have a jump from the end of one to the start of the * other. 
To get a right number for nSuccessors, we would have to make * another pass over the code, which is really not that essential, so * just guess over the real number a little bit. */ nSuccessors += nBasicBlocks; /* * And this conservative one for the NULL-termination */ nSuccessors += nBasicBlocks; /* * At this point, we have nBasicBlocks b.b.'s, nSuccessors * successors, nExceptionHandlers exception edges, and nGCPoints * GC points. Each basic block also holds con->stateSize * CVMCellTypeState's of local variable and stack state. * * Also, a conservative estimate of all the JSR table memory * needed is nJsr's CVMJsrTableEntry's, nJsr ^ 2 * CVMBasicBlock*'s to hold callers of each. * The number of jsr's is typically quite small so the above conservative * estimate is really not that bad. * * Allocate memory to hold all that. */ memSize = /* Basic blocks */
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?