stackmaps.c

来自「This is a resource based on j2me embedde」· C语言 代码 · 共 1,928 行 · 第 1/5 页

C
1,928
字号
	nBasicBlocks * sizeof(CVMBasicBlock) +	/* Basic block successors */	nSuccessors * sizeof(CVMBasicBlock*) +	/* Exception mappings */	nExceptionHandlers * sizeof(CVMBasicBlock*) +	/* Jsr tables */	nJsrs * sizeof(CVMJsrTableEntry) +	nJsrs * nJsrs * sizeof(CVMBasicBlock*) +	/* Basic block stack and var states with	   an additional one for the context state */	(nBasicBlocks + 1) * con->stateSize * sizeof(CVMCellTypeState) +	/* space for recording conflict uses of variables */	con->nVars * sizeof(CVMCellTypeState) +	/* printBuffer size for the state */	(con->stateSize + 2) * sizeof(char);    bbAndSuccessorsArea = calloc(1, memSize);    if (bbAndSuccessorsArea == 0) {	throwError(con, CVM_MAP_OUT_OF_MEMORY);    }    basicBlocks = (CVMBasicBlock*)bbAndSuccessorsArea;    con->basicBlocks  = basicBlocks;    con->nBasicBlocks = nBasicBlocks;    successorsArea = (CVMBasicBlock**)(basicBlocks + nBasicBlocks);    con->successorsArea = successorsArea;    exceptionsArea = successorsArea + nSuccessors;    con->exceptionsArea = exceptionsArea;    /*     * Get the JSR table ready     */    con->jsrTable  = (CVMJsrTableEntry*)(exceptionsArea + nExceptionHandlers);    jsrCallersArea = (CVMBasicBlock**)(con->jsrTable + nJsrs);    con->jsrTableSize = 0;    /*     * This is the area for the basic block variable and stack states     */    stateArea = (CVMCellTypeState*)(jsrCallersArea + nJsrs * nJsrs);    /*     * There is need for code re-writing only if the code contains      * a jsr instruction.     */    if (nJsrs > 0) {	con->mayNeedRewriting = CVM_TRUE;    }    /*     * Now make a second pass over the code. This time make up the actual     * basic blocks, and set successors. Make sure the basic blocks are     * ordered by PC.     *     * Set successors by PC initially. We will then make a separate pass     * over successors, and map each PC to basic block.     
*/    pc = codeBegin;    currentBasicBlock = 0; /* Just to make sure */    while (pc < codeEnd) {	CVMOpcode instr = (CVMOpcode)*pc;	CVMUint32 instrLen = getOpcodeLength(con, pc);	/*	 * Assign basic block structures to all basic blocks.	 */	if (CVM_MAP_IS_BB_HDR(pc - codeBegin)) {	    currentBasicBlock = basicBlocks++;	    CVM_MAP_SET_BB_FOR_PC(pc - codeBegin, currentBasicBlock);	    currentBasicBlock->startPC = pc;	    currentBasicBlock->varState = stateArea;	    currentBasicBlock->stackState = stateArea + con->nVars;	    stateArea += con->stateSize;	    /* This is the part that needs to be re-done on each	       full abstract interpretation */	    currentBasicBlock->topOfStack = -1; /* un-interpreted */	    currentBasicBlock->successors = NULL;	    currentBasicBlock->changed = CVM_FALSE;	}	if (CVMbcAttr(instr, BRANCH)) {	    /* Assert that we are not overriding a previously set	       successors pointer */	    CVMassert(currentBasicBlock->successors == NULL);	    currentBasicBlock->successors = successorsArea;	    /*	     * Set successors for each of the possible branches.	     * This is possible for all but jsr. Jsr's can only be 'succeeded'	     * at abstract interpretation time, since only then can we know	     * which jsr target a given 'ret <n>' corresponds to.	     *	     * We either have a goto or jsr, a tableswitch, a lookupswitch,	     * or one of the if's.	     */	    switch(instr) {	        case opc_goto:	        case opc_jsr: {		    /* An unconditional goto, 2-byte offset */		    CVMInt16 offset = CVMgetInt16(pc+1);                    /*                      * The one and only use for 'codeOffset' is to                     * store a pointer difference. 
Even if we know                     * that they will always fit into 16 bits I can                     * see no sense in using a 16 bit variable since                     * the code will never be slower (but often                     * faster) if we use a variable matching the                     * machine size instead.  */		    CVMAddr codeOffset = pc + offset - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		    if (instr == opc_jsr) {			CVMstackmapAddJsrCall(con, (CVMUint16)(codeOffset),					      pc + instrLen - codeBegin,					      &jsrCallersArea, nJsrs);			currentBasicBlock->endsWithJsr = CVM_TRUE;		    }		    break;		}	        case opc_goto_w:	        case opc_jsr_w: {		    /* An unconditional goto, 4-byte offset */		    CVMInt32 offset = CVMgetInt32(pc+1);                    /*                      * The one and only use for 'codeOffset' is to                     * store a pointer difference. Even if we know                     * that they will always fit into 32 bits I can                     * see no sense in using a 32 bit variable since                     * the code will never be slower (but often                     * faster) if we use a variable matching the                     * machine size instead.  */                    CVMAddr codeOffset = pc + offset - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		    if (instr == opc_jsr_w) {			CVMstackmapAddJsrCall(con, (CVMUint16)codeOffset,					      pc + instrLen - codeBegin,					      &jsrCallersArea, nJsrs);			currentBasicBlock->endsWithJsr = CVM_TRUE;		    }		    break;	        }	        case opc_lookupswitch: {		    CVMInt32* lpc  = (CVMInt32*)CVMalignWordUp(pc+1);		    CVMInt32  skip = CVMgetAlignedInt32(lpc); /* default */		    CVMInt32  npairs = CVMgetAlignedInt32(&lpc[1]);		    /* First mark the default */                    /*                      * The one and only use for 'codeOffset' is to                     * store pointer differences. 
Even if we know that                     * they will always fit into 16 bits I can see no                     * sense in using a 16 bit variable since the code                     * will never be slower (but often faster) if we                     * use a variable matching the machine size                     * instead.  */		    CVMAddr codeOffset = pc + skip - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		    /* And all the possible case arms */		    lpc += 3; /* Go to the first offset */		    while (--npairs >= 0) {			skip = CVMgetAlignedInt32(lpc);			codeOffset = pc + skip - codeBegin;			*successorsArea++ = (CVMBasicBlock*)codeOffset;			lpc += 2; /* next offset */		    }		    break;	        }	        case opc_tableswitch: {		    CVMInt32* lpc  = (CVMInt32*)CVMalignWordUp(pc+1);		    CVMInt32  skip = CVMgetAlignedInt32(lpc); /* default */		    CVMInt32  low  = CVMgetAlignedInt32(&lpc[1]);		    CVMInt32  high = CVMgetAlignedInt32(&lpc[2]);		    CVMInt32  noff = high - low + 1;		    /* First mark the default */                    /*                      * The one and only use for 'codeOffset' is to                     * store pointer differences. Even if we know that                     * they will always fit into 16 bits I can see no                     * sense in using a 16 bit variable since the code                     * will never be slower (but often faster) if we                     * use a variable matching the machine size                     * instead.  
*/		    CVMAddr codeOffset = pc + skip - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		    lpc += 3; /* Skip default, low, high */		    while (--noff >= 0) {			skip = CVMgetAlignedInt32(lpc);			codeOffset = pc + skip - codeBegin;			*successorsArea++ = (CVMBasicBlock*)codeOffset;			lpc++;		    }		    break;	        }	        default: {                    /*                      * The one and only use for 'codeOffset' is to                     * store pointer differences. Even if we know that                     * they will always fit into 16 bits I can see no                     * sense in using a 16 bit variable since the code                     * will never be slower (but often faster) if we                     * use a variable matching the machine size                     * instead.  */		    CVMAddr codeOffset;		    /* This had better be one of the 'if' guys */		    CVMassert(((instr >= opc_ifeq) &&			       (instr <= opc_if_acmpne)) ||			      (instr == opc_ifnull) ||			      (instr == opc_ifnonnull));		    CVMassert(instrLen == 3);		    /* Mark the target of the 'if' */		    codeOffset = pc + CVMgetInt16(pc+1) - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		    /* And mark the following instruction */		    codeOffset = pc + 3 - codeBegin;		    *successorsArea++ = (CVMBasicBlock*)codeOffset;		}	    }	    /* We'll convert these shortly */	    *successorsArea++ = (CVMBasicBlock*)~0; 	} else if ((pc + instrLen < codeEnd) &&		   CVM_MAP_IS_BB_HDR(pc + instrLen - codeBegin)) {	    /*	     * If we are the last instruction of a basic block, and	     * our instruction is not a branch, and it is not a	     * NOCONTROLFLOW one, then we'd better express the	     * 'fall-through' in the successors array.	     */	    if (!CVMbcAttr(instr, NOCONTROLFLOW) &&		!((instr == opc_wide) && (pc[1] == opc_ret))) {		CVMassert(pc + instrLen < codeEnd); /* can't fall off !! 
*/		currentBasicBlock->successors = successorsArea;		*successorsArea++ =		    (CVMBasicBlock*)(pc + instrLen - codeBegin);		/* We'll convert these shortly */		*successorsArea++ = (CVMBasicBlock*)~0; 	    }	}	pc += instrLen;    }    /*     * The last state goes to the context     */    con->varState = stateArea;    con->stackState = stateArea + con->nVars;    /*     * Then come the conflict uses of variables     */    con->refVarsToInitialize = stateArea + con->stateSize;    /*     * The printing buffer comes right afterwards     */    con->printBuffer = (char*)(con->refVarsToInitialize + con->nVars);    successorsAreaEnd = successorsArea;    successorsArea    = con->successorsArea;    /*      * Make pass over the successors array, and map 16-bit pc's     * to basic block pointers.     */    while(successorsArea < successorsAreaEnd) {	CVMAddr codeOffset = (CVMAddr)(*successorsArea);	if (codeOffset == ~0) {	    *successorsArea++ = 0;	} else {	    CVMBasicBlock* bb = CVM_MAP_GET_BB_FOR_PC(codeOffset);	    CVMassert((CVMAddr)bb > 1);	    *successorsArea++ = bb;	}    }    /*     * Do a fast mapping for exception handlers.     */    if (nExceptionHandlers > 0) {	CVMExceptionHandler* excTab = CVMmbExceptionTable(con->mb);	CVMInt32 nEntries = nExceptionHandlers;	while(--nEntries >= 0) {	    CVMBasicBlock* bb = CVM_MAP_GET_BB_FOR_PC(excTab->handlerpc);	    CVMassert((CVMAddr)bb > 1);	    *exceptionsArea++ = bb;	    excTab++;	    /* all exception entries will be considered as live */	    CVMstackmapLivenessPush(con, bb);	}    }    /* and of course the entry point is live */    CVMstackmapLivenessPush(con, con->basicBlocks);    /*     * And for the jsr return table as well.     
*/    if (nJsrs > 0) {	int i;	for (i = 0; i < con->jsrTableSize; i++) {	    CVMBasicBlock** callers  = con->jsrTable[i].jsrCallers;	    CVMInt32        ncallers = con->jsrTable[i].jsrNoCalls;	    while(--ncallers >= 0) {		/* relPC must be able to hold a native pointer		 * because *callers is of type CVMBasicBlock*		 * therefore the type has to be CVMAddr which is 4 byte on		 * 32 bit platforms and 8 byte on 64 bit platforms		 */		CVMAddr relPC = (CVMAddr)*callers;		CVMBasicBlock* bb = CVM_MAP_GET_BB_FOR_PC(relPC);		CVMassert((CVMAddr)bb > 1);		*callers++ = bb;	    }	}    }    /*     * A duplicate entry for the method entry map. We might use it in case     * we do code re-writing for ref-uninit conflicts.     */    if (con->mayNeedRewriting) {	con->nGCPoints++;    }#ifdef CVM_DEBUG    /*     * Print stats on what we saw in this method      */    if ( CVMstackmapVerboseDebug ){	CVMtraceStackmaps(("\tCode size          = %d\n", con->codeLen));	CVMtraceStackmaps(("\tNo of locals       = %d\n", con->nVars));	CVMtraceStackmaps(("\tMax stack          = %d\n", con->maxStack));	CVMtraceStackmaps(("\tNo of basic blocks = %d\n", nBasicBlocks));	CVMtraceStackmaps(("\tNo of invocations  = %d\n", nInvocations));	CVMtraceStackmaps(("\tNo of GC points    = %d\n", con->nGCPoints));	CVMtraceStackmaps(("\tNo of JSR calls    = %d\n", nJsrs));	CVMtraceStackmaps(("\tNo of JSRtab elems = %d\n", con->jsrTableSize));	CVMtraceStackmaps(("\tNo of exc handlers = %d\n", nExceptionHandlers));	CVMtraceStackmaps(("\tSize of mapsArea   = %d\n",			 (con->codeLen + (con->codeLen + 31) / 32) *			 sizeof(CVMUint32)));	CVMtraceStackmaps(("\tSize of other mem  = %d\n", memSize));    }#endif}/* * Convert a cell type-state to a printable character. 
*/
#ifdef CVM_DEBUG
/*
 * Map a cell type-state's flag bits to one printable character for the
 * debug dump of variable/stack states.  Single flags map to mnemonic
 * letters taken from the flag names (u/r/v/p, '@' for bottom).  Any
 * two-flag combination is traced as "CFL" (apparently a type conflict)
 * and mapped to a distinct punctuation character; anything else falls
 * to the default, which also traces the raw flag bits.
 */
static char
CVMstackmapCtsToChar(CVMCellTypeState cts)
{
    CVMCellTypeState flags = CVMctsGetFlags(cts);
    switch(flags) {
        case CVMctsUninit: return 'u';	/* CVMctsUninit */
        case CVMctsRef:    return 'r';	/* CVMctsRef */
        case CVMctsVal:    return 'v';	/* CVMctsVal */
        case CVMctsPC:     return 'p';	/* CVMctsPC */
        case CVMctsBottom: return '@';	/* CVMctsBottom */
        /* Two-flag combinations: trace "CFL" and return a marker. */
        case CVMctsRef | CVMctsVal: 
	    CVMtraceStackmaps(("CFL\n")); return '!';
        case CVMctsRef | CVMctsUninit: 
	    CVMtraceStackmaps(("CFL\n")); return '$';
        case CVMctsRef | CVMctsPC: 
	    CVMtraceStackmaps(("CFL\n")); return '%';
        case CVMctsVal | CVMctsUninit: 
	    CVMtraceStackmaps(("CFL\n")); return '^';
        case CVMctsVal | CVMctsPC: 
	    CVMtraceStackmaps(("CFL\n")); return '&';
        case CVMctsPC | CVMctsUninit: 
	    CVMtraceStackmaps(("CFL\n")); return '*';
        default:           
	    CVMtraceStackmaps(("CFL\n0x%x\n", flags)); return '#';
    }
}
#endif

#ifdef CVM_DEBUG
/*
 * Render a state (locals + stack) into con->printBuffer, one character
 * per cell via CVMstackmapCtsToChar.  Returns immediately unless
 * CVMstackmapVerboseDebug is set.  (Function continues beyond this
 * excerpt; only the locals loop is visible here.)
 */
static void
CVMstackmapPrintState(CVMStackmapContext* con,
		      CVMInt32            topOfStack,
		      CVMCellTypeState*   varState,
		      CVMCellTypeState*   stackState)
{
    CVMInt32 v;
    char* state = con->printBuffer;	/* per-context scratch buffer */
    if (!CVMstackmapVerboseDebug ) return;
    /* One character per local variable. */
    for (v = 0; v < con->nVars; v++) {
	state[v] = CVMstackmapCtsToChar(varState[v]);
    }

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?