⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 jitregman.c

📁 This is a resource based on embedded J2ME. If you don't understand it, you can contact me.
💻 C
📖 第 1 页 / 共 5 页
字号:
    CVMRMResource* rp,    int target){    CVMRMregset t = 1U << target;    /* If the resource is already pinned, then we may have to clone the       resource if it is not pinned to the register we want: */    CVMRMassertContextMatch(con, rp);    if (rp->flags & CVMRMpinned) {        if (rp->regno != target) {            rp = CVMRMcloneResourceSpecific(con, rp, target);            CVMassert(rp->regno == target);        }        return rp;    }    /* Go ahead and pin the resource: */    CVMRMpinResourceStrict(con, rp, t, ~t);    CVMassert(rp->regno == target);    return rp;}/* * Pin the resource, but only if it is believed that pinning eagerly * will be desirabable. Pinning eagerly is desirable if it appears * that the register will not get spilled before used, and will not * require the spilling of another register. */voidCVMRMpinResourceEagerlyIfDesireable(    CVMJITRMContext* con, CVMRMResource* rp,    CVMRMregset target, CVMRMregset avoid){    int avail;    /* if the resource is already in a register, don't try to pin it.     * This may result in moving it into a different register, which     * we'd rather do lazily. */    if (rp->regno != -1) {	return;    }    /*     * We require that the target register not be in the avoid set. Otherwise     * it will get trashed before used. This happens, for example, when     * the lhs of an expression is a local and the rhs contains a method     * call which would trash the local if loaded eagerly.     */    avail = ~avoid;    /* Don't pin to an occupied register, since this will cause a spill.     * We don't want to be eager about pinning if it requires a spill     * of another register. 
*/    avail &= ~con->occupiedRegisters;    /* Don't attempt to pin to a pinned register or we'll get an assert */    avail &= ~con->pinnedRegisters;    /* Don't pin to a register outside the sandboxRegSet*/    avail &= con->sandboxRegSet;    /* limit the target set to the available registers */    if (rp->nregs == 1) {	target &= avail;    } else {	target &= avail & (avail>>1) & ~0x80000000;    }    /* Pin if there appears to be a desirable and available target register.*/    if (target != CVMRM_EMPTY_SET) {	/* double check to make sure a register is available. */	int regNo = findMaxRegInSet(target,				    RM_MIN_INTERESTING_REG,				    RM_MAX_INTERESTING_REG,				    (rp->nregs>1) ? RM_DOUBLE_REG_ALIGN :				                    RM_SINGLE_REG_ALIGN);	if (regNo != -1) {	    /*CVMconsolePrintf("EAGER PIN (%d)\n",regNo);*/	    CVMRMpinResourceSpecific(con, rp, regNo);	}    }}voidCVMRMunpinResource(CVMJITRMContext* con, CVMRMResource* rp){    CVMassert( !(rp->flags & CVMRMtrash) );    CVMassert(!con->compilationContext->inConditionalCode);    CVMRMassertContextMatch(con, rp);#ifdef CVMCPU_HAS_ZERO_REG    if (rp == con->preallocatedResourceForConstantZero) {        return;    } else #endif    if (rp->flags & CVMRMpinned){	rp->flags &= ~CVMRMpinned;	con->pinnedRegisters &= ~rp->rmask;    }}voidCVMRMoccupyAndUnpinResource(    CVMJITRMContext* con,    CVMRMResource* rp,    CVMJITIRNode* expr){    CVMassert( !(rp->flags & CVMRMtrash) );    CVMRMassertContextMatch(con, rp);    rp->flags |= CVMRMoccupied;    /*      * If the expression occupying the register is the same one we just     * did a loadJavaLocal from, then don't set the dirty flag,     * because it isn't dirty: i.e. is not out of sync with respect to     * backing store (the local). In all other cases, this flag must be set.     
*/    if (!((expr != NULL) && 	  CVMJITirnodeIsLocalNode(CVMJITirnodeValueOf(expr)) &&	  CVMRMisLocal(rp) &&	  (rp->localNo ==	   CVMJITirnodeGetLocal(CVMJITirnodeValueOf(expr))->localNo)))    {	rp->flags |= CVMRMdirty;    }        if (expr != NULL) {	CVMBool isRef = CVMJITirnodeIsReferenceType(expr);	/*	 * Associate 'rp' with this expression if it's an identity node,	 * and set reference count appropriately.	 */	CVMJITidentitySetDecoration(con->compilationContext,				    (CVMJITIdentityDecoration*)rp, expr);		/* DEBUG	    if (CVMRMgetRefCount(con, rp) > 1 ){		CVMconsolePrintf("Expr node with ref count %d\n", 		CVMRMgetRefCount(con, rp));                CVMJITirdumpIRNode(NULL, expr, 0, "   ");		CVMconsolePrintf("\n   > \n\n");	    }	*/	/* if there is another alias for this register, then it	 * had better have the same reference state.	 * Try to ignore constants, such as 0, for purposes of this check.	 */	if (CVMRMisRef(rp)){	    CVMassert(isRef || CVMRMisConstant(rp));	}	if (isRef){	    rp->flags |= CVMRMref;	}#ifdef CVM_DEBUG	if (!CVMJITirnodeIsIdentity(expr) && 	    CVMJITirnodeGetRefCount(expr) > 1) {	    if (!CVMJITirnodeIsConstant32Node(expr) &&                !CVMJITirnodeIsConstantAddrNode(expr) &&		!CVMJITirnodeIsConstant64Node(expr)){		CVMconsolePrintf("Expr node with ref count >1\n" );		CVMtraceJITBCTOIRExec({		    CVMJITirdumpIRNodeAlways(con->compilationContext, expr, 0, "   ");		    CVMconsolePrintf("\n   > \n\n");		});	    }	}#endif	        } else {	rp->flags |= CVMRMdirty;    }    CVMRMunpinResource(con, rp);}CVMRMResource*CVMRMfindResource(    CVMJITRMContext* con,    CVMJITIRNode* expr){    CVMJITCompilationContext* cc = con->compilationContext;    CVMRMResource* rp = NULL;    CVMassert(CVMJITirnodeIsIdentity(expr));    rp = (CVMRMResource*)CVMJITidentityGetDecoration(cc, expr);        CVMassert((rp == NULL) ||	      CVMJITidentityDecorationIs(cc, expr, RESOURCE));        /* Don't want to pin it -- just find it! 
*/    if ( (rp == NULL) || (rp->flags & CVMRMtrash)){	return NULL;    }    return rp;}voidCVMRMrelinquishResource( CVMJITRMContext* con, CVMRMResource* rp){    CVMRMunpinResource(con, rp);    /* We should never decrement below 0. */    CVMassert(!con->compilationContext->inConditionalCode);    CVMassert(CVMRMgetRefCount(con, rp) > 0);     CVMRMdecRefCount(con, rp);    if (CVMRMgetRefCount(con, rp) <= 0){	if ((rp->flags & (CVMRMLocalVar|CVMRMConstant32)) == 0) {	    /* no local, not a constant, no references, no reason to retain */	    if ((!CVMRMisBoundUse(rp)) && rp->spillLoc >= 0) {		/* CVMRMphi use spillLoc, but aren't managed the same. */		/* CVMRMclone uses spillLoc, but let original get rid of		   spill loc */		relinquishSpillLoc(con->compilationContext,				    rp->spillLoc, rp->size);	    }	    deleteResource(con, rp);	}    }}voidCVMRMsynchronizeJavaLocals(CVMJITCompilationContext* con){    /* does nothing since we always write back immediately */    CVMassert(!con->inConditionalCode);}intCVMRMdirtyJavaLocalsCount(CVMJITCompilationContext *con){    /* This needs to change if we allow dirty locals */    CVMassert(!con->inConditionalCode);    return 0;}#ifdef CVMCPU_HAS_ZERO_REGstatic void preallocateResourceForConstantZero(CVMJITRMContext* con){    CVMRMResource* rp = newResource(con);    CVMJITidentityInitDecoration(con, &rp->dec,                                 CVMJIT_IDENTITY_DECORATION_RESOURCE);#ifdef CVM_DEBUG_ASSERTS    rp->key = con->key;#endif    rp->next = NULL;    rp->prev = NULL;    rp->size = 1;    rp->nregs = 1;    rp->constant = 0;    rp->regno = CVMCPU_ZERO_REG;    rp->rmask = 0;    rp->spillLoc = -1;    rp->localNo = CVMRM_INVALID_LOCAL_NUMBER;    rp->flags = CVMRMpinned | CVMRMConstant32;    con->preallocatedResourceForConstantZero = rp;}#endifstatic voidRMContextInit(    CVMJITCompilationContext* cc,    CVMJITRMContext* con,    CVMUint16   minInteresting,    CVMUint16   maxInteresting,    CVMRMregset phiRegSet,    CVMRMregset busySet,    
CVMRMregset safeSet,    CVMRMregset unsafeSet,    CVMRMregset anySet,    CVMUint32   loadopcode1,    CVMUint32   loadopcode2,    CVMUint32   storeopcode1,    CVMUint32   storeopcode2,    CVMUint32   movopcode,#ifdef  CVM_DEBUG_ASSERTS    int		key,#endif    int		singleRegAlignment,    int		doubleRegAlignment,    int		maxRegSize,    CVMJITRMConstantLoader32 ldc){    con->compilationContext = cc;    con->numberLocalWords = cc->numberLocalWords;    con->resourceList = NULL;    con->minInteresting = minInteresting;    con->maxInteresting = maxInteresting;    con->loadOpcode[0] = loadopcode1;    con->loadOpcode[1] = loadopcode2;    con->storeOpcode[0]= storeopcode1;    con->storeOpcode[1]= storeopcode2;    con->movOpcode     = movopcode;    con->reg2res = CVMJITmemNew(cc, JIT_ALLOC_CGEN_REGMAN, 	sizeof(CVMRMResource *) * (maxInteresting + 1));    con->local2res = CVMJITmemNew(cc, JIT_ALLOC_CGEN_REGMAN, 	sizeof(CVMRMResource *) * con->numberLocalWords);#ifdef CVM_JIT_REGISTER_LOCALS    con->pinnedOutgoingLocals = CVMJITmemNew(cc, JIT_ALLOC_CGEN_REGMAN, 	sizeof(CVMRMResource *) * con->numberLocalWords);#endif    /* NOTE: The following need to be adjusted when we want to reserve regs       for shared values across blocks due to global CSE.       We should allocate global CSE regs from phiRegSet.       RMbusySet need to have the corresponding bits for the global CSE regs       set.    
*/    con->phiRegSet = phiRegSet;    con->busySet = busySet;    con->safeSet = safeSet;    con->unsafeSet = unsafeSet;    con->anySet = anySet;    con->singleRegAlignment = singleRegAlignment;    con->doubleRegAlignment = doubleRegAlignment;    con->maxRegSize     = maxRegSize;    con->constantLoader32 = ldc;#ifdef CVM_DEBUG_ASSERTS    con->key = key;#endif}voidCVMRMinit(CVMJITCompilationContext* con){    CVMJITRMCommonContext* common = &(con->RMcommonContext);    con->maxTempWords = con->maxPhiSize;    /* initialize the common context information ourselves */    CVMJITsetInit(con, &(common->spillBusySet));    CVMJITsetInit(con, &(common->spillRefSet));    common->spillBusyRefSize = RM_INITIAL_REF_COUNT_SIZE;    common->spillBusyRefCount = CVMJITmemNew(con, JIT_ALLOC_CGEN_REGMAN,				 RM_INITIAL_REF_COUNT_SIZE*sizeof(CVMUint8));    /* go initialize the individual regman context(s) */    RMContextInit(con, &con->RMcontext[0], CVMCPU_MIN_INTERESTING_REG,	   CVMCPU_MAX_INTERESTING_REG,	   CVMCPU_PHI_REG_SET,	   CVMRM_BUSY_SET, CVMRM_SAFE_SET, CVMRM_UNSAFE_SET, CVMRM_ANY_SET,	   CVMCPU_LDR32_OPCODE, CVMCPU_LDR64_OPCODE,	   CVMCPU_STR32_OPCODE, CVMCPU_STR64_OPCODE,	   CVMCPU_MOV_OPCODE,#ifdef  CVM_DEBUG_ASSERTS	   RM_INT_KEY,#endif	   CVMCPU_SINGLE_REG_ALIGNMENT, CVMCPU_DOUBLE_REG_ALIGNMENT,	   CVMCPU_MAX_REG_SIZE, CVMCPUemitLoadConstant);#ifdef CVMCPU_HAS_ZERO_REG    preallocateResourceForConstantZero(&con->RMcontext[0]);#endif#ifdef CVM_JIT_USE_FP_HARDWARE    RMContextInit(con, &con->RMcontext[1], CVMCPU_FP_MIN_INTERESTING_REG,	   CVMCPU_FP_MAX_INTERESTING_REG,	   CVMCPU_FP_PHI_REG_SET,	   CVMRM_FP_BUSY_SET, CVMRM_FP_SAFE_SET, CVMRM_FP_UNSAFE_SET,	   CVMRM_FP_ANY_SET,	   CVMCPU_FLDR32_OPCODE, CVMCPU_FLDR64_OPCODE,	   CVMCPU_FSTR32_OPCODE, CVMCPU_FSTR64_OPCODE,	   CVMCPU_FMOV_OPCODE,#ifdef  CVM_DEBUG_ASSERTS	   RM_FP_KEY,#endif	   CVMCPU_FP_SINGLE_REG_ALIGNMENT, CVMCPU_FP_DOUBLE_REG_ALIGNMENT,	   CVMCPU_FP_MAX_REG_SIZE, CVMCPUemitLoadConstantFP);#endif}static 
voidRMbeginBlock(CVMJITRMContext* con, CVMJITIRBlock* b){    con->resourceList = NULL;    /* NOTE: RMbusySet need to need to be adjusted here for block       reserved regs. */    con->pinnedRegisters = con->busySet;    con->occupiedRegisters = con->busySet;    con->phiPinnedRegisters = 0;    /* By default there are no restrictions on register allocation. */    con->sandboxRegSet = RM_ANY_SET;    memset(con->reg2res, 0,	sizeof(CVMRMResource *) * (RM_MAX_INTERESTING_REG + 1));    memset(con->local2res, 0,	sizeof(CVMRMResource *) * con->numberLocalWords);}static voidRMapplyRegSandboxRestriction(CVMJITRMContext* con, CVMJITIRBlock* b){    CVMassert(con->sandboxRegSet == RM_ANY_SET);    if (b->sandboxRegSet != 0) {        con->sandboxRegSet = b->sandboxRegSet;    }}voidCVMRMbeginBlock(CVMJITCompilationContext* con, CVMJITIRBlock* b){    resetSpillMap(con, b);    RMbeginBlock(CVMRM_INT_REGS(con), b);#ifdef CVM_JIT_USE_FP_HARDWARE    RMbeginBlock(CVMRM_FP_REGS(con), b);#endif    CVMJITlocalrefsCopy(con, &con->localRefSet, &b->localRefs);    CVMtraceJITCodegenExec({	CVMconsolePrintf("\t\t\t@ Initial Temp REF set is " );	CVMJITsetDumpElements(con, &(con->RMcommonContext.spillRefSet));	CVMconsolePrintf("\n" );    });        /* bind phi values to the appropriate registers */    CVMRMbindAllUsedNodes(con, b);    if (CVMJITirblockIsBackwardBranchTarget(b) ||        CVMJITirblockIsJSRReturnTarget(b)) {	/* emit gcRendezvous code */	CVMJITcheckGC(con, b);    } else if (CVMJITirblockIsExcHandler(b)){	CVMtraceJITCodegen((            "\tL%d:\t%d:\t@ entry point for exception handler\n",            CVMJITirblockGetBlockID(b), CVMJITcbufGetLogicalPC(con)));	b->logicalAddress = CVMJITcbufGetLogicalPC(con);	/* Must always write a stackmap at the top of each exception handler.*/	CVMJITcaptureStackmap(con, 0);    } else {	/* This is where all branches to this block will branch to */	b->logicalAddress = CVMJITcbufGetLogicalPC(con);	if (b == 
(CVMJITIRBlock*)CVMJITirlistGetHead(&(con->bkList))) {	    /* Start of method. Need to preload locals explicitly. */	    CVMtraceJITCodegen((                "\tL%d:\t%d:\t@ entry point for first block\n",                CVMJITirblockGetBlockID(b), CVMJITcbufGetLogicalPC(con)));	    CVMRMpinAllIncomingLocals(con, b, CVM_TRUE);	    CVMRMunpinAllIncomingLocals(con, b);	} else {	    CVMtraceJITCodegen(("\tL%d:\t%d:\t@ entry point for branches\n",                CVMJITirblockGetBlockID(b), CVMJITcbufGetLogicalPC(con)));#ifdef CVM

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -