📄 jitgrammarrules.jcs
字号:
} pushIConst32(con, idx); };

/* NOTE(review): the closing braces above terminate a rule whose start lies
 * before this chunk; they are reproduced verbatim. */

/* DAG rule (cost 0): reduce a shared IDENT32 node to an arrayIndex goal.
 * On the first evaluation the ScaledIndexInfo produced for the node is
 * popped and attached to the node as its identity decoration; on a
 * re-evaluation of the shared node it is recovered from that decoration
 * instead of being recomputed. */
%dag arrayIndex: IDENT32 arrayIndex : 0
    : IDENT_SYNTHESIS(con, $$);
    : IDENT_INHERITANCE(con, $$);
    :
    : {
        ScaledIndexInfo* src;
        if (!CVMJIT_DID_SEMANTIC_ACTION($$)){
            /* First visit: capture the computed info and decorate the node. */
            src = popScaledIndexInfo(con);
            CVMJITidentitySetDecoration(con, (CVMJITIdentityDecoration*)src, $$);
        } else {
            /* Revisit of a shared (DAG) node: recover the decoration. */
            src = (ScaledIndexInfo*)CVMJITidentityGetDecoration(con, $$);
            CVMassert((src == NULL) ||
                      CVMJITidentityDecorationIs(con, $$, SCALEDINDEX));
            /* CVMconsolePrintf("Reiteration of "); */
            CVMassert(src != NULL);
        }
        /* CVMconsolePrintf("IDENT32 ID %d, resource 0x%x\n", $$->nodeID, src); */
#ifdef IAI_CS_EXCEPTION_ENHANCEMENT2
        src->isIDENTITYOutofBoundsCheck = CVM_TRUE;
#endif
        pushScaledIndexInfo(con, src);
    };

/* DAG rule (cost 0): reduce a shared IDENT64 node to a reg64 goal.
 * Mirrors the IDENT32 rule above, but the remembered value is a
 * register-manager resource rather than a ScaledIndexInfo: the first visit
 * occupies-and-unpins the popped resource under the node's identity, later
 * visits look it up again via CVMRMfindResource. */
%dag reg64: IDENT64 reg64 : 0
    : IDENT_SYNTHESIS(con, $$);
    : IDENT_INHERITANCE(con, $$);
    :
    : {
        CVMRMResource* src;
        if (!CVMJIT_DID_SEMANTIC_ACTION($$)){
            src = popResource(con);
            CVMRMoccupyAndUnpinResource(CVMRM_INT_REGS(con), src, $$);
        } else {
            src = CVMRMfindResource(CVMRM_INT_REGS(con), $$);
            CVMassert(src != NULL);
        }
        pushResource(con, src);
    };

/* 32-bit unary ALU rules (cost 10): integer negate, bitwise NOT, and
 * int-to-bit conversion. Each emits one word-sized unary operation aimed
 * at the current register goals. */
reg32: INEG32 reg32 : 10 : : : :
    wordUnaryOp(con, CVMCPU_NEG_OPCODE, $$, GET_REGISTER_GOALS);
reg32: NOT32 reg32 : 10 : : : :
    wordUnaryOp(con, CVMCPU_NOT_OPCODE, $$, GET_REGISTER_GOALS);
reg32: INT2BIT32 reg32 : 10 : : : :
    wordUnaryOp(con, CVMCPU_INT2BIT_OPCODE, $$, GET_REGISTER_GOALS);

%{
#ifdef CVMJIT_INTRINSICS

/* Get absolute value of srcReg and set condition codes:
 *     adds  rDest, rSrc, #0
 *     neglt rDest, rSrc
 * On CPUs whose ALU ops cannot set condition codes (CVMCPU_HAS_ALU_SETCC
 * undefined) an explicit compare against zero supplies the LT condition. */
static void
emitAbsolute(CVMJITCompilationContext* con, int destReg, int srcReg)
{
    CVMCPUemitBinaryALU(con, CVMCPU_ADD_OPCODE, destReg, srcReg,
                        CVMCPUALURhsTokenConstZero, CVMJIT_SETCC);
#ifndef CVMCPU_HAS_ALU_SETCC
    CVMCPUemitCompare(con, CVMCPU_CMP_OPCODE, CVMCPU_COND_LT,
                      srcReg, CVMCPUALURhsTokenConstZero);
#endif
    /* Conditionally negate when the source was negative. */
    CVMCPUemitUnaryALUConditional(con, CVMCPU_NEG_OPCODE, destReg, srcReg,
                                  CVMJIT_NOSETCC, CVMCPU_COND_LT);
}

/* Default intrinsic callback: no extra registers are required beyond the
 * set the arguments already demand. */
CVMJITRegsRequiredType
CVMJITRISCintrinsicDefaultGetRequired(CVMJITCompilationContext *con,
                                      CVMJITIRNode *intrinsicNode,
                                      CVMJITRegsRequiredType argsRequiredSet)
{
    return argsRequiredSet;
}

/* Default intrinsic callback: arguments may live in any register. */
CVMRMregset
CVMJITRISCintrinsicDefaultGetArgTarget(CVMJITCompilationContext *con,
                                       int typeTag, CVMUint16 argNumber,
                                       CVMUint16 argWordIndex)
{
    return CVMRM_ANY_SET;
}

#ifdef CVMJIT_SIMPLE_SYNC_METHODS

/* Register-requirement callback for the Simple Sync lock-release intrinsic. */
CVMJITRegsRequiredType
CVMJITRISCintrinsicSimpleLockReleaseGetRequired(
    CVMJITCompilationContext *con,
    CVMJITIRNode *intrinsicNode,
    CVMJITRegsRequiredType argsRequiredSet)
{
#if CVM_FASTLOCK_TYPE == CVM_FASTLOCK_ATOMICOPS
    /* During the release, we may call CVMCCMruntimeSimpleSyncUnlock,
     * which requires ARG1 and ARG2 */
    return argsRequiredSet | ARG1 | ARG2 | CVMCPU_AVOID_C_CALL;
#else
    return argsRequiredSet;
#endif
}

/* Argument-targeting callback for the Simple Sync lock-release intrinsic.
 * Only one argument (argNumber 0) is expected. */
CVMRMregset
CVMJITRISCintrinsicSimpleLockReleaseGetArgTarget(
    CVMJITCompilationContext *con,
    int typeTag, CVMUint16 argNumber, CVMUint16 argWordIndex)
{
    CVMassert(argNumber == 0);
#if CVM_FASTLOCK_TYPE == CVM_FASTLOCK_ATOMICOPS
    /* During the release, we may call CVMCCMruntimeSimpleSyncUnlock,
     * which requires "this" to be in ARG2 */
    return ARG2;
#else
    return CVMRM_ANY_SET;
#endif
}

#endif /* CVMJIT_SIMPLE_SYNC_METHODS */

/* intrinsic emitter for Thread.currentThread().
 * Loads the current thread object via ee->threadICell and pushes it as the
 * intrinsic's result resource. */
static void
java_lang_Thread_currentThread_EmitOperator(CVMJITCompilationContext *con,
                                            CVMJITIRNode *intrinsicNode)
{
    /* NOTE(review): goal_top appears unused directly here; presumably the
     * GET_REGISTER_GOALS macro expands to reference it — confirm before
     * removing. */
    struct CVMJITCompileExpression_rule_computation_state *goal_top =
        (struct CVMJITCompileExpression_rule_computation_state *)
            (con->goal_top);
    CVMRMResource* dest =
        CVMRMgetResource(CVMRM_INT_REGS(con), GET_REGISTER_GOALS, 1);
    int destReg = CVMRMgetRegisterNumber(dest);
    int eeReg;
    /*
     *  ldr eeReg, [sp, #OFFSET_CVMCCExecEnv_ee]           @ Get ee.
     *  ldr rDest, [eeReg, #OFFSET_CVMExecEnv_threadICell] @ Get threadICell.
     *  ldr rDest, [rDest]                                 @ Get thread obj.
     */
#ifdef CVMCPU_EE_REG
    /* The ee is permanently held in a dedicated register on this port. */
    eeReg = CVMCPU_EE_REG;
#else
    eeReg = destReg;
    /* Get the ee from the ccee: */
    CVMJITaddCodegenComment((con, "eeReg = ccee->ee"));
    CVMCPUemitCCEEReferenceImmediate(con, CVMCPU_LDR32_OPCODE, eeReg,
                                     offsetof(CVMCCExecEnv, eeX));
#endif
    /* Get the thread icell from the ee: */
    CVMJITaddCodegenComment((con, "destReg = ee->threadICell"));
    CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_LDR32_OPCODE,
        destReg, eeReg, offsetof(CVMExecEnv, threadICell));
    /* Get the thread object from the thread icell: */
    CVMJITaddCodegenComment((con, "destReg = *ee->threadICell"));
    CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_LDR32_OPCODE,
        destReg, destReg, 0);
    CVMRMoccupyAndUnpinResource(CVMRM_INT_REGS(con), dest, intrinsicNode);
    pushResource(con, dest);
}

/* Intrinsic emitter for integer absolute value (Math.abs(int)):
 * pops the source resource, pins it, emits the abs sequence into a fresh
 * destination register, releases the source, and pushes the result. */
static void
iabsEmitOperator(CVMJITCompilationContext *con, CVMJITIRNode *intrinsicNode)
{
    /* NOTE(review): goal_top is presumably consumed by GET_REGISTER_GOALS —
     * confirm before removing. */
    struct CVMJITCompileExpression_rule_computation_state *goal_top =
        (struct CVMJITCompileExpression_rule_computation_state *)
            (con->goal_top);
    CVMRMResource* src = popResource(con);
    CVMRMResource* dest =
        CVMRMgetResource(CVMRM_INT_REGS(con), GET_REGISTER_GOALS, 1);
    CVMRMpinResource(CVMRM_INT_REGS(con), src, CVMRM_ANY_SET, CVMRM_EMPTY_SET);
    emitAbsolute(con, CVMRMgetRegisterNumber(dest),
                 CVMRMgetRegisterNumber(src));
    CVMRMrelinquishResource(CVMRM_INT_REGS(con), src);
    CVMRMoccupyAndUnpinResource(CVMRM_INT_REGS(con), dest, intrinsicNode);
    pushResource(con, dest);
}

#ifdef CVMJIT_SIMPLE_SYNC_METHODS
#if CVM_FASTLOCK_TYPE == CVM_FASTLOCK_MICROLOCK && \
    CVM_MICROLOCK_TYPE == CVM_MICROLOCK_SWAP_SPINLOCK
/*
 * Intrinsic emitter for spinlock microlock version of CVM.simpleLockGrab().
 *
 * Grabs CVMglobals.objGlobalMicroLock using atomic swap. If it fails,
 * returns FALSE. If successful, checks if the object is already locked.
 * If locked, releases CVMglobals.objGlobalMicroLock and returns FALSE.
 * Otherwise returns TRUE.
*/static voidsimpleLockGrabEmitter( CVMJITCompilationContext * con, CVMJITIRNode *intrinsicNode){ CVMRMResource* obj = popResource(con); CVMRMResource* objHdr; CVMRMResource* dest; CVMRMResource* microLock; int objRegID, objHdrRegID, destRegID, microLockRegID; int fixupPC1, fixupPC2; /* To patch the conditional branches */ struct CVMJITCompileExpression_rule_computation_state *goal_top = (struct CVMJITCompileExpression_rule_computation_state *) (con->goal_top); dest = CVMRMgetResource(CVMRM_INT_REGS(con), GET_REGISTER_GOALS, 1); objHdr = CVMRMgetResource(CVMRM_INT_REGS(con), CVMRM_ANY_SET, CVMRM_SAFE_SET, 1); CVMRMpinResource(CVMRM_INT_REGS(con), obj, CVMRM_ANY_SET, CVMRM_SAFE_SET); objRegID = CVMRMgetRegisterNumber(obj); objHdrRegID = CVMRMgetRegisterNumber(objHdr); destRegID = CVMRMgetRegisterNumber(dest); /* load microlock address into microLockRegID */ CVMJITsetSymbolName((con, "&CVMglobals.objGlobalMicroLock")); microLock = CVMRMgetResourceForConstant32( CVMRM_INT_REGS(con), CVMRM_ANY_SET, CVMRM_SAFE_SET, (CVMUint32)&CVMglobals.objGlobalMicroLock); microLockRegID = CVMRMgetRegisterNumber(microLock); /* preload the address to help caching */ CVMJITaddCodegenComment((con, "tmp = CVMglobals.objGlobalMicroLock")); CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_LDR32_OPCODE, destRegID, microLockRegID, 0); /* Get microlock LOCKED flag */ CVMJITaddCodegenComment((con, "CVM_MICROLOCK_LOCKED")); CVMCPUemitLoadConstant(con, destRegID, CVM_MICROLOCK_LOCKED); /* atomic swap LOCKED into the microlock */ CVMJITaddCodegenComment((con, "swp(CVMglobals.objGlobalMicroLock, CVM_MICROLOCK_LOCKED)")); CVMCPUemitAtomicSwap(con, destRegID, microLockRegID); CVMCPUemitMemBarAcquire(con); /* check if microlock is already locked */ CVMJITaddCodegenComment((con, "check if microlock is locked")); CVMCPUemitCompareConstant(con, CVMCPU_CMP_OPCODE, CVMCPU_COND_EQ, destRegID, CVM_MICROLOCK_LOCKED); /* branch if microlock already locked */ CVMJITaddCodegenComment((con, "br failed if microlock 
is locked")); CVMCPUemitBranch(con, 0, CVMCPU_COND_EQ);#ifdef CVMCPU_HAS_DELAY_SLOT fixupPC1 = CVMJITcbufGetLogicalPC(con) - 2 * CVMCPU_INSTRUCTION_SIZE;#else fixupPC1 = CVMJITcbufGetLogicalPC(con) - 1 * CVMCPU_INSTRUCTION_SIZE;#endif#ifdef IAI_CODE_SCHEDULER_SCORE_BOARD fixupPC1 = CVMJITcbufGetLogicalInstructionPC(con);#endif /* load the object header */ CVMJITaddCodegenComment((con, "get obj.hdr.various32")); CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_LDR32_OPCODE, objHdrRegID, objRegID, CVMoffsetof(CVMObjectHeader,various32)); /* assume not locked and set intrinsic result */ CVMJITaddCodegenComment((con, "assume not locked: result = true")); CVMCPUemitLoadConstant(con, destRegID, CVM_TRUE); /* get sync bits from object header */ CVMJITaddCodegenComment((con, "get obj sync bits")); CVMCPUemitBinaryALUConstant(con, CVMCPU_AND_OPCODE, objHdrRegID, objHdrRegID, CVM_SYNC_MASK, CVMJIT_NOSETCC); /* check if object is unlocked */ CVMJITaddCodegenComment((con, "check if obj unlocked")); CVMCPUemitCompareConstant(con, CVMCPU_CMP_OPCODE, CVMCPU_COND_EQ, objHdrRegID, CVM_LOCKSTATE_UNLOCKED); /* branch if object not locked */ CVMJITaddCodegenComment((con, "br done if object is not locked")); CVMCPUemitBranch(con, 0, CVMCPU_COND_EQ);#ifdef CVMCPU_HAS_DELAY_SLOT fixupPC2 = CVMJITcbufGetLogicalPC(con) - 2 * CVMCPU_INSTRUCTION_SIZE;#else fixupPC2 = CVMJITcbufGetLogicalPC(con) - 1 * CVMCPU_INSTRUCTION_SIZE;#endif#ifdef IAI_CODE_SCHEDULER_SCORE_BOARD fixupPC2 = CVMJITcbufGetLogicalInstructionPC(con);#endif /* Object is locked. Release microlock */ CVMJITaddCodegenComment((con, "CVM_MICROLOCK_UNLOCKED")); CVMCPUemitLoadConstant(con, destRegID, CVM_MICROLOCK_UNLOCKED); CVMJITaddCodegenComment((con, "CVMglobals.objGlobalMicroLock = CVM_MICROLOCK_UNLOCKED")); CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_STR32_OPCODE, destRegID, microLockRegID, 0); /* Failure target. Make instrinsic return false. 
*/ CVMtraceJITCodegen(("\t\tfailed:\n")); CVMJITfixupAddress(con, fixupPC1, CVMJITcbufGetLogicalPC(con), CVMJIT_COND_BRANCH_ADDRESS_MODE); CVMCPUemitLoadConstant(con, destRegID, CVM_FALSE); /* "done" target. No change is made instrinc result. */ CVMtraceJITCodegen(("\t\tdone:\n")); CVMJITfixupAddress(con, fixupPC2, CVMJITcbufGetLogicalPC(con), CVMJIT_COND_BRANCH_ADDRESS_MODE);#ifdef CVM_DEBUG /* For Debug builds, we do the following: * * 1. Set the ee's microlock depth to 0 or 1 based on success. * 2. Set CVMglobals.jit.currentSimpleSyncMB to the Simple Sync * mb we are currently generating code for. * * (1) is done so C code will assert if the microlock gets out * of balance. Note we don't assert in here in the generated code * because it is too ugly. * * (2) is done in case there is ever a problem, we can find out * the last Simple Sync method called by looking in CVMglobals. * It is disabled with #if 0 by default. */ { /* 1. Set the ee's microlock depth to 0 or 1 based on success. */ int eeReg;#ifndef CVMCPU_EE_REG CVMRMResource *eeRes = CVMRMgetResource(CVMRM_INT_REGS(con), CVMRM_ANY_SET, CVMRM_EMPTY_SET, 1); eeReg = CVMRMgetRegisterNumber(eeRes); /* Get the ee: */ CVMJITaddCodegenComment((con, "eeReg = ccee->ee")); CVMCPUemitCCEEReferenceImmediate(con, CVMCPU_LDR32_OPCODE, eeReg, CVMoffsetof(CVMCCExecEnv, eeX));#else eeReg = CVMCPU_EE_REG;#endif /* Set the ee's microlock depth. We just set it to the result * of this intrinsic, which will be 0 or 1. */ CVMJITaddCodegenComment((con, "ee->microLock = <result>")); CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_STR32_OPCODE, destRegID, eeReg, offsetof(CVMExecEnv, microLock));#ifndef CVMCPU_EE_REG CVMRMrelinquishResource(CVMRM_INT_REGS(con), eeRes);#endif } /* * The following debugging code is disabled for now, but can be enabled * if Simple Sync methods are suspected of causing problems, like a * deadlock or assert. 
*/#if 0 { /* Store the mb of the currently executing Simple Sync method into * CVMglobals.jit.currentSimpleSyncMB. */ CVMRMResource* currentSimpleSyncMBRes; CVMRMResource* simpleSyncMBRes; CVMJITMethodContext* mc = con->inliningStack[con->inliningDepth-1].mc; /* load CVMglobals.jit.currentSimpleSyncMB address into a register */ CVMJITsetSymbolName((con, "&CVMglobals.jit.currentSimpleSyncMB")); currentSimpleSyncMBRes = CVMRMgetResourceForConstant32( CVMRM_INT_REGS(con), CVMRM_ANY_SET, CVMRM_SAFE_SET, (CVMUint32)&CVMglobals.jit.currentSimpleSyncMB); /* load the Simple Sync mb address into a register */ CVMJITsetSymbolName((con, "mb %C.%M", mc->cb, mc->mb)); simpleSyncMBRes = CVMRMgetResourceForConstant32( CVMRM_INT_REGS(con), CVMRM_ANY_SET, CVMRM_SAFE_SET, (CVMUint32)mc->mb); /* Store the Simple Sync mb into CVMglobals.jit.currentSimpleSyncMB. */ CVMJITaddCodegenComment((con, "CVMglobals.jit.currentSimpleSyncMB = %C.%M", mc->cb, mc->mb)); CVMCPUemitMemoryReferenceImmediate(con, CVMCPU_STR32_OPCODE, CVMRMgetRegisterNumber(simpleSyncMBRes), CVMRMgetRegisterNumber(currentSimpleSyncMBRes), 0); CVMRMrelinquishResource(CVMRM_INT_REGS(con), currentSimpleSyncMBRes); CVMRMrelinquishResource(CVMRM_INT_REGS(con), simpleSyncMBRes); } { /* Store the mb of the currently executing method into * CVMglobals.jit.currentMB. */ CVMRMResource* currentMBRes; CVMRMResource* mbRes; /* load CVM
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -