📄 ccminvokers_cpu.s
        ldrb    r1, [r0, #OFFSET_CVMMethodBlock_argsSizeX]  /* r1 = argsSize */
        add     JSP, ARGS, r1, LSL #2        /* adjust TOS past args */
        str     JSP, [JFP, #OFFSET_CVMFrame_topOfStack]
        b       returnToInterpreter1

        /* check if a new transition frame to execute has been setup */
LABEL(new_transition)
        cmp     r0, #CONSTANT_CNI_NEW_TRANSITION_FRAME
        streq   ARGS, [JFP, #OFFSET_CVMFrame_topOfStack]    /* pop args */
        beq     returnToInterpreter0
        /* an exception has occurred */
        b       returnToInterpreter
#undef MB
#undef ARGS
        SET_SIZE( CVMCCMinvokeCNIMethod )

        ENTRY(CVMCCMinvokeJNIMethod)
ENTRY1( CVMCCMinvokeJNIMethod )
        /* r4 = v1 = jfp
         * r5 = v2 = jsp
         * r0 = target mb
         */
#define MB r0
        str     lr, [JFP, #OFFSET_CVMCompiledFrame_PC]
        FIXUP_FRAMES(JFP, {r0}, 1)
        mov     r1, MB
        ldr     r0, [sp, #OFFSET_CVMCCExecEnv_ee]
        str     JSP, [JFP, #OFFSET_CVMFrame_topOfStack]
        str     JFP, [r0, #OFFSET_CVMExecEnv_interpreterStack+OFFSET_CVMStack_currentFrame]
        CALL_VM_FUNCTION(CVMinvokeJNIHelper)
        ldr     JSP, [JFP, #OFFSET_CVMFrame_topOfStack]
        /* check for exception */
        cmp     r0, #0
        ldrne   pc, [JFP, #OFFSET_CVMCompiledFrame_PC]
        b       returnToInterpreter0
        SET_SIZE( CVMCCMinvokeJNIMethod )

/*
 * Invoke a virtual method
 *
 * This function is used for doing a true virtual invocation in the patched
 * method invocations implementation. In particular, it is used if we
 * have emitted a direct branch to a method known not to be overridden during
 * compilation of the method that invokes it, and later it becomes overridden.
 * When this happens the original direct method call is patched to instead
 * call here so a true virtual invocation can be done.
 */
#ifdef CVM_JIT_PATCHED_METHOD_INVOCATIONS
        /*
         * r0 = invokeMb (not the targetMb)
         */
        ENTRY(CVMCCMinvokeVirtual)
ENTRY1( CVMCCMinvokeVirtual )
        /* Get "this" by looking back on the stack */
        ldrb    r1, [r0, #OFFSET_CVMMethodBlock_argsSizeX]
        sub     r1, JSP, r1, LSL #2          /* r1 = stack address of "this" */
        ldr     r1, [r1, #0]                 /* r1 = "this" */
        /* get the method table from "this" */
        ldr     r2, [r1, #OFFSET_CVMObjectHeader_clas]
        bic     r2, r2, #3                   /* mask off low 2 bits of object cb */
        ldr     r2, [r2, #OFFSET_CVMClassBlock_methodTablePtrX]
        /* get the method table index from invokeMb */
        ldrh    r3, [r0, #OFFSET_CVMMethodBlock_methodTableIndexX]
        /* r0 = cb->methodTablePtrX[ip]: */
        ldr     r0, [r2, r3, asl #2]
        /* call the method */
        ldr     pc, [r0, #OFFSET_CVMMethodBlock_jitInvokerX]
        SET_SIZE( CVMCCMinvokeVirtual )
#endif

        ENTRY(CVMCCMletInterpreterDoInvoke)
ENTRY1( CVMCCMletInterpreterDoInvoke )
LABEL(letInterpreterDoInvoke_store_lr)
        str     lr, [JFP, #OFFSET_CVMCompiledFrame_PC]
        ENTRY(CVMCCMletInterpreterDoInvokeWithoutFlushRetAddr)
ENTRY1( CVMCCMletInterpreterDoInvokeWithoutFlushRetAddr )
LABEL(letInterpreterDoInvoke)
        /*
         * Trying to invoke something beyond our ability.
         * Return the mb to the interpreter and let it do the
         * dirty work. We have already set up the return PC in
         * our own frame. We need to set topOfStack, then return
         * the target MB* as a C return value.
         */
        FIXUP_FRAMES(JFP, {r0}, 1)
        ldr     r1, [sp, #OFFSET_CVMCCExecEnv_ee]
        str     JSP, [JFP, #OFFSET_CVMFrame_topOfStack]
        str     JFP, [r1, #OFFSET_CVMExecEnv_interpreterStack+OFFSET_CVMStack_currentFrame]
        /* Keep sp 64-bit aligned for AAPCS */
        add     sp, sp, #((CONSTANT_CVMCCExecEnv_size+7) BIT_AND ~7)
#ifdef WINCE
        ldmfd   sp, {v1-v7, fp, sp, pc}
#else
        /* r4-r10 = v1-v7 */
        ldmfd   sp!, {r4-r10, fp, ip, pc}
#endif
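/*
 * A minimal C sketch of the true virtual dispatch that CVMCCMinvokeVirtual
 * performs above. This is illustrative only and not part of this file: the
 * struct layouts and the invokeVirtual name are hypothetical stand-ins for
 * the fields behind the OFFSET_* symbols; only the lookup logic is taken
 * from the assembly.
 *
 *   typedef struct CVMMethodBlock CVMMethodBlock;
 *   struct CVMClassBlock   { CVMMethodBlock **methodTablePtrX; };
 *   struct CVMObjectHeader { struct CVMClassBlock *clas; }; // low 2 bits = flags
 *   struct CVMMethodBlock {
 *       unsigned char  argsSizeX;          // argument size in words
 *       unsigned short methodTableIndexX;  // vtable index
 *       void         (*jitInvokerX)(void); // JIT entry point
 *   };
 *
 *   static void invokeVirtual(CVMMethodBlock *invokeMb, void **jsp) {
 *       // "this" sits argsSize words below the top of the Java stack.
 *       struct CVMObjectHeader *self =
 *           (struct CVMObjectHeader *)jsp[-(int)invokeMb->argsSizeX];
 *       // Mask off the low 2 flag bits to recover the class block (bic #3).
 *       struct CVMClassBlock *cb =
 *           (struct CVMClassBlock *)((unsigned long)self->clas & ~3UL);
 *       // Re-dispatch through the method table, then enter the target.
 *       CVMMethodBlock *targetMb =
 *           cb->methodTablePtrX[invokeMb->methodTableIndexX];
 *       targetMb->jitInvokerX();
 *   }
 */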
/*
 * Do a GC check, and rendezvous if one is requested.
 */
LABEL(handleGCForReturn)
        /* At this point a GC is requested. */
        FIXUP_FRAMES(JFP, {lr}, 1)
        ldr     r0, [sp, #OFFSET_CVMCCExecEnv_ee]   /* r0 = ee */
        ldr     r1, [JFP, #OFFSET_CVMFrame_mb]      /* r1 = mb */
        /* Special flag that signals we are handling gc for return. */
        /* Used by CVMcompiledFrameScanner. */
        mov     lr, #CONSTANT_HANDLE_GC_FOR_RETURN
        str     lr, [JFP, #OFFSET_CVMCompiledFrame_PC]
/* IAI-04 */
#ifdef IAI_CACHE_GLOBAL_VARIABLES_IN_WMMX
        textrmuw lr, W_CVMGLOBALS, #0
#else
        ldr     lr, SYMBOL(CVMglobals)
#endif
        /* We will be gc safe soon. Prevent this method from being decompiled. */
        str     r1, [r0, #OFFSET_CVMExecEnv_invokeMb]
        str     JFP, [r0, #OFFSET_CVMExecEnv_interpreterStack+OFFSET_CVMStack_currentFrame]
        /* r0 is ee already. Fill in the arguments to CVMcsRendezvous. */
        add     r1, lr, #OFFSET_CVMGlobalState_cstate_GCSAFE
        add     r2, r0, #OFFSET_CVMExecEnv_tcstate_GCSAFE
        mov     r3, #1
        CALL_VM_FUNCTION(CVMcsRendezvous)
        /* GC done. */
        /* Reload the ee */
        ldr     r0, [sp, #OFFSET_CVMCCExecEnv_ee]   /* r0 = ee */
        /* We no longer need to prevent the method from being decompiled. */
        mov     lr, #0
        str     lr, [r0, #OFFSET_CVMExecEnv_invokeMb]
        /* This is expected at the beginning of returns */
        ldr     PREV, [JFP, #OFFSET_CVMFrame_prevX]
        /* Return to caller */
        mov     pc, r7

/*
 * The GC checks for the various return variants
 */
        ENTRY(CVMARMhandleGCForReturnFromMethod)
ENTRY1( CVMARMhandleGCForReturnFromMethod )
        ldr     r7, SYMBOL(CVMCCMreturnFromMethod)
        b       handleGCForReturn

        ENTRY(CVMARMhandleGCForReturnFromSyncMethod)
ENTRY1( CVMARMhandleGCForReturnFromSyncMethod )
        ldr     r7, SYMBOL(CVMCCMreturnFromSyncMethod)
        b       handleGCForReturn
        SET_SIZE( CVMARMhandleGCForReturnFromSyncMethod )
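/*
 * A minimal C sketch of the handleGCForReturn flow above. Illustrative only
 * and not part of this file: the types, field names, and gcForReturn itself
 * are hypothetical stand-ins for the OFFSET_* symbols, and the
 * CVMcsRendezvous declaration is simplified to the four arguments the
 * assembly actually passes.
 *
 *   typedef struct { void *pc; void *mb; } Frame;
 *   typedef struct { void *invokeMb; Frame *currentFrame; } ExecEnv;
 *
 *   // simplified signature; mirrors the r0-r3 setup above
 *   extern void CVMcsRendezvous(ExecEnv *ee, void *cs, void *tcs, int block);
 *
 *   static void *gcForReturn(ExecEnv *ee, Frame *jfp,
 *                            void *gcSafeCs, void *gcSafeTcs,
 *                            void *(*resume)(void)) {
 *       // Tag the frame PC so CVMcompiledFrameScanner knows a return
 *       // is in progress, and pin the method against decompilation.
 *       jfp->pc = (void *)CONSTANT_HANDLE_GC_FOR_RETURN;
 *       ee->invokeMb = jfp->mb;
 *       ee->currentFrame = jfp;
 *       CVMcsRendezvous(ee, gcSafeCs, gcSafeTcs, 1);  // wait out the GC
 *       ee->invokeMb = 0;                             // unpin the method
 *       return resume();   // re-enter the interrupted return path (r7)
 *   }
 */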
/*
 * Native code doing a return comes here.
 * It may as well branch, since the return address is not interesting.
 *
 * CVMMethodBlock* CVMCCMreturnFromMethod();
 */
        ENTRY(CVMCCMreturnFromMethod)
ENTRY1( CVMCCMreturnFromMethod )
        /* GC check - gc will patch at this location when a rendezvous is
         * needed. See ccmGcPatchPoints in jitinit_cpu.c. The patch will
         * be a "b CVMARMhandleGCForReturnFromMethod".
         *
         * See if the previous frame is compiled or not.
         * PREV is set up by all code that branches here.
         */
        tst     PREV, #CONSTANT_CVM_FRAME_MASK_SLOW
#ifdef CVM_TRACE
        bne     returnToInterpreter
        mov     r1, JFP
        and     JFP, PREV, #~CONSTANT_CVM_FRAME_MASK_ALL
        ldr     r0, [sp, #OFFSET_CVMCCExecEnv_ee]
        bl      SYM_NAME(CCMtraceMethodReturn)
#ifdef CVMCPU_HAS_CP_REG
        ldr     CVMARM_CP_REGNAME, [JFP, #OFFSET_CVMCompiledFrame_cpBaseRegX]
#endif
        ldr     pc, [JFP, #OFFSET_CVMCompiledFrame_PC]
#else
        /* Returning from one native method to another: do this
         * ourselves. The java sp is already set. */
        andeq   JFP, PREV, #~CONSTANT_CVM_FRAME_MASK_ALL
#ifdef CVMCPU_HAS_CP_REG
        ldreq   CVMARM_CP_REGNAME, [JFP, #OFFSET_CVMCompiledFrame_cpBaseRegX]
#endif
        ldreq   pc, [JFP, #OFFSET_CVMCompiledFrame_PC]
        /* fall through to CVMCCMreturnToInterpreter */
#endif

        ENTRY(CVMCCMreturnToInterpreter)
ENTRY1( CVMCCMreturnToInterpreter )
LABEL(returnToInterpreter)
        FIXUP_FRAMES_0(JFP)
        ldr     r1, [sp, #OFFSET_CVMCCExecEnv_ee]
        /* JSP needs to point just past any return value */
        str     JSP, [JFP, #OFFSET_CVMFrame_topOfStack]
        /* Set stack->currentFrame to the current value of JFP,
         * then return NULL, meaning we don't want the interpreter
         * to take any further action on our behalf (except pop
         * the current frame).
         */
        str     JFP, [r1, #OFFSET_CVMExecEnv_interpreterStack+OFFSET_CVMStack_currentFrame]
LABEL(returnToInterpreter0)
        mov     r0, #0
LABEL(returnToInterpreter1)
        /* Align sp to 64-bit for AAPCS */
        add     sp, sp, #((CONSTANT_CVMCCExecEnv_size+7) BIT_AND ~7)
#ifdef WINCE
        ldmfd   sp, {v1-v7, fp, sp, pc}
#else
        /* r4-r10 = v1-v7 */
        ldmfd   sp!, {r4-r10, fp, ip, pc}
#endif
        SET_SIZE( CVMCCMreturnFromMethod )
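/*
 * A minimal C sketch of the return dispatch above. Illustrative only and not
 * part of this file; returnDispatch and the Frame layout are hypothetical.
 * The key point taken from the assembly: PREV carries flag bits in its low
 * bits, so a "slow" caller frame bounces back to the interpreter, while a
 * compiled caller is resumed directly through its flushed return PC.
 *
 *   typedef struct Frame { void *pc; } Frame;
 *
 *   static void *returnDispatch(unsigned long prev) {   // PREV with flags
 *       if (prev & CONSTANT_CVM_FRAME_MASK_SLOW)
 *           return 0;  // returnToInterpreter: NULL mb, interpreter pops frame
 *       Frame *jfp = (Frame *)
 *           (prev & ~(unsigned long)CONSTANT_CVM_FRAME_MASK_ALL);
 *       return jfp->pc; // jump target: the compiled caller's return PC
 *   }
 */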
/* Support fastlocking using C helpers */
#if defined(CVM_JIT_CCM_USE_C_HELPER) || defined(CVM_JIT_CCM_USE_C_SYNC_HELPER)
/*
 * Native code doing a synchronized return comes here.
 *
 * CVMMethodBlock*
 * CVMCCMreturnFromSyncMethod();
 */
        ENTRY(CVMCCMreturnFromSyncMethod)
ENTRY1( CVMCCMreturnFromSyncMethod )
        b       returnToInterpreter

/* We only support fastlocking with swap/spinlock based microlocks */
#elif CVM_FASTLOCK_TYPE == CVM_FASTLOCK_MICROLOCK && \
      CVM_MICROLOCK_TYPE == CVM_MICROLOCK_SWAP_SPINLOCK
/*
 * Native code doing a synchronized return comes here.
 *
 * CVMMethodBlock*
 * CVMCCMreturnFromSyncMethod();
 */
        ENTRY(CVMCCMreturnFromSyncMethod)
ENTRY1( CVMCCMreturnFromSyncMethod )
        /* GC check - gc will patch at this location when a rendezvous is
         * needed. See ccmGcPatchPoints in jitinit_cpu.c. The patch will
         * be a "b CVMARMhandleGCForReturnFromSyncMethod".
         */
        /*
         * r0 = a1 = ee
         * r1 = a2 = obj
         * r4 = v1 = JFP
         * r5 = v2 = JSP
         * r7 = v4 = PREV
         */
#define EE        r0
#define OBJ       r1
#define MICROLOCK r8
#define LOCKREC   ip
/* IAI-04 */
#ifdef IAI_CACHE_GLOBAL_VARIABLES_IN_WMMX
        textrmuw MICROLOCK, W_MICROLOCK, #0
        mov     r3, #CVM_MICROLOCK_LOCKED
#else
        ldr     MICROLOCK, SYMBOL(CVMobjGlobalMicroLockPtr)
        /* Set up r3 for swp below */
        mov     r3, #CVM_MICROLOCK_LOCKED    /* Swap CVM_MICROLOCK_LOCKED into */
        /* Get address of object microlock */
        ldr     MICROLOCK, [MICROLOCK]
#endif
        /* See if the previous frame is compiled or not. */
        /* PREV is set up by all code that branches here. */
        tst     PREV, #CONSTANT_CVM_FRAME_MASK_SLOW
        ldr     TEMP, [MICROLOCK]
        bne     returnToInterpreter

        /* Do fastTryUnlock(): */
        ldr     EE, [sp, #OFFSET_CVMCCExecEnv_ee]
        ldr     OBJ, [JFP, #OFFSET_CVMCompiledFrame_receiverObjX]
        /* Acquire the microlock: */
#ifndef CVM_MP_SAFE
        swp     r3, r3, [MICROLOCK]          /* the lockWord. */
        /* Get LOCKREC. INVARIANT: All branches and fallthroughs to _unlockObj */
        /* have to set up LOCKREC first. */
        ldr     LOCKREC, [OBJ, #OFFSET_CVMObjectHeader_various32]  /* Get obits. */
        cmp     r3, #CVM_MICROLOCK_UNLOCKED  /* See if we succeeded. */
        bne     _fastUnlockAcquireMicrolock  /* Branch if failed. */
#else
LABEL(_syncReturnMicrolockMPSafe)
        ldrex   TEMP2, [MICROLOCK]
        cmp     TEMP2, #CVM_MICROLOCK_UNLOCKED  /* is the lock free? */
        bne     _fastUnlockAcquireMicrolock     /* already locked */
        strex   TEMP2, r3, [MICROLOCK]          /* acquire the lock */
        cmp     TEMP2, #0                       /* strex succeed? */
        bne     _syncReturnMicrolockMPSafe      /* No. Try again. */
        mcr     p15, 0, TEMP2, c7, c10, 5       /* memory barrier */
        ldr     LOCKREC, [OBJ, #OFFSET_CVMObjectHeader_various32]  /* Get obits. */
#endif
#ifdef CVM_GLOBAL_MICROLOCK_CONTENTION_STATS
        ldr     TEMP, SYMBOL(fastMlockimplCount)
        ldr     TEMP2, [TEMP]
        add     TEMP2, TEMP2, #1
        str     TEMP2, [TEMP]
#endif

        /* The microlock has been acquired: */
LABEL(_unlockObj)
        /* Check to see if the object is locked with a fastlock: */
        tst     LOCKREC, #0x3        /* (obits & 0x3) == CVM_LOCKSTATE_LOCKED? */
        bne     _fastTryUnlockFailed /* If not, we failed. */

        /* If we get here, then the object is locked with a fastlock: */
        /* Make sure that the current thread owns the monitor: */
        ldr     r3, [LOCKREC, #OFFSET_CVMOwnedMonitor_owner]
        /* Optimistically load count */
        ldr     TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_count]
        /* Are we the owner? */
        cmp     r3, EE
        bne     _fastTryUnlockFailed /* If not, we failed. */

        /* If we get here, then the current thread does own the monitor,
           and all is well. Proceed with unlocking: */
        subs    TEMP, TEMP, #1
        bne     _fastTryUnlockSuccess /* If not zero, we are done. */

        /* If we get here, then the re-entry count has reached 0. */
        /* Restore the obits to the object header: */
        ldr     r3, [LOCKREC, #OFFSET_CVMOwnedMonitor_u_fast_bits]
        mov     TEMP2, #CVM_MICROLOCK_UNLOCKED
        str     r3, [OBJ, #OFFSET_CVMObjectHeader_various32]
#ifdef CVM_DEBUG
        /* Make the lockrec play nice with the debug assertions: */
        /* Now TEMP is not going to be the entry count anymore. */
        mov     TEMP, #CONSTANT_CVM_OWNEDMON_FREE
        str     TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_state]
        mov     TEMP, #0
        str     TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_u_fast_bits]
        str     TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_object]
        str     TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_count]
#endif
        /* r3 = ee->objLocksOwned (advanced load for below) */
        ldr     r3, [EE, #OFFSET_CVMExecEnv_objLocksOwned]
#ifdef CVM_MP_SAFE
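/*
 * A minimal C sketch of the fastTryUnlock() path above (the listing is
 * truncated mid-function, so the failure/success tails are summarized).
 * Illustrative only and not part of this file: the types, field names, and
 * constant values are hypothetical stand-ins for the OFFSET_* symbols, and
 * the GCC builtin __sync_lock_test_and_set stands in for the swp and
 * ldrex/strex microlock acquisition sequences.
 *
 *   enum { CVM_MICROLOCK_UNLOCKED = 0, CVM_MICROLOCK_LOCKED = 1 }; // assumed
 *   typedef struct { void *owner; unsigned count; unsigned long fastBits; } OwnedMon;
 *   typedef struct { unsigned long various32; } ObjHeader;  // the obits word
 *
 *   static int fastTryUnlock(void *ee, ObjHeader *obj, volatile int *microlock) {
 *       // Spin until the global object microlock is ours
 *       // (_fastUnlockAcquireMicrolock in the assembly).
 *       while (__sync_lock_test_and_set(microlock, CVM_MICROLOCK_LOCKED)
 *              != CVM_MICROLOCK_UNLOCKED)
 *           ;
 *       OwnedMon *rec = (OwnedMon *)obj->various32;
 *       // Fastlocked means the low 2 obits bits are CVM_LOCKSTATE_LOCKED (0)
 *       // and the obits word points at our lock record; we must also be the
 *       // owning thread, otherwise take the slow path.
 *       if (((unsigned long)rec & 0x3) != 0 || rec->owner != ee) {
 *           *microlock = CVM_MICROLOCK_UNLOCKED;  // _fastTryUnlockFailed
 *           return 0;
 *       }
 *       if (--rec->count == 0)
 *           obj->various32 = rec->fastBits;  // restore the original obits
 *       *microlock = CVM_MICROLOCK_UNLOCKED; // _fastTryUnlockSuccess
 *       return 1;                            // unlocked
 *   }
 */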