/*
 * @(#)ccminvokers_cpu.S	1.53 06/10/29
 *
 * Portions Copyright 2000-2008 Sun Microsystems, Inc. All Rights
 * Reserved. Use is subject to license terms.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 only, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is
 * included at /legal/license.txt).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this work; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 or visit www.sun.com if you need additional
 * information or have any questions.
 */
/*
 * Copyright 2005 Intel Corporation. All rights reserved.
 */

#include "javavm/include/asmmacros_cpu.h"
#include "javavm/include/jit/jitasmmacros_cpu.h"
#include "javavm/include/jit/jitasmconstants.h"
#include "javavm/include/porting/jit/jit.h"
#include "javavm/include/sync.h"

/*
 * Some of the code in this file gets patched at runtime for handling
 * gc rendezvous. If we aren't copying this code to the code cache,
 * then we must make this a writable section.
 */
#ifdef CVM_JIT_COPY_CCMCODE_TO_CODECACHE
/*
 * NOTE: Some linker such as the ARM RVCT (v2.2) linker sorts
 * sections by attributes and section name. To make sure
 * the CCM copied code in the same order as they are included
 * in ccmcodecachecopy_cpu.S, we need to name the sections
 * in alphabetical order.
 */
	SET_SECTION_EXEC(s3_ccminvokers_cpu)
#else
	SET_SECTION_EXEC_WRITE
#endif

/* Support fastlocking with C helper or swap/spinlock based microlocks */
#if defined(CVM_JIT_CCM_USE_C_HELPER) || \
    defined(CVM_JIT_CCM_USE_C_SYNC_HELPER) || \
    (CVM_FASTLOCK_TYPE == CVM_FASTLOCK_MICROLOCK && \
     CVM_MICROLOCK_TYPE == CVM_MICROLOCK_SWAP_SPINLOCK)

/*
 * CVMCCMinvokeNonstaticSyncMethodHelper
 *
 * Invocation helper for nonstatic synchronized methods. It attempts a
 * fast lock of the receiver object under the global microlock, commits
 * the new compiled frame on success, and falls through into
 * CVMARMgcPatchPointAtInvoke. On any fast-lock failure it reloads the
 * mb and defers to letInterpreterDoInvoke.
 */
	ENTRY(CVMCCMinvokeNonstaticSyncMethodHelper)
ENTRY1( CVMCCMinvokeNonstaticSyncMethodHelper )
LABEL(CVMCCMinvokeNonstaticSyncMethodHelperLocal)
	/* r0 = mb
	 * r1 = CVMObjectICell* of object to sync on.
	 *
	 * r4 = v1 = jfp
	 * r5 = v2 = jsp
	 */
#define NEW_JFP CVMARM_NEWJFP_REGNAME	/* r7 = v4 */
	/* Register roles in this helper:
	 *   r0 = ee
	 *   r1 = obj
	 *   r4 = v1 = JFP
	 *   r5 = v2 = JSP
	 *   r7 = v4 = NEW_JFP
	 */
#define EE	r0
#define OBJ	r1
#define OBITS	r3
#define MICROLOCK r8
#define LOCKREC	ip
#define TEMP	r11
#define TEMP2	r10
#define ORIG_LR TEMP2
#define MB	r0
#define PREV	CVMARM_PREVFRAME_REGNAME /* v3 = r6 */

#if defined(CVM_JIT_CCM_USE_C_HELPER) || defined(CVM_JIT_CCM_USE_C_SYNC_HELPER)
/* Support fastlocking with C helper */
	bl	letInterpreterDoInvoke
#else
/* Support fastlocking with swap/spinlock based microlocks */
/* IAI-04 */
#ifdef IAI_CACHE_GLOBAL_VARIABLES_IN_WMMX
	textrmuw MICROLOCK, W_MICROLOCK, #0
	str	MB, [NEW_JFP, #OFFSET_CVMFrame_mb]
#else
	ldr	MICROLOCK, SYMBOL(CVMobjGlobalMicroLockPtr)
	/* save r0 (mb) into the new frame before r0 is reused for ee */
	str	MB, [NEW_JFP, #OFFSET_CVMFrame_mb]
	/* Schedule r8 = microlock pointer load early */
	ldr	MICROLOCK, [MICROLOCK]
#endif
	ldr	OBJ, [r1]		/* get object to sync on. */
	ldr	TEMP, [MICROLOCK]
	/* now r0 = ee */
	ldr	EE, [sp, #OFFSET_CVMCCExecEnv_ee]
	/* optimistically store receiver object */
	str	OBJ, [NEW_JFP, #OFFSET_CVMCompiledFrame_receiverObjX]

	/* Do fastTryLock(): */
	/* Acquire the microlock: */
	mov	TEMP, #CVM_MICROLOCK_LOCKED /* Swap CVM_MICROLOCK_LOCKED into */
#ifndef CVM_MP_SAFE
	swp	TEMP, TEMP, [MICROLOCK]	    /* the lockWord. */
	/* Get obits.
	 * INVARIANT: All branches and fallthroughs to _lockObj
	 * have to set up OBITS first */
	ldr	OBITS, [OBJ, #OFFSET_CVMObjectHeader_various32]
	cmp	TEMP, #CVM_MICROLOCK_UNLOCKED /* See if we succeeded. */
	bne	_fastLockAcquireMicrolock     /* Branch if failed. */
#else
LABEL(_syncInvokeMicrolockMPSafe)
	ldrex	TEMP2, [MICROLOCK]
	cmp	TEMP2, #CVM_MICROLOCK_UNLOCKED	/* is the lock free? */
	bne	_fastLockAcquireMicrolock	/* already locked */
	strex	TEMP2, TEMP, [MICROLOCK]	/* try acquire the lock */
	cmp	TEMP2, #0			/* strex succeed? */
	bne	_syncInvokeMicrolockMPSafe	/* No. Try again */
	mcr	p15, 0, TEMP2, c7, c10, 5	/* memory barrier */
	ldr	OBITS, [OBJ, #OFFSET_CVMObjectHeader_various32]
#endif
#ifdef CVM_GLOBAL_MICROLOCK_CONTENTION_STATS
	/* Count successful fast microlock acquisitions. */
	ldr	TEMP, SYMBOL(fastMlockimplCount)
	ldr	TEMP2, [TEMP]
	add	TEMP2, TEMP2, #1
	str	TEMP2, [TEMP]
#endif
	/* The microlock has been acquired: */
LABEL(_lockObj)
	/* Low 2 bits of obits hold the lock state. */
	and	ip, OBITS, #0x3
	cmp	ip, #CONSTANT_CVM_LOCKSTATE_UNLOCKED
	bne	_objAlreadyLocked

	/* If we get here, then the object has not been locked yet. */
	/* lockrec = ee->objLocksFreeOwned: */
	ldr	LOCKREC, [EE, #OFFSET_CVMExecEnv_objLocksFreeOwned]
	mov	TEMP, #1		/* Initial lock re-entry count. */
	cmp	LOCKREC, #0
	beq	_lockRecordNotAvailable
#ifdef CVM_DEBUG
	/* lockrec->state = CONSTANT_CVM_OWNEDMON_OWNED: */
	mov	r2, #CONSTANT_CVM_OWNEDMON_OWNED
	str	r2, [LOCKREC, #OFFSET_CVMOwnedMonitor_state]
#endif
	/* obj->hdr.various32 = lockrec: */
	str	LOCKREC, [OBJ, #OFFSET_CVMObjectHeader_various32]
	/* lockrec->count = 1: (TEMP initialized above) */
	str	TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_count]
	/* lockrec->u.fast.bits = obits: */
	str	OBITS, [LOCKREC, #OFFSET_CVMOwnedMonitor_u_fast_bits]
	/* lockrec->object = obj: */
	str	OBJ, [LOCKREC, #OFFSET_CVMOwnedMonitor_object]
#ifdef CVM_MP_SAFE
	mov	r3, #0
	mcr	p15, 0, r3, c7, c10, 5	/* memory barrier. */
#endif
	/* Release the microlock: */
	mov	r3, #CVM_MICROLOCK_UNLOCKED
	str	r3, [MICROLOCK]	/* microlock->lockWord = UNLOCKED. */

	/* Remove lockrec from the ee's free list: */
	/* nextRec = lockrec->next: */
	ldr	TEMP, [LOCKREC, #OFFSET_CVMOwnedMonitor_next]
	/* Add the lockrec to the ee's owned list: */
	/* nextRec2 = ee->objLocksOwned: */
	ldr	r1, [EE, #OFFSET_CVMExecEnv_objLocksOwned]
	/* ee->objLocksFreeOwned = nextRec: */
	str	TEMP, [EE, #OFFSET_CVMExecEnv_objLocksFreeOwned]
	/* lockrec->next = nextRec2: */
	str	r1, [LOCKREC, #OFFSET_CVMOwnedMonitor_next]
	/* ee->objLocksOwned = lockrec: */
	str	LOCKREC, [EE, #OFFSET_CVMExecEnv_objLocksOwned]

LABEL(_fastlockSuccess)
	/* Lock is held; commit the new frame by linking it to the old one. */
	ldr	MB, [NEW_JFP, #OFFSET_CVMFrame_mb]	/* Reload MB. */
	mov	PREV, JFP
	mov	JFP, NEW_JFP
LABEL(jfp_set)
	/* compiled frame */
#ifdef CVM_DEBUG_ASSERTS
	mov	r3, #CONSTANT_CVM_FRAMETYPE_NONE
	strb	r3, [JFP, #OFFSET_CVMFrame_type]
	mov	r3, #-1
	strb	r3, [JFP, #OFFSET_CVMFrame_flags]
#endif
	str	PREV, [JFP, #OFFSET_CVMFrame_prevX]

	/* set up registers */
	/* see about stack frame requirements. */
#ifdef CVM_TRACE
	/* bl clobbers lr, so preserve the return address around the glue. */
	mov	ORIG_LR, lr
	mov	r1, JFP
	ldr	r0, [sp, #OFFSET_CVMCCExecEnv_ee]
	bl	SYM_NAME(CVMCCMtraceMethodCallGlue)
	mov	lr, ORIG_LR
#endif
#endif /* CVM_MICROLOCK_SWAP_SPINLOCK */

/*
 * CVMARMgcPatchPointAtInvoke
 *
 * Reached by fallthrough from the helper above. Normally a plain
 * return; GC patches it into a branch when a rendezvous is needed.
 */
	ENTRY(CVMARMgcPatchPointAtInvoke)
ENTRY1( CVMARMgcPatchPointAtInvoke )
LABEL(CVMARMgcPatchPointAtInvokeLocal)
	/* GC check - gc will patch at this location when a rendezvous is
	 * needed. See ccmGcPatchPoints in jitinit_cpu.c. The patch will
	 * be a "b CVMARMhandleGCAtInvoke"
	 */
	mov	pc, lr	/* Return to method after handling possible GC request */

/*
 * CVMARMhandleGCAtInvoke
 *
 * Target of the patched branch above: becomes GC safe, rendezvous with
 * the collector, then resumes via CVMARMgcPatchPointAtInvokeLocal.
 */
	ENTRY(CVMARMhandleGCAtInvoke)
ENTRY1( CVMARMhandleGCAtInvoke)
#if !defined(CVM_JIT_CCM_USE_C_HELPER) && !defined(CVM_JIT_CCM_USE_C_SYNC_HELPER)
	/* At this point a GC is requested. */
	mov	ORIG_LR, lr
	FIXUP_FRAMES_0(JFP)
	ldr	r0, [sp, #OFFSET_CVMCCExecEnv_ee]	/* r0 = ee */
	ldr	r1, [JFP, #OFFSET_CVMFrame_mb]		/* r1 = mb */
	/* We will be gc safe soon.
	 * Prevent this method from being decompiled */
	str	r1, [r0, #OFFSET_CVMExecEnv_invokeMb]
	/* Check if this is a synchronized invocation
	 * If it is, we have to stash the receiver in the
	 * newly pushed frame into a safe location. The new frame is not
	 * yet "committed" to the stack, and as such, cannot be located
	 * by GC.
	 */
	ldrh	r1, [r1, #OFFSET_CVMMethodBlock_invokerAndAccessFlagsX]
	tst	r1, #CONSTANT_METHOD_ACC_SYNCHRONIZED
	/* Synchronized method if result of 'tst' is 'ne'. Stash
	 * receiver in [ee->miscICell] */
	ldrne	r1, [JFP, #OFFSET_CVMCompiledFrame_receiverObjX]
	ldrne	r2, [r0, #OFFSET_CVMExecEnv_miscICell]
	strne	r1, [r2]	/* stash in miscICell */
/* IAI-04 */
#ifdef IAI_CACHE_GLOBAL_VARIABLES_IN_WMMX
	textrmuw ip, W_CVMGLOBALS, #0
#else
	ldr	ip, SYMBOL(CVMglobals)
#endif
	/* At this point r0 is guaranteed to contain the ee */
	str	PREV, [r0, #OFFSET_CVMExecEnv_interpreterStack+OFFSET_CVMStack_currentFrame]
	mov	r3, #1
	add	r1, ip, #OFFSET_CVMGlobalState_cstate_GCSAFE
	add	r2, r0, #OFFSET_CVMExecEnv_tcstate_GCSAFE
	CALL_VM_FUNCTION(CVMcsRendezvous)
	/* reload the ee and mb */
	ldr	r0, [sp, #OFFSET_CVMCCExecEnv_ee]	/* r0 = ee */
	ldr	r1, [JFP, #OFFSET_CVMFrame_mb]		/* r1 = mb */
	/* we no longer need to prevent the method from being decompiled */
	mov	ip, #0
	str	ip, [r0, #OFFSET_CVMExecEnv_invokeMb]
	mov	lr, ORIG_LR
	/*
	 * We've returned from the GC. Check for a sync method
	 * again to see if we should restore 'receiverObjX'
	 * from miscICell.
	 */
	ldrh	r2, [r1, #OFFSET_CVMMethodBlock_invokerAndAccessFlagsX]
	tst	r2, #CONSTANT_METHOD_ACC_SYNCHRONIZED
	beq	CVMARMgcPatchPointAtInvokeLocal
	/* Restore receiverObjX in new frame */
	ldr	r0, [r0, #OFFSET_CVMExecEnv_miscICell]	/* r0 = &miscICell */
	ldr	ip, [r0]
	str	ip, [JFP, #OFFSET_CVMCompiledFrame_receiverObjX]
	/* And clear miscICell for other uses */
	mov	ip, #0
	str	ip, [r0]
	/* reload the ee.
	 * The frame flush needs it at the return point */
	ldr	r0, [sp, #OFFSET_CVMCCExecEnv_ee]	/* r0 = ee */
	b	CVMARMgcPatchPointAtInvokeLocal

/* Slow path: microlock was contended; acquire it via the C routine. */
LABEL(_fastLockAcquireMicrolock)
	mov	ORIG_LR, lr
	/* Call a C function to acquire the microlock: */
	mov	r0, MICROLOCK
	CALL_VM_FUNCTION(CVMmicrolockLockImpl)
	mov	lr, ORIG_LR
	/* Re-derive OBJ/EE (r0/r1 were clobbered by the call). */
	ldr	OBJ, [NEW_JFP, #OFFSET_CVMCompiledFrame_receiverObjX]
	ldr	EE, [sp, #OFFSET_CVMCCExecEnv_ee]
	ldr	OBITS, [OBJ, #OFFSET_CVMObjectHeader_various32]	/* Get obits. */
	b	_lockObj

/* Object already fast-locked: bump the re-entry count if we own it. */
LABEL(_objAlreadyLocked)
	cmp	ip, #CONSTANT_CVM_LOCKSTATE_LOCKED
	bne	_fastReentryFailed
	/* Make sure the current thread owns this lock: */
	ldr	TEMP, [OBITS, #OFFSET_CVMOwnedMonitor_owner]
	/* Optimistically load count */
	ldr	ip, [OBITS, #OFFSET_CVMOwnedMonitor_count]
	/* Are we owner? */
	cmp	TEMP, EE
	bne	_fastReentryFailed
	add	ip, ip, #1	/* count++ */
	str	ip, [OBITS, #OFFSET_CVMOwnedMonitor_count]
	/* Release the microlock: */
	mov	r3, #CVM_MICROLOCK_UNLOCKED
	str	r3, [MICROLOCK]	/* microlock->lockWord = UNLOCKED. */
	b	_fastlockSuccess

/* Fast lock failed: release the microlock and punt to the interpreter. */
LABEL(_fastReentryFailed)
LABEL(_lockRecordNotAvailable)
	/* Release the microlock: */
	mov	r3, #CVM_MICROLOCK_UNLOCKED
	str	r3, [MICROLOCK]
	/* Fall through to _fastTryLockFailed. */
LABEL(_fastTryLockFailed)
	ldr	MB, [NEW_JFP, #OFFSET_CVMFrame_mb]
	b	letInterpreterDoInvoke
#endif /* CVM_MICROLOCK_SWAP_SPINLOCK support */
	SET_SIZE( CVMCCMinvokeNonstaticSyncMethodHelper )

#undef EE
#undef OBJ
#undef OBITS
#undef MICROLOCK
#undef LOCKREC

/* Support for fastlocking with atomic ops */
#elif CVM_FASTLOCK_TYPE == CVM_FASTLOCK_ATOMICOPS

#define MB	r0
#define PREV	CVMARM_PREVFRAME_REGNAME /* v3 = r6 */
#define ORIG_LR	r10

	ENTRY(CVMCCMinvokeNonstaticSyncMethodHelper)
LABEL(CVMCCMinvokeNonstaticSyncMethodHelperLocal)
	; r0 = mb
	; r1 = CVMObjectICell* of object to sync on.
	;