📄 natobject.cc
// to acquire the lock, and hence request
// conversion to heavyweight status.
#   define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
                        // Thr_id of holder of lightweight lock.
                        // Only updated by lightweight lock holder.
                        // Must be recognizably invalid if the
                        // lightweight lock is not held.
#   define INVALID_THREAD_ID 0  // Works for Linux?
                        // If zero doesn't work, we have to
                        // initialize lock table.
  volatile unsigned short light_count;
                        // Number of times the lightweight lock
                        // is held minus one.  Zero if lightweight
                        // lock is not held.
  unsigned short heavy_count;
                        // Total number of times heavyweight locks
                        // associated with this hash entry are held
                        // or waiting to be acquired.
                        // Threads in wait() are included even though
                        // they have temporarily released the lock.
  struct heavy_lock * heavy_locks;
                        // Chain of heavy locks.  Protected
                        // by lockbit for he.  Locks may
                        // remain allocated here even if HEAVY
                        // is not set and heavy_count is 0.
                        // If a lightweight and heavyweight lock
                        // correspond to the same address, the
                        // lightweight lock is the right one.
};

#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) % JV_SYNC_TABLE_SZ)

// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand-
// generated semi-illegal .class files, and then it doesn't present a
// security hole.

#ifdef LOCK_DEBUG
  void print_he(hash_entry *he)
  {
    fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
                    "\tlight_thr_id = 0x%lx, light_count = %d, "
                    "heavy_count = %d\n\theavy_locks:", he,
            he - light_locks, he -> address, he -> light_thr_id,
            he -> light_count, he -> heavy_count);
    print_hl_list(he -> heavy_locks);
    fprintf(stderr, "\n");
  }
#endif /* LOCK_DEBUG */

static bool mp = false;         // Known multiprocessor.

// Wait for roughly 2^n units, touching as little memory as possible.
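// Roughly, assuming a multiprocessor (so spin_limit == MP_SPINS and
// yield_limit == MP_SPINS + YIELDS below), the schedule works out to:
//   n in [0, 10)  : busy-wait for SPINS_PER_UNIT << n empty iterations;
//   n in [10, 14) : yield the processor;
//   n >= 14       : sleep for MIN_SLEEP_USECS << (n - 14) microseconds,
//                   capped at MAX_SLEEP_USECS.
// On a uniprocessor there is no busy-wait phase: small n yields, larger
// n sleeps.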
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001; // Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}

// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}

// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr)
    hl = hl -> next;
  return hl;
}

// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}

// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

// This only addresses misalignment of statics, not heap objects.  It
// works only because registering statics for finalization is a noop,
// no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  assert(hl -> address == addr);

  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;

  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
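      // Schematically (a sketch of the resulting chain, assuming
      // HAVE_BOEHM_GC):
      //   GC runs heavy_lock_obj_finalization_proc(obj, hl)
      //     -> save and clear old_finalization_proc / old_client_data
      //     -> reregister this procedure as obj's only finalizer
      //     -> invoke the saved old_finalization_proc(obj, old_client_data)
      // The user's finalizer therefore runs exactly once, and the lock
      // itself is cleaned up on a later GC cycle.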
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
        GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc,
                                       cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;        // Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Make sure lock is not held and then destroy condvar and mutex.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
}

// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  assert(he -> heavy_count == 0);
  assert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately,
  // that creates a race between our finalizer removal and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for (; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      assert(0 != obj);         // If this was previously finalized, it
                                // should no longer appear on our list.
      hl -> address = 0;        // Finalization proc might still see it
                                // after we finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
        // Remove our finalization procedure.
        // Reregister the client's finalizer, if applicable.
        GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                       old_client_data, 0, 0);
        // Note that our old finalization procedure may have been
        // previously determined to be runnable, and may still run.
        // FIXME - direct dependency on Boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Wait for a possible lock holder to finish unlocking it.
        // This is only an issue if we have to explicitly destroy the mutex
        // or possibly if we have to destroy a condition variable that is
        // still being notified.
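        // (The lock/unlock pair below is a handshake rather than mutual
        // exclusion: acquiring and immediately releasing the mutex ensures
        // that any thread still inside the unlock path for this mutex is
        // done with it before we destroy it.)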
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}

// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}

// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl->si.init = true;         // needed?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
                          (void *)addr, heavy_lock_obj_finalization_proc,
                          hl, &hl->old_finalization_proc,
                          &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}

// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable branch on a register value is probably cheaper
  // than dereferencing addr.  We could also permanently lock the NULL
  // entry in the hash table.  But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  assert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address), 0, addr), true))
    {
      assert(he -> light_thr_id == INVALID_THREAD_ID);
      assert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
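      // In other words (a sketch of the fast path): one compare_and_swap
      // took the hash entry from { address == 0 } to { address == addr,
      // light_thr_id == self, light_count == 0 }, with no heavyweight
      // state touched at all.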
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right
          // address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != (address & ~LOCKED))
                goto retry;
              spin(i);
            }
          address &= ~LOCKED;
          if (!compare_and_swap(&(he -> address), address, address | LOCKED))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          assert(he -> address == (address | LOCKED));
          release_set(&(he -> address),
                      (address | REQUEST_CONVERSION | HEAVY));
                                // release lock on he
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
            }
          keep_live(addr);
                // Guarantee that hl doesn't get unlinked by the finalizer.
                // This is only an issue if the client fails to release
                // the lock, which is unlikely.
          assert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);