📄 natobject.cc
      heavy_lock_finalization_proc (hl);
#   endif
    }
}

// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
// FIXME: Why does this unlock the hash entry?  I think that
// could now be done more cleanly in MonitorExit.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately,
  // that creates a race between our finalizer removal and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for (; 0 != hl; hl = hl -> next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);   // If this was previously finalized, it should
                            // no longer appear on our list.
      hl -> address = 0;    // Finalization proc might still see it after
                            // we finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#   ifdef HAVE_BOEHM_GC
      // Remove our finalization procedure.
      // Reregister the client's finalizer, if applicable.
      GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                     old_client_data, 0, 0);
      // Note that our old finalization procedure may have been
      // previously determined to be runnable, and may still run.
      // FIXME - direct dependency on Boehm GC.
#   endif
#   if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Wait for a possible lock holder to finish unlocking it.
      // This is only an issue if we have to explicitly destroy the mutex,
      // or possibly if we have to destroy a condition variable that is
      // still being notified.
      _Jv_MutexLock(&(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      heavy_lock_finalization_proc (hl);
#   endif
    }
  release_set(&(he -> address), new_address_val);
}

// We hold the lock on he, and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This otherwise seems to become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}

// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  hl -> si.init = true;  // needed?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
  GC_REGISTER_FINALIZER_NO_ORDER((void *)addr,
                                 heavy_lock_obj_finalization_proc, hl,
                                 &hl->old_finalization_proc,
                                 &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}

// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);

  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}
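// A road map for the two entry points below, summarizing what the code
// itself does: each hash_entry caches at most one lightweight lock in
// its address field (tagged with the LOCKED, HEAVY and
// REQUEST_CONVERSION bits of FLAGS), plus a chain of heavy_locks for
// objects hashing to the same entry.
//   - Uncontended enter: one compare_and_swap installs the object
//     address, and light_thr_id records the owner; recursive entries
//     just bump light_count.
//   - Contention on the lightweight lock: spin up to N_SPINS times,
//     then inflate by taking a heavy lock and waiting on its condition
//     variable with REQUEST_CONVERSION set until the holder converts.
//   - Hash collision with a different object: fall back to the heavy
//     lock obtained from get_heavy.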
void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable branch on a register value is probably cheaper
  // than dereferencing addr.  We could also permanently lock the NULL
  // entry in the hash table, but it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address), 0, addr), true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      LOG(ACQ_LIGHT, addr, self);
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          JvAssert(!(address & LOCKED));
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          // FIXME: Does this make sense on a uniprocessor, where
          // it actually yields?  It's probably cheaper to convert.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != address)
                goto retry;
              spin(i);
            }

          if (!compare_and_swap(&(he -> address), address, address | LOCKED))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          JvAssert(he -> address == (address | LOCKED));
          release_set(&(he -> address),
                      (address | REQUEST_CONVERSION | HEAVY));
                                // Release the lock on he.
          LOG(REQ_CONV, (address | REQUEST_CONVERSION | HEAVY), self);
          // If _Jv_CondWait is interrupted, we ignore the interrupt, but
          // restore the thread's interrupt status flag when done.
          jboolean interrupt_flag = false;
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              int r = _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex),
                                    0, 0);
              if (r == _JV_INTERRUPTED)
                {
                  interrupt_flag = true;
                  Thread::currentThread()->interrupt_flag = false;
                }
            }
          if (interrupt_flag)
            Thread::currentThread()->interrupt_flag = interrupt_flag;
          keep_live(addr);
                // Guarantee that hl doesn't get unlinked by the finalizer.
                // This is only an issue if the client fails to release
                // the lock, which is unlikely.
          JvAssert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);
  if ((address & LOCKED)
      || !compare_and_swap(&(he -> address), address, (address | LOCKED)))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
        // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy ? find_heavy(addr, he) : 0);
        // The CAS succeeded, so was_heavy is still accurate.
      if (0 == hl)
        {
          // It is OK to use the lightweight lock, since either the
          // heavyweight lock does not exist, or none of the
          // heavyweight locks are currently in use.  Future threads
          // trying to acquire the lock will see the lightweight
          // one first and use that.
          he -> light_thr_id = self;  // OK, since nobody else can hold
                                      // the light lock or do this at the
                                      // same time.
          JvAssert(he -> light_count == 0);
          JvAssert(was_heavy == (he -> address & HEAVY));
          release_set(&(he -> address), (addr | was_heavy));
          LOG(ACQ_LIGHT2, addr | was_heavy, self);
        }
      else
        {
          // Must use the heavy lock.
          ++ (he -> heavy_count);
          JvAssert(0 == (address & ~HEAVY));
          release_set(&(he -> address), HEAVY);
          LOG(ACQ_HEAVY, addr | was_heavy, self);
          _Jv_MutexLock(&(hl->si.mutex));
          keep_live(addr);
        }
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
  {
    JvAssert(0 == he -> heavy_count || (address & HEAVY));
    heavy_lock *hl = get_heavy(addr, he);
    ++ (he -> heavy_count);
    release_set(&(he -> address), address | HEAVY);
    LOG(ACQ_HEAVY2, address | HEAVY, self);
    _Jv_MutexLock(&(hl->si.mutex));
    keep_live(addr);
  }
}

void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0.
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
        {
          if (count != 0)
            {
              // We held the lightweight lock all along.  Thus the values
              // we saw for light_thr_id and light_count must have been
              // valid.
              he -> light_count = count - 1;
              return;
            }
          else
            {
              // We hold the lightweight lock once.
              he -> light_thr_id = INVALID_THREAD_ID;
              if (compare_and_swap_release(&(he -> address), address,
                                           address & HEAVY))
                {
                  LOG(REL_LIGHT, address & HEAVY, self);
                  return;
                }
              else
                {
                  he -> light_thr_id = light_thr_id; // Undo prior damage.
                  goto retry;
                }
            }
        }
      // else the lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
        throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
        {
#         ifdef LOCK_DEBUG
          fprintf(stderr, "Lightweight lock held by other thread\n\t"
                          "light_thr_id = 0x%lx, self = 0x%lx, "
                          "address = 0x%lx, heavy_count = %d, pid = %d\n",
                          light_thr_id, self, (unsigned long)address,
                          he -> heavy_count, getpid());
          print_he(he);
          for (;;) {}
#         endif
          // Someone holds the lightweight lock for this object, and
          // it can't be us.
          throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
        }
      else
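// The two entry points above are used in matched pairs.  As a usage
// illustration only (a minimal sketch, not part of this file): libgcj's
// CNI header gcj/cni.h wraps the pair in an RAII guard, JvSynchronize;
// the MonitorGuard/with_lock names below are made up to spell out the
// same pattern.
#if 0  /* illustration, not compiled with the runtime */
#include <gcj/cni.h>

class MonitorGuard
{
  jobject obj;
public:
  MonitorGuard (jobject o) : obj (o) { _Jv_MonitorEnter (obj); }
  ~MonitorGuard () { _Jv_MonitorExit (obj); }  // runs even on a throw
};

static void
with_lock (jobject obj)
{
  MonitorGuard sync (obj);
  // ... critical section guarded by obj's monitor ...
}
#endif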