jslock.c
    fl_list_chunk_len = listc;
#endif /* !NSPR_LOCK */
    return JS_TRUE;
}

void
js_CleanupLocks()
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks) {
        for (i = 0; i < global_lock_count; i++)
            PR_DestroyLock(global_locks[i]);
        free(global_locks);
        global_locks = NULL;
        global_lock_count = 1;
        global_locks_log2 = 0;
        global_locks_mask = 0;
    }
    if (fl_list_table) {
        for (i = 0; i < fl_list_table_len; i++) {
            DeleteListOfFatlocks(fl_list_table[i].free);
            fl_list_table[i].free = NULL;
            DeleteListOfFatlocks(fl_list_table[i].taken);
            fl_list_table[i].taken = NULL;
        }
        free(fl_list_table);
        fl_list_table = NULL;
        fl_list_table_len = 0;
    }
#endif /* !NSPR_LOCK */
}

#ifndef NSPR_LOCK

/*
 * Fast locking and unlocking is implemented by delaying the allocation of a
 * system lock (fat lock) until contention.  As long as a locking thread A
 * runs uncontended, the lock is represented solely by storing A's identity
 * in the object being locked.
 *
 * If another thread B tries to lock the object currently locked by A, B is
 * enqueued into a fat lock structure (which might have to be allocated and
 * pointed to by the object), and suspended using NSPR condition variables
 * (wait).  A wait bit (Bacon bit) is set in the lock word of the object,
 * signalling to A that when releasing the lock, B must be dequeued and
 * notified.
 *
 * The basic operation of the locking primitives (js_Lock, js_Unlock,
 * js_Enqueue, and js_Dequeue) is compare-and-swap.  Hence, when locking into
 * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
 * is unlocked.  Similarly, when unlocking p, if compare-and-swap(p, A, 0)
 * succeeds, this implies that p is uncontended (no one is waiting because
 * the wait bit is not set).
 *
 * When dequeueing, the lock is released, and one of the threads suspended on
 * the lock is notified.  If other threads are still waiting, the wait bit is
 * kept (in js_Enqueue), and if not, the fat lock is deallocated.
 *
 * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
 * are serialized using a global lock.  For scalability, a hashtable of global
 * locks is used, which is indexed modulo the thin lock pointer.
 */
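/*
 * [Editorial sketch] The fast path described above can be made concrete with
 * a minimal, self-contained sketch.  This is NOT the engine's code: it uses
 * C11 <stdatomic.h> in place of js_CompareAndSwap, and ThinLockSketch,
 * WAIT_BIT, thin_trylock, and thin_tryunlock are hypothetical simplifications
 * of jsword/JSThinLock; the fat-lock slow path is elided.  It assumes thread
 * identities are word-aligned values, leaving the low bit free for the wait
 * (Bacon) bit, as the real code does.
 */
#if 0   /* illustrative only -- not compiled as part of jslock.c */
#include <stdatomic.h>
#include <stdint.h>

#define WAIT_BIT ((uintptr_t)1)         /* the "Bacon bit" */

typedef struct ThinLockSketch {
    _Atomic uintptr_t owner;            /* 0 = unlocked; else owner id, low bit = wait */
} ThinLockSketch;

/* Uncontended lock: one compare-and-swap of 0 -> me.  Success implies the
 * word was unlocked; failure sends the caller to the enqueue slow path,
 * which would CAS owner -> owner|WAIT_BIT and block. */
static int
thin_trylock(ThinLockSketch *tl, uintptr_t me)
{
    uintptr_t expected = 0;
    return atomic_compare_exchange_strong(&tl->owner, &expected, me);
}

/* Uncontended unlock: one compare-and-swap of me -> 0.  Failure implies the
 * wait bit is set, so a waiter must be dequeued and notified (elided). */
static int
thin_tryunlock(ThinLockSketch *tl, uintptr_t me)
{
    uintptr_t expected = me;
    return atomic_compare_exchange_strong(&tl->owner, &expected, 0);
}
#endif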
/*
 * Invariants:
 * (i) global lock is held
 * (ii) fl->susp >= 0
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}

/*
 * (i) global lock is held
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}

static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);
            else
                me = Thin_SetWait(me);
        } else if (js_CompareAndSwap(&tl->owner, 0, me)) {
            js_UnlockGlobal(tl);
            return;
        }
    }
}

static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!js_CompareAndSwap(&tl->owner, o, 0))       /* release it */
        JS_ASSERT(0);
    js_ResumeThread(tl);
}

JS_INLINE void
js_Lock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    if (js_CompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);
#endif
}

JS_INLINE void
js_Unlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(CURRENT_THREAD_IS_ME(me));

    /*
     * Only this thread (me) can hold the lock, so there is no need for an
     * atomic compare-and-swap in this common case.
     */
    if (tl->owner == me) {
        tl->owner = 0;
        return;
    }
    JS_ASSERT(Thin_GetWait(tl->owner));
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}

#endif /* !NSPR_LOCK */
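/*
 * [Editorial sketch] A rough POSIX-threads analogy for the suspend/resume
 * pair above, NOT the NSPR-based original: FatLockSketch, suspend_sketch,
 * and resume_sketch are hypothetical.  The point is the two-lock handoff:
 * the waiter acquires the per-lock mutex (slock) *before* dropping the
 * global lock, and the resumer acquires slock before signalling, so no
 * wakeup can be lost in between.  A bare wait suffices here because, as in
 * js_Enqueue, the caller re-checks the lock word in a loop after waking.
 */
#if 0   /* illustrative only -- pthreads analogy */
#include <pthread.h>

typedef struct FatLockSketch {
    pthread_mutex_t slock;
    pthread_cond_t  svar;
    int             susp;               /* suspended waiters, >= 0 */
} FatLockSketch;

/* Caller holds *global (cf. invariant (i) above). */
static void
suspend_sketch(FatLockSketch *fl, pthread_mutex_t *global)
{
    fl->susp++;
    pthread_mutex_lock(&fl->slock);
    pthread_mutex_unlock(global);       /* ~ js_UnlockGlobal */
    pthread_cond_wait(&fl->svar, &fl->slock);
    pthread_mutex_unlock(&fl->slock);
    pthread_mutex_lock(global);         /* ~ js_LockGlobal */
    fl->susp--;
}

/* Caller holds *global and fl->susp > 0 (cf. js_ResumeThread). */
static void
resume_sketch(FatLockSketch *fl, pthread_mutex_t *global)
{
    pthread_mutex_lock(&fl->slock);
    pthread_mutex_unlock(global);
    pthread_cond_signal(&fl->svar);
    pthread_mutex_unlock(&fl->slock);
}
#endif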
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = js_CurrentThreadId();
#endif
}

void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}

void
js_LockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    JS_ASSERT(CURRENT_THREAD_IS_ME(me));
    JS_ASSERT(scope->ownercx != cx);
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (scope->ownercx && ClaimScope(scope, cx))
        return;

    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
        JS_ASSERT(scope->u.count > 0);
        LOGIT(scope, '+');
        scope->u.count++;
    } else {
        JSThinLock *tl = &scope->lock;
        JS_LOCK0(tl, me);
        JS_ASSERT(scope->u.count == 0);
        LOGIT(scope, '1');
        scope->u.count = 1;
    }
}

void
js_UnlockScope(JSContext *cx, JSScope *scope)
{
    jsword me = CX_THINLOCK_ID(cx);

    /* We hope compilers use me instead of reloading cx->thread in the macro. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;
    if (cx->lockedSealedScope == scope) {
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If scope->ownercx is not null, it's likely that two contexts not using
     * requests have nested locks for scope.  The first context, cx here,
     * claimed scope; the second, scope->ownercx here, re-claimed it because
     * the first was not in a request, or was on the same thread.  We don't
     * want to keep track of such nesting, because it penalizes the common
     * non-nested case.  Instead of asserting here and silently coping, we
     * simply re-claim scope for cx and return.
     *
     * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
     * case where an asymmetric thread model (Mozilla's main thread is known
     * to be the only thread that runs the GC) combined with multiple contexts
     * per thread has led to such request-less nesting.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->u.count == 0);
        JS_ASSERT(scope->lock.owner == 0);
        scope->ownercx = cx;
        return;
    }

    JS_ASSERT(scope->u.count > 0);
    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    if (--scope->u.count == 0) {
        JSThinLock *tl = &scope->lock;
        JS_UNLOCK0(tl, me);
    }
}

/*
 * NB: oldscope may be null if our caller is js_GetMutableScope and it just
 * dropped the last reference to oldscope.
 */
void
js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
{
    jsword me;
    JSThinLock *tl;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));

    /*
     * If the last reference to oldscope went away, newscope needs no lock
     * state update.
     */
    if (!oldscope)
        return;
    JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));

    /*
     * Special case in js_LockScope and js_UnlockScope for the GC calling
     * code that locks, unlocks, or mutates.  Nothing to do in these cases,
     * because scope and newscope were "locked" by the GC thread, so neither
     * was actually locked.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    /*
     * Special case in js_LockObj and js_UnlockScope for locking the sealed
     * scope of an object that owns that scope (the prototype or mutated obj
     * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
     */
    JS_ASSERT(cx->lockedSealedScope != newscope);
    if (cx->lockedSealedScope == oldscope) {
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        cx->lockedSealedScope = NULL;
        return;
    }

    /*
     * If oldscope is single-threaded, there's nothing to do.
     */
    if (oldscope->ownercx) {
        JS_ASSERT(oldscope->ownercx == cx);
        JS_ASSERT(newscope->ownercx == cx ||
                  (!newscope->ownercx && newscope->u.count == 1));
        return;
    }

    /*
     * We transfer oldscope->u.count only if newscope is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
     * if they find newscope->ownercx != cx.
     */
    if (newscope->ownercx != cx) {
        JS_ASSERT(!newscope->ownercx);
        newscope->u.count = oldscope->u.count;
    }

    /*
     * Reset oldscope's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldscope->u.count = 0;
    tl = &oldscope->lock;
    me = CX_THINLOCK_ID(cx);
    JS_UNLOCK0(tl, me);
}

void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;

    JS_ASSERT(OBJ_IS_NATIVE(obj));

    /*
     * We must test whether the GC is calling and return without mutating any
     * state, especially cx->lockedSealedScope.  Note the asymmetry with
     * respect to js_UnlockObj, which is a thin layer on top of
     * js_UnlockScope.
     */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return;

    for (;;) {
        scope = OBJ_SCOPE(obj);
        if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
            !cx->lockedSealedScope) {
            cx->lockedSealedScope = scope;
            return;
        }

        js_LockScope(cx, scope);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockScope(cx, scope);
    }
}
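/*
 * [Editorial sketch] The retry loop in js_LockObj is an instance of a
 * general lock-then-revalidate pattern: the holder-to-lockable mapping
 * (obj -> scope) can change while we block, so after acquiring we must
 * re-check that we locked the currently installed part, else back out and
 * retry.  All names below (Holder, Part, current_part, ...) are
 * hypothetical stand-ins, not jslock.c APIs.
 */
#if 0   /* illustrative only */
struct Holder;
struct Part;
extern struct Part *current_part(struct Holder *h);    /* ~ OBJ_SCOPE */
extern void lock_part(struct Part *p);                 /* ~ js_LockScope */
extern void unlock_part(struct Part *p);               /* ~ js_UnlockScope */

static void
lock_holder(struct Holder *h)
{
    for (;;) {
        struct Part *p = current_part(h);
        lock_part(p);
        if (p == current_part(h))
            return;                     /* still current: done */
        unlock_part(p);                 /* lost a race with a mutator; retry */
    }
}
#endif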
void
js_UnlockObj(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(OBJ_IS_NATIVE(obj));
    js_UnlockScope(cx, OBJ_SCOPE(obj));
}

#ifdef DEBUG

JSBool
js_IsRuntimeLocked(JSRuntime *rt)
{
    return js_CurrentThreadId() == rt->rtLockOwner;
}

JSBool
js_IsObjLocked(JSContext *cx, JSObject *obj)
{
    JSScope *scope = OBJ_SCOPE(obj);

    return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
}

JSBool
js_IsScopeLocked(JSContext *cx, JSScope *scope)
{
    /* Special case: the GC locking any object's scope, see js_LockScope. */
    if (CX_THREAD_IS_RUNNING_GC(cx))
        return JS_TRUE;

    /* Special case: locked object owning a sealed scope, see js_LockObj. */
    if (cx->lockedSealedScope == scope)
        return JS_TRUE;

    /*
     * General case: the scope is either exclusively owned (by cx), or it has
     * a thin or fat lock to cope with shared (concurrent) ownership.
     */
    if (scope->ownercx) {
        JS_ASSERT(scope->ownercx == cx ||
                  scope->ownercx->thread == cx->thread);
        return JS_TRUE;
    }
    return js_CurrentThreadId() ==
           ((JSThread *)Thin_RemoveWait(ReadWord(scope->lock.owner)))->id;
}

#endif /* DEBUG */
#endif /* JS_THREADSAFE */
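/*
 * [Editorial sketch] The final comparison in js_IsScopeLocked decodes the
 * thin-lock word.  Under the hypothetical encoding from the first sketch
 * (WAIT_BIT in the low bit), the test reduces to masking off the wait bit
 * and comparing what remains against the current thread's identity.
 */
#if 0   /* illustrative only; WAIT_BIT as in the first sketch */
static int
word_owned_by(uintptr_t lockword, uintptr_t me)
{
    return (lockword & ~WAIT_BIT) == me;
}
#endif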