 * [jslock.c — code-viewer page chrome removed from inside this comment]
* If scope->u.link is non-null, scope has already been inserted on * the rt->scopeSharingTodo list, because another thread's context * already wanted to lock scope while ownercx was running a request. * We can't claim any scope whose u.link is non-null at this point, * even if ownercx->requestDepth is 0 (see below where we suspend our * request before waiting on rt->scopeSharingDone). */ if (!scope->u.link && (!js_ValidContextPointer(rt, ownercx) || !ownercx->requestDepth || ownercx->thread == cx->thread)) { JS_ASSERT(scope->u.count == 0); scope->ownercx = cx; JS_UNLOCK_GC(rt); JS_RUNTIME_METER(rt, claimedScopes); return JS_TRUE; } /* * Avoid deadlock if scope's owner context is waiting on a scope that * we own, by revoking scope's ownership. This approach to deadlock * avoidance works because the engine never nests scope locks, except * for the notable case of js_SetProtoOrParent (see jsobj.c). * * If cx could hold locks on ownercx->scopeToShare, or if ownercx * could hold locks on scope, we would need to keep reentrancy counts * for all such "flyweight" (ownercx != NULL) locks, so that control * would unwind properly once these locks became "thin" or "fat". * Apart from the js_SetProtoOrParent exception, the engine promotes * a scope from exclusive to shared access only when locking, never * when holding or unlocking. * * If ownercx's thread is calling js_SetProtoOrParent, trying to lock * the inner scope (the scope of the object being set as the prototype * of the outer object), ShareScope will find the outer object's scope * at rt->setSlotScope. If it's the same as scope, we give it a lock * held by ownercx's thread with reentrancy count of 1, then we return * here and break. After that we unwind to js_[GS]etSlotThreadSafe or * js_LockScope (our caller), where we wait on the newly-fattened lock * until ownercx's thread unwinds from js_SetProtoOrParent. 
* * Avoid deadlock before any of this scope/context cycle detection if * cx is on the active GC's thread, because in that case, no requests * will run until the GC completes. Any scope wanted by the GC (from * a finalizer) that can't be claimed must be slated for sharing. */ if (rt->gcThread == cx->thread || (ownercx->scopeToShare && WillDeadlock(ownercx->scopeToShare, cx))) { ShareScope(rt, scope); break; } /* * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we * can decide whether scope is on rt->scopeSharingTodo with a single * non-null test, and avoid double-insertion bugs. */ if (!scope->u.link) { scope->u.link = rt->scopeSharingTodo; rt->scopeSharingTodo = scope; js_HoldObjectMap(cx, &scope->map); } /* * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone, * saving and clearing cx->requestDepth so we don't deadlock if the * GC needs to run on ownercx. * * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not * to decrement rt->requestCount if cx is active on the GC's thread, * because the GC has already reduced rt->requestCount to exclude all * such such contexts. */ saveDepth = cx->requestDepth; if (saveDepth) { cx->requestDepth = 0; if (rt->gcThread != cx->thread) { JS_ASSERT(rt->requestCount > 0); rt->requestCount--; if (rt->requestCount == 0) JS_NOTIFY_REQUEST_DONE(rt); } } /* * We know that some other thread's context owns scope, which is now * linked onto rt->scopeSharingTodo, awaiting the end of that other * thread's request. So it is safe to wait on rt->scopeSharingDone. */ cx->scopeToShare = scope; stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT); JS_ASSERT(stat != PR_FAILURE); /* * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone, * restoring cx->requestDepth. Same note as above for the inlined, * specialized JS_SuspendRequest code: beware rt->gcThread. 
*/ if (saveDepth) { if (rt->gcThread != cx->thread) { while (rt->gcLevel > 0) JS_AWAIT_GC_DONE(rt); rt->requestCount++; } cx->requestDepth = saveDepth; } /* * Don't clear cx->scopeToShare until after we're through waiting on * all condition variables protected by rt->gcLock -- that includes * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE, * in the inlined JS_ResumeRequest code immediately above). * * Otherwise, the GC could easily deadlock with another thread that * owns a scope wanted by a finalizer. By keeping cx->scopeToShare * set till here, we ensure that such deadlocks are detected, which * results in the finalized object's scope being shared (it must, of * course, have other, live objects sharing it). */ cx->scopeToShare = NULL; } JS_UNLOCK_GC(rt); return JS_FALSE;}/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */JS_FRIEND_API(jsval)js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot){ jsval v; JSScope *scope;#ifndef NSPR_LOCK JSThinLock *tl; jsword me;#endif /* * We handle non-native objects via JSObjectOps.getRequiredSlot, treating * all slots starting from 0 as required slots. A property definition or * some prior arrangement must have allocated slot. * * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef) * the crucial distinction between a |required slot number| that's passed * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index| * passed to the JS_Get/SetReservedSlot APIs. */ if (!OBJ_IS_NATIVE(obj)) return OBJ_GET_REQUIRED_SLOT(cx, obj, slot); /* * Native object locking is inlined here to optimize the single-threaded * and contention-free multi-threaded cases. */ scope = OBJ_SCOPE(obj); JS_ASSERT(scope->ownercx != cx); JS_ASSERT(obj->slots && slot < obj->map->freeslot); /* * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h). * Also avoid locking an object owning a sealed scope. 
If neither of those * special cases applies, try to claim scope's flyweight lock from whatever * context may have had it in an earlier request. */ if (CX_THREAD_IS_RUNNING_GC(cx) || (SCOPE_IS_SEALED(scope) && scope->object == obj) || (scope->ownercx && ClaimScope(scope, cx))) { return obj->slots[slot]; }#ifndef NSPR_LOCK tl = &scope->lock; me = CX_THINLOCK_ID(cx); JS_ASSERT(CURRENT_THREAD_IS_ME(me)); if (js_CompareAndSwap(&tl->owner, 0, me)) { /* * Got the lock with one compare-and-swap. Even so, someone else may * have mutated obj so it now has its own scope and lock, which would * require either a restart from the top of this routine, or a thin * lock release followed by fat lock acquisition. */ if (scope == OBJ_SCOPE(obj)) { v = obj->slots[slot]; if (!js_CompareAndSwap(&tl->owner, me, 0)) { /* Assert that scope locks never revert to flyweight. */ JS_ASSERT(scope->ownercx != cx); LOGIT(scope, '1'); scope->u.count = 1; js_UnlockObj(cx, obj); } return v; } if (!js_CompareAndSwap(&tl->owner, me, 0)) js_Dequeue(tl); } else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) { return obj->slots[slot]; }#endif js_LockObj(cx, obj); v = obj->slots[slot]; /* * Test whether cx took ownership of obj's scope during js_LockObj. * * This does not mean that a given scope reverted to flyweight from "thin" * or "fat" -- it does mean that obj's map pointer changed due to another * thread setting a property, requiring obj to cease sharing a prototype * object's scope (whose lock was not flyweight, else we wouldn't be here * in the first place!). */ scope = OBJ_SCOPE(obj); if (scope->ownercx != cx) js_UnlockScope(cx, scope); return v;}voidjs_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v){ JSScope *scope;#ifndef NSPR_LOCK JSThinLock *tl; jsword me;#endif /* Any string stored in a thread-safe object must be immutable. 
*/ if (JSVAL_IS_STRING(v)) MAKE_STRING_IMMUTABLE(cx->runtime, v, &v); /* * We handle non-native objects via JSObjectOps.setRequiredSlot, as above * for the Get case. */ if (!OBJ_IS_NATIVE(obj)) { OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v); return; } /* * Native object locking is inlined here to optimize the single-threaded * and contention-free multi-threaded cases. */ scope = OBJ_SCOPE(obj); JS_ASSERT(scope->ownercx != cx); JS_ASSERT(obj->slots && slot < obj->map->freeslot); /* * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h). * Also avoid locking an object owning a sealed scope. If neither of those * special cases applies, try to claim scope's flyweight lock from whatever * context may have had it in an earlier request. */ if (CX_THREAD_IS_RUNNING_GC(cx) || (SCOPE_IS_SEALED(scope) && scope->object == obj) || (scope->ownercx && ClaimScope(scope, cx))) { obj->slots[slot] = v; return; }#ifndef NSPR_LOCK tl = &scope->lock; me = CX_THINLOCK_ID(cx); JS_ASSERT(CURRENT_THREAD_IS_ME(me)); if (js_CompareAndSwap(&tl->owner, 0, me)) { if (scope == OBJ_SCOPE(obj)) { obj->slots[slot] = v; if (!js_CompareAndSwap(&tl->owner, me, 0)) { /* Assert that scope locks never revert to flyweight. */ JS_ASSERT(scope->ownercx != cx); LOGIT(scope, '1'); scope->u.count = 1; js_UnlockObj(cx, obj); } return; } if (!js_CompareAndSwap(&tl->owner, me, 0)) js_Dequeue(tl); } else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) { obj->slots[slot] = v; return; }#endif js_LockObj(cx, obj); obj->slots[slot] = v; /* * Same drill as above, in js_GetSlotThreadSafe. Note that we cannot * assume obj has its own mutable scope (where scope->object == obj) yet, * because OBJ_SET_SLOT is called for the "universal", common slots such * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope. * See also the JSPROP_SHARED attribute and its usage. 
*/ scope = OBJ_SCOPE(obj); if (scope->ownercx != cx) js_UnlockScope(cx, scope);}#ifndef NSPR_LOCKstatic JSFatLock *NewFatlock(){ JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */ if (!fl) return NULL; fl->susp = 0; fl->next = NULL; fl->prevp = NULL; fl->slock = PR_NewLock(); fl->svar = PR_NewCondVar(fl->slock); return fl;}static voidDestroyFatlock(JSFatLock *fl){ PR_DestroyLock(fl->slock); PR_DestroyCondVar(fl->svar); free(fl);}static JSFatLock *ListOfFatlocks(int listc){ JSFatLock *m; JSFatLock *m0; int i; JS_ASSERT(listc>0); m0 = m = NewFatlock(); for (i=1; i<listc; i++) { m->next = NewFatlock(); m = m->next; } return m0;}static voidDeleteListOfFatlocks(JSFatLock *m){ JSFatLock *m0; for (; m; m=m0) { m0 = m->next; DestroyFatlock(m); }}static JSFatLockTable *fl_list_table = NULL;static uint32 fl_list_table_len = 0;static uint32 fl_list_chunk_len = 0;static JSFatLock *GetFatlock(void *id){ JSFatLock *m; uint32 i = GLOBAL_LOCK_INDEX(id); if (fl_list_table[i].free == NULL) {#ifdef DEBUG if (fl_list_table[i].taken) printf("Ran out of fat locks!\n");#endif fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len); } m = fl_list_table[i].free; fl_list_table[i].free = m->next; m->susp = 0; m->next = fl_list_table[i].taken; m->prevp = &fl_list_table[i].taken; if (fl_list_table[i].taken) fl_list_table[i].taken->prevp = &m->next; fl_list_table[i].taken = m; return m;}static voidPutFatlock(JSFatLock *m, void *id){ uint32 i; if (m == NULL) return; /* Unlink m from fl_list_table[i].taken. */ *m->prevp = m->next; if (m->next) m->next->prevp = m->prevp; /* Insert m in fl_list_table[i].free. 
*/ i = GLOBAL_LOCK_INDEX(id); m->next = fl_list_table[i].free; fl_list_table[i].free = m;}#endif /* !NSPR_LOCK */JSBooljs_SetupLocks(int listc, int globc){#ifndef NSPR_LOCK uint32 i; if (global_locks) return JS_TRUE;#ifdef DEBUG if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */ printf("Bad number %d in js_SetupLocks()!\n", listc); if (globc > 100 || globc < 0) /* globc == number of global locks */ printf("Bad number %d in js_SetupLocks()!\n", listc);#endif global_locks_log2 = JS_CeilingLog2(globc); global_locks_mask = JS_BITMASK(global_locks_log2); global_lock_count = JS_BIT(global_locks_log2); global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*)); if (!global_locks) return JS_FALSE; for (i = 0; i < global_lock_count; i++) { global_locks[i] = PR_NewLock(); if (!global_locks[i]) { global_lock_count = i; js_CleanupLocks(); return JS_FALSE; } } fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable)); if (!fl_list_table) { js_CleanupLocks(); return JS_FALSE; } fl_list_table_len = global_lock_count; for (i = 0; i < global_lock_count; i++) fl_list_table[i].free = fl_list_table[i].taken = NULL;
    /*
     * [End of visible chunk: js_SetupLocks continues in the full file.
     *  Code-viewer UI text (font-size / keyboard-shortcut help) removed.]
     */