spinlock.c
void __pthread_alt_lock(struct _pthread_fastlock * lock,
                        pthread_descr self)
{
#if defined HAS_COMPARE_AND_SWAP
  long oldstatus, newstatus;
#endif
  struct wait_node wait_node;

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    int suspend_needed = 0;
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      wait_node.abandoned = 0;
      wait_node.next = (struct wait_node *) lock->__status;
      wait_node.thr = self;
      lock->__status = (long) &wait_node;
      suspend_needed = 1;
    }

    __pthread_release(&lock->__spinlock);

    if (suspend_needed)
      suspend (self);
    return;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      wait_node.thr = self;
      newstatus = (long) &wait_node;
    }
    wait_node.abandoned = 0;
    wait_node.next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));

  /* Suspend. Note that unlike in __pthread_lock, we don't worry here
     about spurious wakeup. That's because this lock is not used in
     situations where that can happen; the restart can only come from the
     previous lock owner. */

  if (oldstatus != 0)
    suspend(self);

  READ_MEMORY_BARRIER();
#endif
}

/* Timed-out lock operation; returns 0 to indicate timeout. */

int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
                            pthread_descr self, const struct timespec *abstime)
{
  long oldstatus = 0;
#if defined HAS_COMPARE_AND_SWAP
  long newstatus;
#endif
  struct wait_node *p_wait_node = wait_node_alloc();

  /* Out of memory, just give up and do ordinary lock. */
  if (p_wait_node == 0) {
    __pthread_alt_lock(lock, self);
    return 1;
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);

    if (lock->__status == 0)
      lock->__status = 1;
    else {
      if (self == NULL)
        self = thread_self();

      p_wait_node->abandoned = 0;
      p_wait_node->next = (struct wait_node *) lock->__status;
      p_wait_node->thr = self;
      lock->__status = (long) p_wait_node;
      oldstatus = 1; /* force suspend */
    }

    __pthread_release(&lock->__spinlock);
    goto suspend;
  }
#endif

#if defined HAS_COMPARE_AND_SWAP
  do {
    oldstatus = lock->__status;
    if (oldstatus == 0) {
      newstatus = 1;
    } else {
      if (self == NULL)
        self = thread_self();
      p_wait_node->thr = self;
      newstatus = (long) p_wait_node;
    }
    p_wait_node->abandoned = 0;
    p_wait_node->next = (struct wait_node *) oldstatus;
    /* Make sure the store in wait_node.next completes before performing
       the compare-and-swap */
    MEMORY_BARRIER();
  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  suspend:
#endif

  /* If we did not get the lock, do a timed suspend. If we wake up due
     to a timeout, then there is a race; the old lock owner may try
     to remove us from the queue. This race is resolved by us and the owner
     doing an atomic testandset() to change the state of the wait node from 0
     to 1. If we succeed, then it's a timeout and we abandon the node in the
     queue. If we fail, it means the owner gave us the lock. */

  if (oldstatus != 0) {
    if (timedsuspend(self, abstime) == 0) {
      if (!testandset(&p_wait_node->abandoned))
        return 0; /* Timeout! */

      /* Eat outstanding resume from owner, otherwise wait_node_free() below
         will race with owner's wait_node_dequeue(). */
      suspend(self);
    }
  }

  wait_node_free(p_wait_node);

  READ_MEMORY_BARRIER();

  return 1; /* Got the lock! */
}

void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
  struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
  struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
  int maxprio;

  WRITE_MEMORY_BARRIER();

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_acquire(&lock->__spinlock);
  }
#endif

  while (1) {

    /* If no threads are waiting for this lock, try to just atomically
       release it. */
#if defined TEST_FOR_COMPARE_AND_SWAP
    if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
    {
      if (lock->__status == 0 || lock->__status == 1) {
        lock->__status = 0;
        break;
      }
    }
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
    else
#endif
#if defined HAS_COMPARE_AND_SWAP
    {
      long oldstatus = lock->__status;
      if (oldstatus == 0 || oldstatus == 1) {
        if (__compare_and_swap_with_release_semantics (&lock->__status,
            oldstatus, 0))
          break;
        else
          continue;
      }
    }
#endif

    /* Process the entire queue of wait nodes. Remove all abandoned
       wait nodes and put them into the global free queue, and
       remember the one unabandoned node which refers to the thread
       having the highest priority. */

    pp_max_prio = pp_node = pp_head;
    p_max_prio = p_node = *pp_head;
    maxprio = INT_MIN;

    READ_MEMORY_BARRIER(); /* Prevent access to stale data through p_node */

    while (p_node != (struct wait_node *) 1) {
      int prio;

      if (p_node->abandoned) {
        /* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
        if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
          *pp_node = p_node->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
        else
#endif
#if defined HAS_COMPARE_AND_SWAP
          wait_node_dequeue(pp_head, pp_node, p_node);
#endif
        wait_node_free(p_node);
        /* Note that the next assignment may take us to the beginning
           of the queue, to newly inserted nodes, if pp_node == pp_head.
           In that case we need a memory barrier to stabilize the first of
           these new nodes. */
        p_node = *pp_node;
        if (pp_node == pp_head)
          READ_MEMORY_BARRIER(); /* No stale reads through p_node */
        continue;
      } else if ((prio = p_node->thr->p_priority) >= maxprio) {
        /* Otherwise remember it if its thread has a higher or equal priority
           compared to that of any node seen thus far. */
        maxprio = prio;
        pp_max_prio = pp_node;
        p_max_prio = p_node;
      }

      /* This cannot jump backward in the list, so no further read
         barrier is needed. */
      pp_node = &p_node->next;
      p_node = *pp_node;
    }

    /* If all threads abandoned, go back to top */
    if (maxprio == INT_MIN)
      continue;

    ASSERT (p_max_prio != (struct wait_node *) 1);

    /* Now we want to remove the max priority thread's wait node from
       the list. Before we can do this, we must atomically try to change the
       node's abandon state from zero to nonzero. If we succeed, that means we
       have the node that we will wake up. If we failed, then it means the
       thread timed out and abandoned the node in which case we repeat the
       whole unlock operation. */

    if (!testandset(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
      if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
        *pp_max_prio = p_max_prio->next;
#endif
#if defined TEST_FOR_COMPARE_AND_SWAP
      else
#endif
#if defined HAS_COMPARE_AND_SWAP
        wait_node_dequeue(pp_head, pp_max_prio, p_max_prio);
#endif
      restart(p_max_prio->thr);
      break;
    }
  }

#if defined TEST_FOR_COMPARE_AND_SWAP
  if (!__pthread_has_cas)
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
  {
    __pthread_release(&lock->__spinlock);
  }
#endif
}

/* Compare-and-swap emulation with a spinlock */

#ifdef TEST_FOR_COMPARE_AND_SWAP
int __pthread_has_cas = 0;
#endif

#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP

int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
                               int * spinlock)
{
  int res;

  __pthread_acquire(spinlock);

  if (*ptr == oldval) {
    *ptr = newval;
    res = 1;
  } else {
    res = 0;
  }

  __pthread_release(spinlock);

  return res;
}

#endif

/* The retry strategy is as follows:
   - We test and set the spinlock MAX_SPIN_COUNT times, calling
     sched_yield() each time.  This gives ample opportunity for other
     threads with priority >= our priority to make progress and
     release the spinlock.
   - If a thread with priority < our priority owns the spinlock,
     calling sched_yield() repeatedly is useless, since we're preventing
     the owning thread from making progress and releasing the spinlock.
     So, after MAX_SPIN_COUNT attempts, we suspend the calling thread
     using nanosleep().  This again should give time to the owning thread
     for releasing the spinlock.
     Notice that the nanosleep() interval must not be too small,
     since the kernel does busy-waiting for short intervals in a realtime
     process (!).  The smallest duration that guarantees thread
     suspension is currently 2ms.
   - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
     sched_yield(), then sleeping again if needed. */

static void __pthread_acquire(int * spinlock)
{
  int cnt = 0;
  struct timespec tm;

  READ_MEMORY_BARRIER();

  while (testandset(spinlock)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}
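The timeout handling in __pthread_alt_timedlock and the wakeup selection in __pthread_alt_unlock both hinge on one handshake: the timed-out waiter and the unlocking owner each try to flip the wait node's abandoned field from 0 to 1 with an atomic test-and-set, and exactly one of them can win. The sketch below restates only that protocol with portable C11 atomics; the type and function names (handshake_node, waiter_timed_out, owner_grant) are illustrative and not part of the library.

#include <stdatomic.h>
#include <stdbool.h>

/* Minimal stand-in for the 'abandoned' field of a wait node.
   Initialise with ATOMIC_FLAG_INIT.  */
typedef struct {
  atomic_flag abandoned;   /* clear = node live, set = already claimed */
} handshake_node;

/* Waiter side, run when the timed suspend expires.  Returns true if the
   timeout stands (we claimed the node first and leave it queued for the
   owner to reap later), false if the owner already granted us the lock.  */
static bool waiter_timed_out(handshake_node *n)
{
  /* atomic_flag_test_and_set returns the previous value: false means we
     were the first to flip it.  */
  return !atomic_flag_test_and_set(&n->abandoned);
}

/* Owner side, run while handing the lock to this waiter.  Returns true if
   the grant succeeded and the waiter must be restarted, false if the waiter
   already timed out, in which case the owner picks another candidate.  */
static bool owner_grant(handshake_node *n)
{
  return !atomic_flag_test_and_set(&n->abandoned);
}

For any given node at most one of the two calls can return true, which is why neither side needs to hold a common lock while resolving the race.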
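The retry strategy documented above can also be written against C11 atomics rather than the architecture-specific testandset() and memory-barrier macros. This is a sketch under assumptions, not the library's implementation: the MAX_SPIN_COUNT and SPIN_SLEEP_DURATION values are illustrative (the comment only requires the sleep to be at least roughly 2 ms).

#include <stdatomic.h>
#include <sched.h>
#include <time.h>

#define MAX_SPIN_COUNT      50        /* illustrative spin budget */
#define SPIN_SLEEP_DURATION 2000001   /* just over 2 ms, in nanoseconds */

/* Spin with sched_yield() up to MAX_SPIN_COUNT times, then sleep for
   roughly 2 ms so a lower-priority lock holder can run, and repeat until
   the test-and-set succeeds.  */
static void acquire_spinlock(atomic_flag *spinlock)
{
  int cnt = 0;
  struct timespec tm;

  while (atomic_flag_test_and_set_explicit(spinlock, memory_order_acquire)) {
    if (cnt < MAX_SPIN_COUNT) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = SPIN_SLEEP_DURATION;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}

static void release_spinlock(atomic_flag *spinlock)
{
  /* Release ordering plays the role of the write barrier issued before
     the store that frees the lock.  */
  atomic_flag_clear_explicit(spinlock, memory_order_release);
}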