stl_threads.h

来自「ARM Linux Tool 各种代码包括MTD」· C头文件 代码 · 共 513 行 · 第 1/2 页

H
513
字号
/* * Copyright (c) 1997-1999 * Silicon Graphics Computer Systems, Inc. * * Permission to use, copy, modify, distribute and sell this software * and its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and * that both that copyright notice and this permission notice appear * in supporting documentation.  Silicon Graphics makes no * representations about the suitability of this software for any * purpose.  It is provided "as is" without express or implied warranty. */// WARNING: This is an internal header file, included by other C++// standard library headers.  You should not attempt to use this header// file directly.// Stl_config.h should be included before this file.#ifndef __SGI_STL_INTERNAL_THREADS_H#define __SGI_STL_INTERNAL_THREADS_H// Supported threading models are native SGI, pthreads, uithreads// (similar to pthreads, but based on an earlier draft of the Posix// threads standard), and Win32 threads.  Uithread support by Jochen// Schlick, 1999.// GCC extension begin// In order to present a stable threading configuration, in all cases,// gcc looks for it's own abstraction layer before all others.  All// modifications to this file are marked to allow easier importation of// STL upgrades.#if defined(__STL_GTHREADS)#include "bits/gthr.h"#else// GCC extension end#if defined(__STL_SGI_THREADS)#include <mutex.h>#include <time.h>#elif defined(__STL_PTHREADS)#include <pthread.h>#elif defined(__STL_UITHREADS)#include <thread.h>#include <synch.h>#elif defined(__STL_WIN32THREADS)#include <windows.h>#endif// GCC extension begin#endif// GCC extension endnamespace std{// Class _Refcount_Base provides a type, _RC_t, a data member,// _M_ref_count, and member functions _M_incr and _M_decr, which perform// atomic preincrement/predecrement.  
// The constructor initializes _M_ref_count.

// Hack for SGI o32 compilers: that ABI lacks the __add_and_fetch /
// __test_and_set intrinsics, so map them onto the older add_then_test /
// test_and_set primitives from <mutex.h>.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)  
#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
#endif /* o32 */

// Thread-safe reference-count base class.  Provides the counter type
// _RC_t, the counter itself (_M_ref_count), and atomic
// pre-increment/pre-decrement via _M_incr()/_M_decr().  Exactly one of
// the platform branches below is compiled in, selected by the
// __STL_GTHREADS / __STL_*THREADS configuration macros.
struct _Refcount_Base
{
  // The type _RC_t: long on Win32 (the Interlocked* API operates on
  // long), size_t everywhere else.
# ifdef __STL_WIN32THREADS
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
#endif

  // The data member _M_ref_count.  volatile because it is mutated from
  // multiple threads (pre-C++11 idiom; atomicity actually comes from
  // the lock or intrinsic used in _M_incr/_M_decr below).
  volatile _RC_t _M_ref_count;

  // Constructor: sets the count to __n and, on configurations that
  // emulate atomicity with a mutex, initializes that mutex.
// GCC extension begin
#ifdef __STL_GTHREADS
  // gcc gthreads abstraction: guard the counter with a gthr mutex.
  __gthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    {
#ifdef __GTHREAD_MUTEX_INIT
      // Port supplies a static initializer: copy it into the member.
      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
      _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      // Otherwise the port supplies a runtime init function.
      __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
    }
#else
// GCC extension end
# ifdef __STL_PTHREADS
  // POSIX threads: counter guarded by a pthread mutex.
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  // UI (Solaris) threads: counter guarded by a mutex_t.
  mutex_t         _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  // SGI (lock-free intrinsics), Win32 (Interlocked*), or no threads:
  // no mutex member is needed.
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif
// GCC extension begin
#endif
// GCC extension end

  // _M_incr: atomically pre-increment the count.
  // _M_decr: atomically pre-decrement and return the NEW value
  // (a zero result tells the caller the last reference is gone).
// GCC extension begin
#ifdef __STL_GTHREADS
  void _M_incr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    // Capture the decremented value while still holding the lock.
    volatile _RC_t __tmp = --_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
#else
// GCC extension end
  // _M_incr and _M_decr
# ifdef __STL_SGI_THREADS
  // SGI: lock-free via the (possibly o32-remapped, see hack above)
  // __add_and_fetch intrinsic.
  void _M_incr() {  __add_and_fetch(&_M_ref_count, 1); }
  // Decrement by adding (size_t)-1, i.e. all-ones, which wraps to -1.
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  // Win32: lock-free via the Interlocked* API (hence _RC_t == long).
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  // POSIX threads: emulate atomicity with the member mutex.
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  // UI threads: same mutex-based emulation.
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    /*volatile*/ _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else  /* No threads */
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif
// GCC extension begin
#endif
// GCC extension end
};

// Atomic swap on unsigned long
// This is guaranteed to behave as though it were atomic only if all
// possibly concurrent updates use _Atomic_swap.
// In some cases the operation is emulated with a lock.
// GCC extension begin
#ifdef __STL_GTHREADS
// We don't provide an _Atomic_swap in this configuration.  This only
// affects the use of ext/rope with threads.  Someone could add this
// later, if required.  You can start by cloning the __STL_PTHREADS
// path while making the obvious changes.
// Later it could be optimized
// to use the atomicity.h abstraction layer from libstdc++-v3.
#else
// GCC extension end
# ifdef __STL_SGI_THREADS
    // SGI: native test_and_set primitive; the __test_and_set spelling
    // on newer ABIs (see the o32 hack earlier in this header).
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
#       if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
            return test_and_set(__p, __q);
#       else
            return __test_and_set(__p, (unsigned long)__q);
#       endif
    }
# elif defined(__STL_WIN32THREADS)
    // Win32: InterlockedExchange swaps and returns the previous value.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
    }
# elif defined(__STL_PTHREADS)
    // We use a template here only to get a unique initialized instance:
    // static data of a template is instantiated once per program even
    // from a header.
    template<int __dummy>
    struct _Swap_lock_struct {
        static pthread_mutex_t _S_swap_lock;
    };

    template<int __dummy>
    pthread_mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.  (Every swap in the program contends on one global lock.)
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;
    }
# elif defined(__STL_UITHREADS)
    // We use a template here only to get a unique initialized instance.
    template<int __dummy>
    struct _Swap_lock_struct {
        static mutex_t _S_swap_lock;
    };

    template<int __dummy>
    mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;
    }
# elif defined (__STL_SOLARIS_THREADS)
    // any better solutions ?
    // We use a template here only to get a unique initialized instance.
    template<int __dummy>
    struct _Swap_lock_struct {
        static mutex_t _S_swap_lock;
    };

# if ( __STL_STATIC_TEMPLATE_DATA > 0 )
    template<int __dummy>
    mutex_t
    _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
#  else
    // Compilers without working static template data use the STLport
    // __DECLARE_INSTANCE workaround instead.
    __DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                        =DEFAULTMUTEX);
# endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */

    // This should be portable, but performance is expected
    // to be quite awful.  This really needs platform specific
    // code.
    // NOTE(review): this function continues past the visible portion of
    // the file (page 1 of 2) — the remainder follows on the next page.
    inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
        mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
        unsigned long __result = *__p;
        *__p = __q;
        mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
        return __result;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?