/* stl/_threads.h — STLport internal threads header.
 * (Captured from a source viewer: 688 lines total, this is page 1 of 2.) */
/* * Copyright (c) 1997-1999 * Silicon Graphics Computer Systems, Inc. * * Copyright (c) 1999 * Boris Fomitchev * * This material is provided "as is", with absolutely no warranty expressed * or implied. Any use is at your own risk. * * Permission to use or copy this software for any purpose is hereby granted * without fee, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * */// WARNING: This is an internal header file, included by other C++// standard library headers.  You should not attempt to use this header// file directly.#ifndef _STLP_INTERNAL_THREADS_H#define _STLP_INTERNAL_THREADS_H// Supported threading models are native SGI, pthreads, uithreads// (similar to pthreads, but based on an earlier draft of the Posix// threads standard), and Win32 threads.  Uithread support by Jochen// Schlick, 1999, and Solaris threads generalized to them.#ifndef _STLP_INTERNAL_CSTDDEF#  include <stl/_cstddef.h>#endif#ifndef _STLP_INTERNAL_CSTDLIB#  include <stl/_cstdlib.h>#endif// On SUN and Mac OS X gcc, zero-initialization works just fine...#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))#  define _STLP_MUTEX_INITIALIZER#endif/* This header defines the following atomic operation that platform should * try to support as much as possible. Atomic operation are exposed as macro * in order to easily test for their existance. 
They are: * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) : * increment *__ptr by 1 and returns the new value * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) : * decrement  *__ptr by 1 and returns the new value * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) : * assign __val to *__target and returns former *__target value * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) : * assign __ptr to *__target and returns former *__target value */#if defined (_STLP_THREADS)#  if defined (_STLP_SGI_THREADS)#    include <mutex.h>// Hack for SGI o32 compilers.#    if !defined(__add_and_fetch) && \        (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))#      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)#      define __test_and_set(__l,__v)  test_and_set(__l,__v)#    endif /* o32 */#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))#      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)#    else#      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)#    endif#    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)#    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)typedef long __stl_atomic_t;#  elif defined (_STLP_PTHREADS)#    include <pthread.h>#    if !defined (_STLP_USE_PTHREAD_SPINLOCK)#      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)#        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }#      endif//HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl#      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)#        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default#      else#        define _STLP_PTHREAD_ATTR_DEFAULT 0#      endif#    else#      if defined 
(__OpenBSD__)#        include <spinlock.h>#      endif#    endif#    if defined (__GNUC__) && defined (__i386__)#      if !defined (_STLP_ATOMIC_INCREMENT)inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {  long result;  __asm__ __volatile__    ("lock; xaddl  %1, %0;"    :"=m" (*p), "=r" (result)    :"m" (*p),  "1"  (1)    :"cc");  return result + 1;}#        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))#      endif#      if !defined (_STLP_ATOMIC_DECREMENT)inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {  long result;  __asm__ __volatile__    ("lock; xaddl  %1, %0;"    :"=m" (*p), "=r" (result)    :"m" (*p),  "1"  (-1)    :"cc");  return result - 1;}#        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))#      endiftypedef long __stl_atomic_t;#    elsetypedef size_t __stl_atomic_t;#    endif /* if defined(__GNUC__) && defined(__i386__) */#  elif defined (_STLP_WIN32THREADS)#    if !defined (_STLP_ATOMIC_INCREMENT)#      if !defined (_STLP_NEW_PLATFORM_SDK)#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)#      else#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)#      endif#      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)#    endiftypedef long __stl_atomic_t;#  elif defined (__DECC) || defined (__DECCXX)#    include <machine/builtins.h>#    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG#    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)#    define 
_STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)typedef long __stl_atomic_t;#  elif defined (_STLP_SPARC_SOLARIS_THREADS)typedef long __stl_atomic_t;#    include <stl/_sparc_atomic.h>#  elif defined (_STLP_UITHREADS)// this inclusion is potential hazard to bring up all sorts// of old-style headers. Let's assume vendor already know how// to deal with that.#    ifndef _STLP_INTERNAL_CTIME#      include <stl/_ctime.h>#    endif#    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)using _STLP_VENDOR_CSTD::time_t;#    endif#    include <synch.h>#    ifndef _STLP_INTERNAL_CSTDIO#      include <stl/_cstdio.h>#    endif#    ifndef _STLP_INTERNAL_CWCHAR#      include <stl/_cwchar.h>#    endiftypedef size_t __stl_atomic_t;#  elif defined (_STLP_BETHREADS)#    include <OS.h>#    include <cassert>#    include <stdio.h>#    define _STLP_MUTEX_INITIALIZER = { 0 }typedef size_t __stl_atomic_t;#  elif defined (_STLP_NWTHREADS)#    include <nwthread.h>#    include <nwsemaph.h>typedef size_t __stl_atomic_t;#  elif defined(_STLP_OS2THREADS)#    if defined (__GNUC__)#      define INCL_DOSSEMAPHORES#      include <os2.h>#    else// This section serves to replace os2.h for VisualAge C++  typedef unsigned long ULONG;#      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */#        define __HEV__  typedef ULONG HEV;  typedef HEV*  PHEV;#      endif  typedef ULONG APIRET;  typedef ULONG HMTX;  typedef HMTX*  PHMTX;  typedef const char*  PCSZ;  typedef ULONG BOOL32;  APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);  APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);  APIRET _System DosReleaseMutexSem(HMTX hmtx);  APIRET _System DosCloseMutexSem(HMTX hmtx);#      define _STLP_MUTEX_INITIALIZER = { 0 }#    endif /* GNUC */typedef size_t __stl_atomic_t;#  elsetypedef size_t __stl_atomic_t;#  endif#else/* no threads */#  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)#  define _STLP_ATOMIC_DECREMENT(__x) 
--(*__x)/* We do not grant other atomic operations as they are useless if STLport do not have * to be thread safe */typedef size_t __stl_atomic_t;#endif#if !defined (_STLP_MUTEX_INITIALIZER)#  if defined(_STLP_ATOMIC_EXCHANGE)#    define _STLP_MUTEX_INITIALIZER = { 0 }#  elif defined(_STLP_UITHREADS)#    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }#  else#    define _STLP_MUTEX_INITIALIZER#  endif#endif_STLP_BEGIN_NAMESPACE#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)// Helper struct.  This is a workaround for various compilers that don't// handle static variables in inline functions properly.template <int __inst>struct _STLP_mutex_spin {  enum { __low_max = 30, __high_max = 1000 };  // Low if we suspect uniprocessor, high for multiprocessor.  static unsigned __max;  static unsigned __last;  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);  static void _STLP_CALL _S_nsec_sleep(int __log_nsec, unsigned int& __iteration);};#endif // !_STLP_USE_PTHREAD_SPINLOCK// Locking class.  Note that this class *does not have a constructor*.// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,// or dynamically, by explicitly calling the _M_initialize member function.// (This is similar to the ways that a pthreads mutex can be initialized.)// There are explicit member functions for acquiring and releasing the lock.// There is no constructor because static initialization is essential for// some uses, and only a class aggregate (see section 8.5.1 of the C++// standard) can be initialized that way.  That means we must have no// constructors, no base classes, no virtual functions, and no private or// protected members.// For non-static cases, clients should use  _STLP_mutex.struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)  // It should be relatively easy to get this to work on any modern Unix.  
// _STLP_mutex_base (continued; the struct opens just above and its body is
// truncated in this chunk — _M_acquire_lock's pthread branch continues on
// the next page).
  // Spin-lock word: 0 when free (_M_initialize / _M_release_lock store 0).
  volatile __stl_atomic_t _M_lock;
#endif

#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  inline void _M_initialize() { _M_lock = 0; }
  inline void _M_destroy() {}

  void _M_acquire_lock() {
    // Spin with backoff until the lock word is obtained.
    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
  }

  inline void _M_release_lock() {
    volatile __stl_atomic_t* __lock = &_M_lock;
    // Platform-specific store barriers before clearing the lock word, so the
    // protected writes are visible before the lock appears free.
#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    asm("sync");
    *__lock = 0;
#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
         (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
    asm("membar #StoreStore ; membar #LoadStore");
#      else
    asm(" stbar ");
#      endif
    *__lock = 0;
#    else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#    endif
  }
#  elif defined (_STLP_PTHREADS)
#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if !defined (__OpenBSD__)
  pthread_spinlock_t _M_lock;
  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
  // sorry, but no static initializer for pthread_spinlock_t;
  // this will not work for compilers that has problems with call
  // constructor of static object...
  // _STLP_mutex_base()
  //   { pthread_spin_init( &_M_lock, 0 ); }
  // ~_STLP_mutex_base()
  //   { pthread_spin_destroy( &_M_lock ); }
  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
#      else // __OpenBSD__
  // OpenBSD lacks pthread spinlocks; use its native spinlock_t API.
  spinlock_t _M_lock;
  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
  inline void _M_destroy() { }
  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
#      endif // __OpenBSD__
#    else // !_STLP_USE_PTHREAD_SPINLOCK
  pthread_mutex_t _M_lock;
  inline void _M_initialize()
  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
  inline void _M_destroy()
  { pthread_mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() {
  // NOTE(review): body truncated here (page boundary) — the __hpux branch
  // presumably handles statically-allocated, never-initialized mutexes;
  // confirm against page 2 of the source.
#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)

/* NOTE: source truncated here — this capture is page 1 of 2 of a 688-line
 * file; _STLP_mutex_base::_M_acquire_lock continues on the next page. */