/*
 * os_core_unix.c -- OS core functions for Unix.
 * Part of an open-source SIP protocol stack (pjlib/PJSIP).
 * This excerpt is page 1 of a 3-page listing; the file continues
 * past pj_mutex_lock() below.
 */

    return rec;
#else
    pj_assert(!"Threading is not enabled!");
    return NULL;
#endif
}

/*
 * pj_thread_join()
 */
PJ_DEF(pj_status_t) pj_thread_join(pj_thread_t *p)
{
#if PJ_HAS_THREADS
    pj_thread_t *rec = (pj_thread_t *)p;
    void *ret;
    int result;

    PJ_CHECK_STACK();

    PJ_LOG(6, (pj_thread_this()->obj_name, "Joining thread %s", p->obj_name));
    result = pthread_join( rec->thread, &ret);

    if (result == 0)
	return PJ_SUCCESS;
    else {
	/* Calling pthread_join() on a thread that no longer exists and 
	 * getting back ESRCH isn't an error (in this context). 
	 * Thanks Phil Torre <ptorre@zetron.com>.
	 */
	return result==ESRCH ? PJ_SUCCESS : PJ_RETURN_OS_ERROR(result);
    }
#else
    PJ_CHECK_STACK();
    pj_assert(!"No multithreading support!");
    return PJ_EINVALIDOP;
#endif
}
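
/* Usage sketch (a minimal example, not part of this file): creating a
 * worker thread and joining it. pj_thread_create()'s signature is
 * assumed to match pjlib's <pj/os.h>.
 *
 *   static int worker_proc(void *arg)
 *   {
 *       // ... do work ...
 *       return 0;
 *   }
 *
 *   pj_thread_t *thr;
 *   pj_status_t rc;
 *
 *   rc = pj_thread_create(pool, "worker%p", &worker_proc, NULL,
 *                         PJ_THREAD_DEFAULT_STACK_SIZE, 0, &thr);
 *   if (rc == PJ_SUCCESS) {
 *       pj_thread_join(thr);      // ESRCH is treated as success here
 *       pj_thread_destroy(thr);
 *   }
 */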

/*
 * pj_thread_destroy()
 */
PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *p)
{
    PJ_CHECK_STACK();

    /* Destroy mutex used to suspend thread */
    if (p->suspended_mutex) {
	pj_mutex_destroy(p->suspended_mutex);
	p->suspended_mutex = NULL;
    }

    return PJ_SUCCESS;
}

/*
 * pj_thread_sleep()
 */
PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec)
{
/* TODO: should change this to something like PJ_OS_HAS_NANOSLEEP */
#if defined(PJ_RTEMS) && PJ_RTEMS!=0
    enum { NANOSEC_PER_MSEC = 1000000 };
    struct timespec req;

    PJ_CHECK_STACK();
    req.tv_sec = msec / 1000;
    req.tv_nsec = (msec % 1000) * NANOSEC_PER_MSEC;

    if (nanosleep(&req, NULL) == 0)
	return PJ_SUCCESS;

    return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
#else
    PJ_CHECK_STACK();

    pj_set_os_error(0);

    usleep(msec * 1000);

    return pj_get_os_error();
#endif	/* PJ_RTEMS */
}
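
/* Worked example for the conversion above: pj_thread_sleep(1500) gives
 * req.tv_sec = 1500 / 1000 = 1 and
 * req.tv_nsec = (1500 % 1000) * 1000000 = 500000000, i.e. 1.5 seconds.
 * On the usleep() path the same call sleeps 1500 * 1000 = 1500000 us;
 * note that POSIX allows usleep() to fail with EINVAL for arguments of
 * 1000000 or more, which the nanosleep() branch avoids.
 */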

#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
/*
 * pj_thread_check_stack()
 * Implementation for PJ_CHECK_STACK()
 */
PJ_DEF(void) pj_thread_check_stack(const char *file, int line)
{
    char stk_ptr;
    pj_uint32_t usage;
    pj_thread_t *thread = pj_thread_this();

    /* Calculate current usage. */
    usage = (&stk_ptr > thread->stk_start) ? &stk_ptr - thread->stk_start :
		thread->stk_start - &stk_ptr;

    /* Assert if stack usage is dangerously high. */
    pj_assert("STACK OVERFLOW!! " && (usage <= thread->stk_size - 128));

    /* Keep statistic. */
    if (usage > thread->stk_max_usage) {
	thread->stk_max_usage = usage;
	thread->caller_file = file;
	thread->caller_line = line;
    }
}

/*
 * pj_thread_get_stack_max_usage()
 */
PJ_DEF(pj_uint32_t) pj_thread_get_stack_max_usage(pj_thread_t *thread)
{
    return thread->stk_max_usage;
}

/*
 * pj_thread_get_stack_info()
 */
PJ_DEF(pj_status_t) pj_thread_get_stack_info( pj_thread_t *thread,
					      const char **file,
					      int *line )
{
    pj_assert(thread);

    *file = thread->caller_file;
    *line = thread->caller_line;
    return PJ_SUCCESS;
}

#endif	/* PJ_OS_HAS_CHECK_STACK */
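
/* Usage sketch (a minimal example, not part of this file): in pjlib,
 * PJ_CHECK_STACK() is expected to expand to
 * pj_thread_check_stack(__FILE__, __LINE__) when PJ_OS_HAS_CHECK_STACK
 * is set, with thread->stk_start recorded at the thread's entry point.
 * Reading the statistics back:
 *
 *   const char *file;
 *   int line;
 *   pj_uint32_t peak = pj_thread_get_stack_max_usage(pj_thread_this());
 *
 *   if (pj_thread_get_stack_info(pj_thread_this(), &file, &line) == 0)
 *       PJ_LOG(4, ("app", "Peak stack usage: %u bytes at %s:%d",
 *                  peak, file, line));
 */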

///////////////////////////////////////////////////////////////////////////////
/*
 * pj_atomic_create()
 */
PJ_DEF(pj_status_t) pj_atomic_create( pj_pool_t *pool, 
				      pj_atomic_value_t initial,
				      pj_atomic_t **ptr_atomic)
{
    pj_status_t rc;
    pj_atomic_t *atomic_var = pj_pool_calloc(pool, 1, sizeof(pj_atomic_t));
    PJ_ASSERT_RETURN(atomic_var, PJ_ENOMEM);
    
#if PJ_HAS_THREADS
    rc = pj_mutex_create(pool, "atm%p", PJ_MUTEX_SIMPLE, &atomic_var->mutex);
    if (rc != PJ_SUCCESS)
	return rc;
#endif
    atomic_var->value = initial;

    *ptr_atomic = atomic_var;
    return PJ_SUCCESS;
}

/*
 * pj_atomic_destroy()
 */
PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *atomic_var )
{
    PJ_ASSERT_RETURN(atomic_var, PJ_EINVAL);
#if PJ_HAS_THREADS
    return pj_mutex_destroy( atomic_var->mutex );
#else
    return PJ_SUCCESS;
#endif
}

/*
 * pj_atomic_set()
 */
PJ_DEF(void) pj_atomic_set(pj_atomic_t *atomic_var, pj_atomic_value_t value)
{
    PJ_CHECK_STACK();

#if PJ_HAS_THREADS
    pj_mutex_lock( atomic_var->mutex );
#endif
    atomic_var->value = value;
#if PJ_HAS_THREADS
    pj_mutex_unlock( atomic_var->mutex);
#endif 
}

/*
 * pj_atomic_get()
 */
PJ_DEF(pj_atomic_value_t) pj_atomic_get(pj_atomic_t *atomic_var)
{
    pj_atomic_value_t oldval;
    
    PJ_CHECK_STACK();

#if PJ_HAS_THREADS
    pj_mutex_lock( atomic_var->mutex );
#endif
    oldval = atomic_var->value;
#if PJ_HAS_THREADS
    pj_mutex_unlock( atomic_var->mutex);
#endif
    return oldval;
}

/*
 * pj_atomic_inc_and_get()
 */
PJ_DEF(pj_atomic_value_t) pj_atomic_inc_and_get(pj_atomic_t *atomic_var)
{
    pj_atomic_value_t new_value;

    PJ_CHECK_STACK();

#if PJ_HAS_THREADS
    pj_mutex_lock( atomic_var->mutex );
#endif
    new_value = ++atomic_var->value;
#if PJ_HAS_THREADS
    pj_mutex_unlock( atomic_var->mutex);
#endif

    return new_value;
}

/*
 * pj_atomic_inc()
 */
PJ_DEF(void) pj_atomic_inc(pj_atomic_t *atomic_var)
{
    pj_atomic_inc_and_get(atomic_var);
}

/*
 * pj_atomic_dec_and_get()
 */
PJ_DEF(pj_atomic_value_t) pj_atomic_dec_and_get(pj_atomic_t *atomic_var)
{
    pj_atomic_value_t new_value;

    PJ_CHECK_STACK();

#if PJ_HAS_THREADS
    pj_mutex_lock( atomic_var->mutex );
#endif
    new_value = --atomic_var->value;
#if PJ_HAS_THREADS
    pj_mutex_unlock( atomic_var->mutex);
#endif

    return new_value;
}

/*
 * pj_atomic_dec()
 */
PJ_DEF(void) pj_atomic_dec(pj_atomic_t *atomic_var)
{
    pj_atomic_dec_and_get(atomic_var);
}

/*
 * pj_atomic_add_and_get()
 */ 
PJ_DEF(pj_atomic_value_t) pj_atomic_add_and_get( pj_atomic_t *atomic_var, 
                                                 pj_atomic_value_t value )
{
    pj_atomic_value_t new_value;

#if PJ_HAS_THREADS
    pj_mutex_lock(atomic_var->mutex);
#endif
    
    atomic_var->value += value;
    new_value = atomic_var->value;

#if PJ_HAS_THREADS
    pj_mutex_unlock(atomic_var->mutex);
#endif

    return new_value;
}

/*
 * pj_atomic_add()
 */ 
PJ_DEF(void) pj_atomic_add( pj_atomic_t *atomic_var, 
                            pj_atomic_value_t value )
{
    pj_atomic_add_and_get(atomic_var, value);
}
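
/* Usage sketch (a minimal example, not part of this file): a reference
 * count built on the pj_atomic_* operations above.
 *
 *   pj_atomic_t *ref_cnt;
 *
 *   if (pj_atomic_create(pool, 1, &ref_cnt) == PJ_SUCCESS) {
 *       pj_atomic_inc(ref_cnt);                   // add a reference
 *       if (pj_atomic_dec_and_get(ref_cnt) == 0)  // drop a reference
 *           ; // last reference gone: the owner may free the object
 *       pj_atomic_destroy(ref_cnt);
 *   }
 */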

///////////////////////////////////////////////////////////////////////////////
/*
 * pj_thread_local_alloc()
 */
PJ_DEF(pj_status_t) pj_thread_local_alloc(long *p_index)
{
#if PJ_HAS_THREADS
    pthread_key_t key;
    int rc;

    PJ_ASSERT_RETURN(p_index != NULL, PJ_EINVAL);

    pj_assert( sizeof(pthread_key_t) <= sizeof(long));
    if ((rc=pthread_key_create(&key, NULL)) != 0)
	return PJ_RETURN_OS_ERROR(rc);

    *p_index = key;
    return PJ_SUCCESS;
#else
    int i;
    for (i=0; i<MAX_THREADS; ++i) {
	if (tls_flag[i] == 0)
	    break;
    }
    if (i == MAX_THREADS) 
	return PJ_ETOOMANY;
    
    tls_flag[i] = 1;
    tls[i] = NULL;

    *p_index = i;
    return PJ_SUCCESS;
#endif
}

/*
 * pj_thread_local_free()
 */
PJ_DEF(void) pj_thread_local_free(long index)
{
    PJ_CHECK_STACK();
#if PJ_HAS_THREADS
    pthread_key_delete(index);
#else
    tls_flag[index] = 0;
#endif
}

/*
 * pj_thread_local_set()
 */
PJ_DEF(pj_status_t) pj_thread_local_set(long index, void *value)
{
    //Can't check stack because this function is called in the
    //beginning before main thread is initialized.
    //PJ_CHECK_STACK();
#if PJ_HAS_THREADS
    int rc=pthread_setspecific(index, value);
    return rc==0 ? PJ_SUCCESS : PJ_RETURN_OS_ERROR(rc);
#else
    pj_assert(index >= 0 && index < MAX_THREADS);
    tls[index] = value;
    return PJ_SUCCESS;
#endif
}

PJ_DEF(void*) pj_thread_local_get(long index)
{
    //Can't check stack because this function is called
    //by PJ_CHECK_STACK() itself!!!
    //PJ_CHECK_STACK();
#if PJ_HAS_THREADS
    return pthread_getspecific(index);
#else
    pj_assert(index >= 0 && index < MAX_THREADS);
    return tls[index];
#endif
}
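
/* Usage sketch (a minimal example, not part of this file): stashing a
 * per-thread context pointer. my_ctx_t and my_ctx are hypothetical
 * application names.
 *
 *   static long ctx_key;
 *
 *   pj_thread_local_alloc(&ctx_key);          // once, at startup
 *   pj_thread_local_set(ctx_key, my_ctx);     // in each thread
 *   my_ctx_t *ctx = pj_thread_local_get(ctx_key);
 *   pj_thread_local_free(ctx_key);            // once, at shutdown
 */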

///////////////////////////////////////////////////////////////////////////////
PJ_DEF(void) pj_enter_critical_section(void)
{
#if PJ_HAS_THREADS
    pj_mutex_lock(&critical_section);
#endif
}

PJ_DEF(void) pj_leave_critical_section(void)
{
#if PJ_HAS_THREADS
    pj_mutex_unlock(&critical_section);
#endif
}
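
/* Usage sketch: the global critical section is a coarse-grained lock
 * for short regions that touch library-wide state:
 *
 *   pj_enter_critical_section();
 *   // ... briefly touch shared state ...
 *   pj_leave_critical_section();
 */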


///////////////////////////////////////////////////////////////////////////////
static pj_status_t init_mutex(pj_mutex_t *mutex, const char *name, int type)
{
#if PJ_HAS_THREADS
    pthread_mutexattr_t attr;
    int rc;

    PJ_CHECK_STACK();

    rc = pthread_mutexattr_init(&attr);
    if (rc != 0)
	return PJ_RETURN_OS_ERROR(rc);

    if (type == PJ_MUTEX_SIMPLE) {
#if defined(PJ_LINUX) && PJ_LINUX!=0
	extern int pthread_mutexattr_settype(pthread_mutexattr_t*,int);
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_FAST_NP);
#elif defined(PJ_RTEMS) && PJ_RTEMS!=0
	/* Nothing to do, default is simple */
#else
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
#endif
    } else {
#if defined(PJ_LINUX) && PJ_LINUX!=0
	extern int pthread_mutexattr_settype(pthread_mutexattr_t*,int);
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
#elif defined(PJ_RTEMS) && PJ_RTEMS!=0
	// Phil Torre <ptorre@zetron.com>:
	// The RTEMS implementation of POSIX mutexes doesn't include 
	// pthread_mutexattr_settype(), so what follows is a hack
	// until I get RTEMS patched to support the set/get functions.
	PJ_TODO(FIX_RTEMS_RECURSIVE_MUTEX_TYPE)
	attr.recursive = 1;
#else
	rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
#endif
    }
    
    if (rc != 0) {
	return PJ_RETURN_OS_ERROR(rc);
    }

    rc = pthread_mutex_init(&mutex->mutex, &attr);
    if (rc != 0) {
	return PJ_RETURN_OS_ERROR(rc);
    }
    
#if PJ_DEBUG
    /* Set owner. */
    mutex->nesting_level = 0;
    mutex->owner = NULL;
#endif

    /* Set name. */
    if (!name) {
	name = "mtx%p";
    }
    if (strchr(name, '%')) {
	pj_ansi_snprintf(mutex->obj_name, PJ_MAX_OBJ_NAME, name, mutex);
    } else {
	strncpy(mutex->obj_name, name, PJ_MAX_OBJ_NAME);
	mutex->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
    }

    PJ_LOG(6, (mutex->obj_name, "Mutex created"));
    return PJ_SUCCESS;
#else /* PJ_HAS_THREADS */
    return PJ_SUCCESS;
#endif
}

/*
 * pj_mutex_create()
 */
PJ_DEF(pj_status_t) pj_mutex_create(pj_pool_t *pool, 
				    const char *name, 
				    int type,
				    pj_mutex_t **ptr_mutex)
{
#if PJ_HAS_THREADS
    pj_status_t rc;
    pj_mutex_t *mutex;

    PJ_ASSERT_RETURN(pool && ptr_mutex, PJ_EINVAL);

    mutex = pj_pool_alloc(pool, sizeof(*mutex));
    PJ_ASSERT_RETURN(mutex, PJ_ENOMEM);

    if ((rc=init_mutex(mutex, name, type)) != PJ_SUCCESS)
	return rc;
    
    *ptr_mutex = mutex;
    return PJ_SUCCESS;
#else /* PJ_HAS_THREADS */
    *ptr_mutex = (pj_mutex_t*)1;
    return PJ_SUCCESS;
#endif
}

/*
 * pj_mutex_create_simple()
 */
PJ_DEF(pj_status_t) pj_mutex_create_simple( pj_pool_t *pool, 
                                            const char *name,
					    pj_mutex_t **mutex )
{
    return pj_mutex_create(pool, name, PJ_MUTEX_SIMPLE, mutex);
}

/*
 * pj_mutex_create_recursive()
 */
PJ_DEF(pj_status_t) pj_mutex_create_recursive( pj_pool_t *pool,
					       const char *name,
					       pj_mutex_t **mutex )
{
    return pj_mutex_create(pool, name, PJ_MUTEX_RECURSE, mutex);
}
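
/* Usage sketch (a minimal example, not part of this file): guarding
 * shared data with a recursive mutex created from a pool.
 *
 *   pj_mutex_t *lock;
 *
 *   if (pj_mutex_create_recursive(pool, "list%p", &lock) == PJ_SUCCESS) {
 *       pj_mutex_lock(lock);
 *       // ... the same thread may call pj_mutex_lock() again here ...
 *       pj_mutex_unlock(lock);
 *       pj_mutex_destroy(lock);
 *   }
 */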

/*
 * pj_mutex_lock()
 */
PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
{
#if PJ_HAS_THREADS
    pj_status_t status;

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(mutex, PJ_EINVAL);

#if PJ_DEBUG
    PJ_LOG(6,(mutex->obj_name, "Mutex: thread %s is waiting (mutex owner=%s)", 
				pj_thread_this()->obj_name,
				mutex->owner_name));
