/* sip_endpoint.c */
        break;
        default:
            return PJ_EINVAL;
        }

        if (hdr) {
            pj_list_push_back(&endpt->cap_hdr, hdr);
        }
    }

    /* Add the tags to the header. */
    for (i=0; i<count; ++i) {
        pj_strdup(endpt->pool, &hdr->values[hdr->count], &tags[i]);
        ++hdr->count;
    }

    /* Done. */
    return PJ_SUCCESS;
}
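/* Usage sketch (illustration only): the code above is the tail of
 * pjsip_endpt_add_capability(), which appends the selected capability
 * header to endpt->cap_hdr and then duplicates each tag into it. Assuming
 * the public signature pjsip_endpt_add_capability(endpt, mod, htype, hname,
 * count, tags), a caller advertising optional extensions in the Supported
 * header might look like this; the tag values are only examples.
 */
static pj_status_t advertise_extensions(pjsip_endpoint *endpt,
                                        pjsip_module *mod)
{
    /* "100rel" and "timer" are illustrative extension tags. */
    const pj_str_t tags[] = { { "100rel", 6 }, { "timer", 5 } };

    return pjsip_endpt_add_capability(endpt, mod, PJSIP_H_SUPPORTED, NULL,
                                      PJ_ARRAY_SIZE(tags), tags);
}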
/*
* Get additional headers to be put in outgoing request message.
*/
PJ_DEF(const pjsip_hdr*) pjsip_endpt_get_request_headers(pjsip_endpoint *endpt)
{
    return &endpt->req_hdr;
}
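/* Usage sketch (illustration only): the returned value is the head of a
 * pjlib linked list, so a caller can walk it with the usual list idiom and
 * clone each header into an outgoing message. The helper name and the
 * `tdata` parameter are assumptions of the example.
 */
static void copy_default_request_headers(pjsip_endpoint *endpt,
                                         pjsip_tx_data *tdata)
{
    const pjsip_hdr *hdr_list = pjsip_endpt_get_request_headers(endpt);
    const pjsip_hdr *h = hdr_list->next;

    while (h != hdr_list) {
        /* Clone from the endpoint pool into the tx_data's own pool. */
        pjsip_msg_add_hdr(tdata->msg,
                          (pjsip_hdr*) pjsip_hdr_clone(tdata->pool, h));
        h = h->next;
    }
}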
/*
* Initialize endpoint.
*/
PJ_DEF(pj_status_t) pjsip_endpt_create(pj_pool_factory *pf,
                                       const char *name,
                                       pjsip_endpoint **p_endpt)
{
    pj_status_t status;
    pj_pool_t *pool;
    pjsip_endpoint *endpt;
    pjsip_max_fwd_hdr *mf_hdr;
    pj_lock_t *lock = NULL;

    if (!error_subsys_initialized) {
        pj_register_strerror(PJSIP_ERRNO_START, PJ_ERRNO_SPACE_SIZE,
                             &pjsip_strerror);
        error_subsys_initialized = 1;
    }

    PJ_LOG(5, (THIS_FILE, "Creating endpoint instance..."));

    *p_endpt = NULL;

    /* Create pool */
    pool = pj_pool_create(pf, "pept%p",
                          PJSIP_POOL_LEN_ENDPT, PJSIP_POOL_INC_ENDPT,
                          &pool_callback);
    if (!pool)
        return PJ_ENOMEM;

    /* Create endpoint. */
    endpt = PJ_POOL_ZALLOC_T(pool, pjsip_endpoint);
    endpt->pool = pool;
    endpt->pf = pf;

    /* Init modules list. */
    pj_list_init(&endpt->module_list);

    /* Create R/W mutex for module manipulation. */
    status = pj_rwmutex_create(endpt->pool, "ept%p", &endpt->mod_mutex);
    if (status != PJ_SUCCESS)
        goto on_error;
    /* Init parser. */
    init_sip_parser();

    /* Init tel: uri */
    pjsip_tel_uri_subsys_init();

    /* Get name. */
    if (name != NULL) {
        pj_str_t temp;
        pj_strdup_with_null(endpt->pool, &endpt->name, pj_cstr(&temp, name));
    } else {
        pj_strdup_with_null(endpt->pool, &endpt->name, pj_gethostname());
    }

    /* Create mutex for the events, etc. */
    status = pj_mutex_create_recursive( endpt->pool, "ept%p", &endpt->mutex );
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    /* Create timer heap to manage all timers within this endpoint. */
    status = pj_timer_heap_create( endpt->pool, PJSIP_MAX_TIMER_COUNT,
                                   &endpt->timer_heap);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    /* Set recursive lock for the timer heap. */
    status = pj_lock_create_recursive_mutex( endpt->pool, "edpt%p", &lock);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }
    pj_timer_heap_set_lock(endpt->timer_heap, lock, PJ_TRUE);

    /* Set maximum timed out entries to process in a single poll. */
    pj_timer_heap_set_max_timed_out_per_poll(endpt->timer_heap,
                                             PJSIP_MAX_TIMED_OUT_ENTRIES);

    /* Create ioqueue. */
    status = pj_ioqueue_create( endpt->pool, PJSIP_MAX_TRANSPORTS, &endpt->ioqueue);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }
    /* Create transport manager. */
    status = pjsip_tpmgr_create( endpt->pool, endpt,
                                 &endpt_on_rx_msg,
                                 &endpt_on_tx_msg,
                                 &endpt->transport_mgr);
    if (status != PJ_SUCCESS) {
        goto on_error;
    }

    /* Create asynchronous DNS resolver. */
    status = pjsip_resolver_create(endpt->pool, &endpt->resolver);
    if (status != PJ_SUCCESS) {
        PJ_LOG(4, (THIS_FILE, "Error creating resolver instance"));
        goto on_error;
    }

    /* Initialize request headers. */
    pj_list_init(&endpt->req_hdr);

    /* Add "Max-Forwards" request header. */
    mf_hdr = pjsip_max_fwd_hdr_create(endpt->pool,
                                      PJSIP_MAX_FORWARDS_VALUE);
    pj_list_insert_before( &endpt->req_hdr, mf_hdr);

    /* Initialize capability header list. */
    pj_list_init(&endpt->cap_hdr);

    /* Done. */
    *p_endpt = endpt;
    return status;
on_error:
    if (endpt->transport_mgr) {
        pjsip_tpmgr_destroy(endpt->transport_mgr);
        endpt->transport_mgr = NULL;
    }
    if (endpt->ioqueue) {
        pj_ioqueue_destroy(endpt->ioqueue);
        endpt->ioqueue = NULL;
    }
    if (endpt->timer_heap) {
        pj_timer_heap_destroy(endpt->timer_heap);
        endpt->timer_heap = NULL;
    }
    if (endpt->mutex) {
        pj_mutex_destroy(endpt->mutex);
        endpt->mutex = NULL;
    }
    if (endpt->mod_mutex) {
        pj_rwmutex_destroy(endpt->mod_mutex);
        endpt->mod_mutex = NULL;
    }
    pj_pool_release( endpt->pool );

    PJ_LOG(4, (THIS_FILE, "Error creating endpoint"));
    return status;
}
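/* Usage sketch (illustration only): typical application-side bootstrap for
 * pjsip_endpt_create(). The caching pool, the pj_init()/pjlib_util_init()
 * calls and the NULL endpoint name are assumptions of the example; error
 * handling is abbreviated.
 */
static pj_status_t app_create_endpoint(pj_caching_pool *cp,
                                       pjsip_endpoint **p_endpt)
{
    pj_status_t status;

    /* pjlib and pjlib-util must be initialized before any pjsip call. */
    status = pj_init();
    if (status != PJ_SUCCESS)
        return status;

    status = pjlib_util_init();
    if (status != PJ_SUCCESS)
        return status;

    /* The caching pool supplies the pj_pool_factory used by the endpoint. */
    pj_caching_pool_init(cp, &pj_pool_factory_default_policy, 0);

    /* NULL name: the endpoint name defaults to the host name (see above). */
    return pjsip_endpt_create(&cp->factory, NULL, p_endpt);
}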
/*
* Destroy endpoint.
*/
PJ_DEF(void) pjsip_endpt_destroy(pjsip_endpoint *endpt)
{
    pjsip_module *mod;

    PJ_LOG(5, (THIS_FILE, "Destroying endpoint instance.."));

    /* Unregister modules. */
    mod = endpt->module_list.prev;
    while (mod != &endpt->module_list) {
        pjsip_module *prev = mod->prev;
        pjsip_endpt_unregister_module(endpt, mod);
        mod = prev;
    }

    /* Destroy resolver */
    pjsip_resolver_destroy(endpt->resolver);

    /* Shutdown and destroy all transports. */
    pjsip_tpmgr_destroy(endpt->transport_mgr);

    /* Destroy ioqueue */
    pj_ioqueue_destroy(endpt->ioqueue);

    /* Destroy timer heap */
    pj_timer_heap_destroy(endpt->timer_heap);

    /* Delete endpoint mutex. */
    pj_mutex_destroy(endpt->mutex);

    /* Deinit parser */
    deinit_sip_parser();

    /* Delete module's mutex */
    pj_rwmutex_destroy(endpt->mod_mutex);

    /* Finally destroy pool. */
    pj_pool_release(endpt->pool);

    PJ_LOG(4, (THIS_FILE, "Endpoint %p destroyed", endpt));
}
/*
* Get endpoint name.
*/
PJ_DEF(const pj_str_t*) pjsip_endpt_name(const pjsip_endpoint *endpt)
{
    return &endpt->name;
}
/*
* Create new pool.
*/
PJ_DEF(pj_pool_t*) pjsip_endpt_create_pool( pjsip_endpoint *endpt,
                                            const char *pool_name,
                                            pj_size_t initial,
                                            pj_size_t increment )
{
    pj_pool_t *pool;

    /* No need to lock the endpoint mutex; the pool factory is thread safe.
    pj_mutex_lock(endpt->mutex);
     */

    /* Create pool */
    pool = pj_pool_create( endpt->pf, pool_name,
                           initial, increment, &pool_callback);

    /* No need to unlock either.
    pj_mutex_unlock(endpt->mutex);
     */

    if (!pool) {
        PJ_LOG(4, (THIS_FILE, "Unable to create pool %s!", pool_name));
    }

    return pool;
}
/*
* Return back pool to endpoint's pool manager to be either destroyed or
* recycled.
*/
PJ_DEF(void) pjsip_endpt_release_pool( pjsip_endpoint *endpt, pj_pool_t *pool )
{
    PJ_LOG(6, (THIS_FILE, "Releasing pool %s", pj_pool_getobjname(pool)));

    /* Don't need to acquire mutex since pool factory is thread safe
    pj_mutex_lock(endpt->mutex);
     */
    pj_pool_release( pool );

    PJ_UNUSED_ARG(endpt);
    /*
    pj_mutex_unlock(endpt->mutex);
     */
}
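/* Usage sketch (illustration only): a short-lived pool obtained from and
 * returned to the endpoint. The pool name and sizes are arbitrary; as the
 * comments above note, no endpoint lock is involved because the pool
 * factory itself is thread safe.
 */
static void pool_usage_example(pjsip_endpoint *endpt)
{
    pj_pool_t *pool;

    pool = pjsip_endpt_create_pool(endpt, "demo%p", 1000, 1000);
    if (!pool)
        return;

    /* ... allocate from the pool with pj_pool_alloc()/pj_pool_zalloc() ... */

    pjsip_endpt_release_pool(endpt, pool);
}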
PJ_DEF(pj_status_t) pjsip_endpt_handle_events2(pjsip_endpoint *endpt,
                                               const pj_time_val *max_timeout,
                                               unsigned *p_count)
{
    /* timeout is an 'out' parameter of the timer poll; it is initialized
     * here only to keep the compiler happy.
     */
    pj_time_val timeout = { 0, 0};
    unsigned count = 0, net_event_count = 0;
    int c;

    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_handle_events()"));

    /* Poll the timer heap. The timer heap has its own mutex for better
     * granularity, so we don't need to lock the endpoint.
     */
    timeout.sec = timeout.msec = 0;
    c = pj_timer_heap_poll( endpt->timer_heap, &timeout );
    if (c > 0)
        count += c;

    /* timer_heap_poll should never ever return a negative timeout value,
     * otherwise ioqueue_poll() will block forever!
     */
    pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
    if (timeout.msec >= 1000) timeout.msec = 999;

    /* If the caller specifies a maximum time to wait, compare it with the
     * timeout requested by the timer heap and use the smaller value.
     */
    if (max_timeout && PJ_TIME_VAL_GT(timeout, *max_timeout)) {
        timeout = *max_timeout;
    }

    /* Poll the ioqueue.
     * Keep polling the ioqueue while it has immediate events, because the
     * timer heap may have processed more than one event; if we only handled
     * one network event per call (such as when the IOCP backend is used),
     * the ioqueue could have trouble keeping up with the request rate.
     *
     * For example, for each send() request, one network event will be
     * reported by the ioqueue for the send() completion. If we don't poll
     * the ioqueue often enough, the send() completion will not be
     * reported in a timely manner.
     */
    do {
        c = pj_ioqueue_poll( endpt->ioqueue, &timeout);
        if (c < 0) {
            pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
            if (p_count)
                *p_count = count;
            return pj_get_netos_error();
        } else if (c == 0) {
            break;
        } else {
            net_event_count += c;
            timeout.sec = timeout.msec = 0;
        }
    } while (c > 0 && net_event_count < PJSIP_MAX_NET_EVENTS);

    count += net_event_count;
    if (p_count)
        *p_count = count;

    return PJ_SUCCESS;
}
/*
* Handle events.
*/
PJ_DEF(pj_status_t) pjsip_endpt_handle_events(pjsip_endpoint *endpt,
                                              const pj_time_val *max_timeout)
{
    return pjsip_endpt_handle_events2(endpt, max_timeout, NULL);
}
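/* Usage sketch (illustration only): a worker thread that drives the
 * endpoint's timers and network I/O by calling pjsip_endpt_handle_events()
 * in a loop. The quit flag and the 500 ms poll interval are assumptions of
 * the example.
 */
static pj_bool_t worker_quit_flag;

static int worker_thread(void *arg)
{
    pjsip_endpoint *endpt = (pjsip_endpoint*) arg;

    while (!worker_quit_flag) {
        pj_time_val timeout = { 0, 500 };   /* wait at most 500 ms */
        pjsip_endpt_handle_events(endpt, &timeout);
    }
    return 0;
}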
/*
* Schedule timer.
*/
PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
                                                pj_timer_entry *entry,
                                                const pj_time_val *delay )
{
    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer(entry=%p, delay=%u.%u)",
               entry, delay->sec, delay->msec));
    return pj_timer_heap_schedule( endpt->timer_heap, entry, delay );
}
/*
* Cancel the previously registered timer.
*/
PJ_DEF(void) pjsip_endpt_cancel_timer( pjsip_endpoint *endpt,
                                       pj_timer_entry *entry )
{
    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_cancel_timer(entry=%p)", entry));
    pj_timer_heap_cancel( endpt->timer_heap, entry );
}
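/* Usage sketch (illustration only): schedule a one-shot timer on the
 * endpoint's timer heap. The callback, the timer id and the 5-second delay
 * are assumptions of the example; the entry must remain valid until it
 * fires or is cancelled with pjsip_endpt_cancel_timer().
 */
static void on_demo_timer(pj_timer_heap_t *timer_heap, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(timer_heap);
    PJ_LOG(4, (THIS_FILE, "Demo timer %d fired", entry->id));
}

static pj_status_t start_demo_timer(pjsip_endpoint *endpt,
                                    pj_timer_entry *entry)
{
    pj_time_val delay = { 5, 0 };   /* 5 seconds */

    pj_timer_entry_init(entry, 1 /*id*/, NULL /*user_data*/, &on_demo_timer);
    return pjsip_endpt_schedule_timer(endpt, entry, &delay);
}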
/*
* Get the timer heap instance of the SIP endpoint.
*/
PJ_DEF(pj_timer_heap_t*) pjsip_endpt_get_timer_heap(pjsip_endpoint *endpt)
{
    return endpt->timer_heap;
}
/*
* This is the callback that is called by the transport manager when it
* receives a message from the network.
 */