⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sip_endpoint.c

📁 一个开源SIP协议栈
💻 C
📖 第 1 页 / 共 2 页
字号:
	pjsip_module *prev = mod->prev;
	pjsip_endpt_unregister_module(endpt, mod);
	mod = prev;
    }

    /* Shutdown and destroy all transports. */
    pjsip_tpmgr_destroy(endpt->transport_mgr);

    /* Destroy ioqueue */
    pj_ioqueue_destroy(endpt->ioqueue);

    /* Destroy timer heap */
    pj_timer_heap_destroy(endpt->timer_heap);

    /* Delete endpoint mutex. */
    pj_mutex_destroy(endpt->mutex);

    /* Deinit parser */
    deinit_sip_parser();

    /* Delete module's mutex */
    pj_rwmutex_destroy(endpt->mod_mutex);

    /* Finally destroy pool. */
    pj_pool_release(endpt->pool);

    PJ_LOG(4, (THIS_FILE, "Endpoint %p destroyed", endpt));
}

/*
 * Get endpoint name.
 */
PJ_DEF(const pj_str_t*) pjsip_endpt_name(const pjsip_endpoint *endpt)
{
    /* The name is stored by value inside the endpoint; the returned
     * pointer stays valid for the endpoint's lifetime.
     */
    const pj_str_t *endpt_name = &endpt->name;

    return endpt_name;
}


/*
 * Create new pool.
 */
PJ_DEF(pj_pool_t*) pjsip_endpt_create_pool( pjsip_endpoint *endpt,
					       const char *pool_name,
					       pj_size_t initial,
					       pj_size_t increment )
{
    /* Create a new pool straight from the endpoint's pool factory.
     * No endpoint locking is needed: the factory is thread safe.
     */
    pj_pool_t *new_pool = pj_pool_create(endpt->pf, pool_name,
					 initial, increment, &pool_callback);

    if (new_pool == NULL) {
	PJ_LOG(4, (THIS_FILE, "Unable to create pool %s!", pool_name));
    }

    return new_pool;
}

/*
 * Return back pool to endpoint's pool manager to be either destroyed or
 * recycled.
 */
PJ_DEF(void) pjsip_endpt_release_pool( pjsip_endpoint *endpt, pj_pool_t *pool )
{
    /* The pool factory is thread safe, so the endpoint itself is not
     * touched (and no mutex is taken) when returning a pool.
     */
    PJ_UNUSED_ARG(endpt);

    PJ_LOG(6, (THIS_FILE, "Releasing pool %s", pj_pool_getobjname(pool)));

    pj_pool_release( pool );
}


/*
 * Poll the endpoint's timer heap and ioqueue once.
 *
 * First fires any expired timers, then polls the ioqueue for network
 * events, waiting at most min(next-timer-delay, *max_timeout).  On return,
 * *p_count (if non-NULL) receives the total number of timer + network
 * events processed.  Returns PJ_SUCCESS, or the OS network error when
 * ioqueue polling fails.
 */
PJ_DEF(pj_status_t) pjsip_endpt_handle_events2(pjsip_endpoint *endpt,
					       const pj_time_val *max_timeout,
					       unsigned *p_count)
{
    /* timeout is 'out' var. This just to make compiler happy. */
    pj_time_val timeout = { 0, 0};
    unsigned count = 0, net_event_count = 0;
    int c;

    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_handle_events()"));

    /* Poll the timer. The timer heap has its own mutex for better 
     * granularity, so we don't need to lock end endpoint. 
     */
    timeout.sec = timeout.msec = 0;
    c = pj_timer_heap_poll( endpt->timer_heap, &timeout );
    if (c > 0)
	count += c;

    /* timer_heap_poll should never ever returns negative value, or otherwise
     * ioqueue_poll() will block forever!
     */
    pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
    if (timeout.msec >= 1000) timeout.msec = 999;

    /* If caller specifies maximum time to wait, then compare the value with
     * the timeout to wait from timer, and use the minimum value.
     */
    if (max_timeout && PJ_TIME_VAL_GT(timeout, *max_timeout)) {
	timeout = *max_timeout;
    }

    /* Poll ioqueue. 
     * Repeat polling the ioqueue while we have immediate events, because
     * timer heap may process more than one events, so if we only process
     * one network events at a time (such as when IOCP backend is used),
     * the ioqueue may have trouble keeping up with the request rate.
     *
     * For example, for each send() request, one network event will be
     *   reported by ioqueue for the send() completion. If we don't poll
     *   the ioqueue often enough, the send() completion will not be
     *   reported in timely manner.
     */
    do {
	c = pj_ioqueue_poll( endpt->ioqueue, &timeout);
	if (c < 0) {
	    /* Polling error: sleep for the remaining timeout so a tight
	     * caller loop does not spin at 100% CPU, then report the
	     * OS-level error.
	     */
	    pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
	    if (p_count)
		*p_count = count;
	    return pj_get_netos_error();
	} else if (c == 0) {
	    break;
	} else {
	    /* Got events; subsequent iterations poll with zero timeout so
	     * we only drain events that are already pending.
	     */
	    net_event_count += c;
	    timeout.sec = timeout.msec = 0;
	}
    } while (c > 0 && net_event_count < PJSIP_MAX_NET_EVENTS);

    count += net_event_count;
    if (p_count)
	*p_count = count;

    return PJ_SUCCESS;
}

/*
 * Handle events.
 */
PJ_DEF(pj_status_t) pjsip_endpt_handle_events(pjsip_endpoint *endpt,
					      const pj_time_val *max_timeout)
{
    /* Convenience wrapper: delegate to pjsip_endpt_handle_events2()
     * without asking for the processed-events count.
     */
    pj_status_t status = pjsip_endpt_handle_events2(endpt, max_timeout, NULL);

    return status;
}

/*
 * Schedule timer.
 */
PJ_DEF(pj_status_t) pjsip_endpt_schedule_timer( pjsip_endpoint *endpt,
						pj_timer_entry *entry,
						const pj_time_val *delay )
{
    /* The timer heap has its own lock; just log and forward. */
    pj_status_t status;

    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_schedule_timer(entry=%p, delay=%u.%u)",
			 entry, delay->sec, delay->msec));

    status = pj_timer_heap_schedule(endpt->timer_heap, entry, delay);
    return status;
}

/*
 * Cancel the previously registered timer.
 */
PJ_DEF(void) pjsip_endpt_cancel_timer( pjsip_endpoint *endpt, 
				       pj_timer_entry *entry )
{
    /* Forward the cancellation to the timer heap (which does its own
     * locking).
     */
    PJ_LOG(6, (THIS_FILE, "pjsip_endpt_cancel_timer(entry=%p)", entry));

    pj_timer_heap_cancel(endpt->timer_heap, entry);
}

/*
 * This is the callback that is called by the transport manager when it 
 * receives a message from the network.
 */
/*
 * This is the callback that is called by the transport manager when it
 * receives a message from the network.
 *
 * On parse error the packet is logged and dropped.  For responses, the
 * Via sent-by is validated against the receiving transport (RFC 3261
 * 18.1.2) before the message is distributed to registered modules in
 * priority order; the first module that claims the message stops the
 * distribution.
 */
static void endpt_on_rx_msg( pjsip_endpoint *endpt,
				      pj_status_t status,
				      pjsip_rx_data *rdata )
{
    pjsip_msg *msg = rdata->msg_info.msg;

    if (status != PJ_SUCCESS) {
	char info[30];
	char errmsg[PJ_ERR_MSG_SIZE];

	info[0] = '\0';

	if (status == PJSIP_EMISSINGHDR) {
	    pj_str_t p;

	    p.ptr = info; p.slen = 0;

	    /* A mandatory header is "missing" when its pointer is NULL or
	     * its value is empty.  (BUGFIX: the Call-ID test previously
	     * read "cid->id.slen", which flagged a present, NON-empty
	     * Call-ID as missing; it must be "cid->id.slen == 0" to match
	     * the NULL/absent checks below.)
	     */
	    if (rdata->msg_info.cid == NULL ||
		rdata->msg_info.cid->id.slen == 0)
	    {
		pj_strcpy2(&p, "Call-ID");
	    }
	    /* NOTE(review): pj_strcpy2() copies (overwrites) rather than
	     * appends, so when several headers are missing only the last
	     * one ends up in 'info' -- confirm whether an append
	     * (pj_strcat2) was intended here.
	     */
	    if (rdata->msg_info.from == NULL)
		pj_strcpy2(&p, " From");
	    if (rdata->msg_info.to == NULL)
		pj_strcpy2(&p, " To");
	    if (rdata->msg_info.via == NULL)
		pj_strcpy2(&p, " Via");
	    if (rdata->msg_info.cseq == NULL) 
		pj_strcpy2(&p, " CSeq");

	    p.ptr[p.slen] = '\0';
	}

	pj_strerror(status, errmsg, sizeof(errmsg));

	PJ_LOG(1, (THIS_FILE, 
		  "Error processing packet from %s:%d: %s %s [code %d]:\n"
		  "%.*s\n"
		  "-- end of packet.",
		  rdata->pkt_info.src_name, 
		  rdata->pkt_info.src_port,
		  errmsg,
		  info,
		  status,
		  (int)rdata->msg_info.len,
		  rdata->msg_info.msg_buf));
	return;
    }

    PJ_LOG(5, (THIS_FILE, "Processing incoming message: %s", 
	       pjsip_rx_data_get_info(rdata)));

    /* For response, check that the value in Via sent-by match the transport.
     * If not matched, silently drop the response.
     * Ref: RFC3261 Section 18.1.2 Receiving Response
     */
    if (msg->type == PJSIP_RESPONSE_MSG) {
	const pj_str_t *local_addr;
	int port = rdata->msg_info.via->sent_by.port;
	pj_bool_t mismatch = PJ_FALSE;

	/* A sent-by with no explicit port implies the default port for
	 * the transport type.
	 */
	if (port == 0) {
	    int type;
	    type = rdata->tp_info.transport->key.type;
	    port = pjsip_transport_get_default_port_for_type(type);
	}
	local_addr = &rdata->tp_info.transport->local_name.host;

	if (pj_strcmp(&rdata->msg_info.via->sent_by.host, local_addr) != 0) {

	    /* The RFC says that we should drop response when sent-by
	     * address mismatch. But it could happen (e.g. with SER) when
	     * endpoint with private IP is sending request to public
	     * server.

	    mismatch = PJ_TRUE;

	     */

	} else if (port != rdata->tp_info.transport->local_name.port) {
	    /* Port or address mismatch, we should discard response */
	    /* But we saw one implementation (we don't want to name it to 
	     * protect the innocence) which put wrong sent-by port although
	     * the "rport" parameter is correct.
	     * So we discard the response only if the port doesn't match
	     * both the port in sent-by and rport. We try to be lenient here!
	     */
	    if (rdata->msg_info.via->rport_param != 
		rdata->tp_info.transport->local_name.port)
		mismatch = PJ_TRUE;
	    else {
		PJ_LOG(4,(THIS_FILE, "Message %s from %s has mismatch port in "
				     "sent-by but the rport parameter is "
				     "correct",
				     pjsip_rx_data_get_info(rdata), 
				     rdata->pkt_info.src_name));
	    }
	}

	if (mismatch) {
	    PJ_TODO(ENDPT_REPORT_WHEN_DROPPING_MESSAGE);
	    PJ_LOG(4,(THIS_FILE, "Dropping response %s from %s:%d because "
				 "sent-by is mismatch", 
				 pjsip_rx_data_get_info(rdata),
				 rdata->pkt_info.src_name, 
				 rdata->pkt_info.src_port));
	    return;
	}
    }


    /* Distribute to modules, starting from modules with highest priority */
    LOCK_MODULE_ACCESS(endpt);

    if (msg->type == PJSIP_REQUEST_MSG) {
	pjsip_module *mod;
	pj_bool_t handled = PJ_FALSE;

	mod = endpt->module_list.next;
	while (mod != &endpt->module_list) {
	    if (mod->on_rx_request)
		handled = (*mod->on_rx_request)(rdata);
	    if (handled)
		break;
	    mod = mod->next;
	}

	/* No module is able to handle the request. */
	if (!handled) {
	    PJ_TODO(ENDPT_RESPOND_UNHANDLED_REQUEST);
	    PJ_LOG(4,(THIS_FILE, "Message %s from %s:%d was dropped/unhandled by"
				 " any modules",
				 pjsip_rx_data_get_info(rdata),
				 rdata->pkt_info.src_name,
				 rdata->pkt_info.src_port));
	}

    } else {
	pjsip_module *mod;
	pj_bool_t handled = PJ_FALSE;

	mod = endpt->module_list.next;
	while (mod != &endpt->module_list) {
	    if (mod->on_rx_response)
		handled = (*mod->on_rx_response)(rdata);
	    if (handled)
		break;
	    mod = mod->next;
	}

	if (!handled) {
	    PJ_LOG(4,(THIS_FILE, "Message %s from %s:%d was dropped/unhandled"
				 " by any modules",
				 pjsip_rx_data_get_info(rdata),
				 rdata->pkt_info.src_name,
				 rdata->pkt_info.src_port));
	}
    }

    UNLOCK_MODULE_ACCESS(endpt);

    /* Must clear mod_data before returning rdata to transport, since
     * rdata may be reused.
     */
    pj_bzero(&rdata->endpt_info, sizeof(rdata->endpt_info));
}

/*
 * This callback is called by transport manager before message is sent.
 * Modules may inspect the message before it's actually sent.
 */
/*
 * This callback is called by transport manager before message is sent.
 * Modules may inspect the message before it's actually sent.
 */
static pj_status_t endpt_on_tx_msg( pjsip_endpoint *endpt,
				    pjsip_tx_data *tdata )
{
    pj_status_t status = PJ_SUCCESS;
    pjsip_module *mod;

    /* Distribute to modules, starting from modules with LOWEST priority;
     * stop as soon as any module reports an error.
     */
    LOCK_MODULE_ACCESS(endpt);

    if (tdata->msg->type == PJSIP_REQUEST_MSG) {
	for (mod = endpt->module_list.prev; mod != &endpt->module_list;
	     mod = mod->prev)
	{
	    if (mod->on_tx_request)
		status = (*mod->on_tx_request)(tdata);
	    if (status != PJ_SUCCESS)
		break;
	}
    } else {
	for (mod = endpt->module_list.prev; mod != &endpt->module_list;
	     mod = mod->prev)
	{
	    if (mod->on_tx_response)
		status = (*mod->on_tx_response)(tdata);
	    if (status != PJ_SUCCESS)
		break;
	}
    }

    UNLOCK_MODULE_ACCESS(endpt);

    return status;
}


/*
 * Create transmit data buffer.
 */
PJ_DEF(pj_status_t) pjsip_endpt_create_tdata(  pjsip_endpoint *endpt,
					       pjsip_tx_data **p_tdata)
{
    /* Transmit buffers are owned by the transport manager; delegate
     * creation there.
     */
    pj_status_t status = pjsip_tx_data_create(endpt->transport_mgr, p_tdata);

    return status;
}

/*
 * Create the DNS resolver instance. 
 */
/*
 * Create a DNS resolver instance that shares the endpoint's timer heap
 * and ioqueue.  Returns PJ_EINVALIDOP when resolver support is compiled
 * out (PJSIP_HAS_RESOLVER==0).
 */
PJ_DEF(pj_status_t) pjsip_endpt_create_resolver(pjsip_endpoint *endpt,
						pj_dns_resolver **p_resv)
{
#if PJSIP_HAS_RESOLVER
    PJ_ASSERT_RETURN(endpt && p_resv, PJ_EINVAL);
    /* Reuse the endpoint's pool factory, timer heap and ioqueue so the
     * resolver is driven by the same event loop as the endpoint.
     */
    return pj_dns_resolver_create( endpt->pf, NULL, 0, endpt->timer_heap,
				   endpt->ioqueue, p_resv);
#else
    PJ_UNUSED_ARG(endpt);
    PJ_UNUSED_ARG(p_resv);
    pj_assert(!"Resolver is disabled (PJSIP_HAS_RESOLVER==0)");
    return PJ_EINVALIDOP;
#endif
}

/*
 * Set DNS resolver to be used by the SIP resolver.
 */
/*
 * Set DNS resolver to be used by the SIP resolver.
 *
 * 'resv' is passed through as-is (it may legitimately be NULL to clear
 * the resolver), but a NULL endpoint is rejected -- consistent with the
 * guards in pjsip_endpt_get_resolver() and pjsip_endpt_create_resolver().
 */
PJ_DEF(pj_status_t) pjsip_endpt_set_resolver( pjsip_endpoint *endpt,
					      pj_dns_resolver *resv)
{
    PJ_ASSERT_RETURN(endpt, PJ_EINVAL);
    return pjsip_resolver_set_resolver(endpt->resolver, resv);
}

/*
 * Get the DNS resolver being used by the SIP resolver.
 */
PJ_DEF(pj_dns_resolver*) pjsip_endpt_get_resolver(pjsip_endpoint *endpt)
{
    /* Guard against NULL endpoint, then query the SIP resolver for the
     * DNS resolver it is using (may be NULL when none is set).
     */
    pj_dns_resolver *resv;

    PJ_ASSERT_RETURN(endpt, NULL);

    resv = pjsip_resolver_get_resolver(endpt->resolver);
    return resv;
}

/*
 * Resolve
 */
PJ_DEF(void) pjsip_endpt_resolve( pjsip_endpoint *endpt,
				  pj_pool_t *pool,
				  pjsip_host_info *target,
				  void *token,
				  pjsip_resolver_callback *cb)
{
    /* Asynchronous resolution: hand the request straight to the SIP
     * resolver; 'cb' is invoked with 'token' when resolution completes.
     */
    pjsip_resolve(endpt->resolver, pool, target, token, cb);
}

/*
 * Get transport manager.
 */
PJ_DEF(pjsip_tpmgr*) pjsip_endpt_get_tpmgr(pjsip_endpoint *endpt)
{
    /* Simple accessor for the endpoint's transport manager. */
    pjsip_tpmgr *mgr = endpt->transport_mgr;

    return mgr;
}

/*
 * Get ioqueue instance.
 */
PJ_DEF(pj_ioqueue_t*) pjsip_endpt_get_ioqueue(pjsip_endpoint *endpt)
{
    /* Simple accessor for the endpoint's ioqueue instance. */
    pj_ioqueue_t *ioq = endpt->ioqueue;

    return ioq;
}

/*
 * Find/create transport.
 */
PJ_DEF(pj_status_t) pjsip_endpt_acquire_transport(pjsip_endpoint *endpt,
						  pjsip_transport_type_e type,
						  const pj_sockaddr_t *remote,
						  int addr_len,
						  const pjsip_tpselector *sel,
						  pjsip_transport **transport)
{
    /* Transport lookup/creation is fully handled by the transport
     * manager; forward all arguments unchanged.
     */
    pj_status_t status;

    status = pjsip_tpmgr_acquire_transport(endpt->transport_mgr, type,
					   remote, addr_len, sel, transport);
    return status;
}


/*
 * Report error.
 */
/*
 * Log an error at level 1, appending ": [err N] <error text>" to the
 * caller's format string when it fits in the local buffer; otherwise the
 * format is logged unchanged.  Compiled out when PJ_LOG_MAX_LEVEL is 0.
 */
PJ_DEF(void) pjsip_endpt_log_error(  pjsip_endpoint *endpt,
				     const char *sender,
                                     pj_status_t error_code,
                                     const char *format,
                                     ... )
{
#if PJ_LOG_MAX_LEVEL > 0
    char newformat[256];
    int len;
    va_list marker;

    va_start(marker, format);

    PJ_UNUSED_ARG(endpt);

    /* Append the error text only when there is room for the format, the
     * ": [err %d] " prefix, and some of the error string.
     * NOTE(review): 'len' (int) is compared against a size_t expression;
     * harmless here since len >= 0, but verify against the project's
     * warning settings.
     */
    len = pj_ansi_strlen(format);
    if (len < sizeof(newformat)-30) {
	pj_str_t errstr;

	/* Build "<format>: [err N] <error text>" into newformat. */
	pj_ansi_strcpy(newformat, format);
	pj_ansi_snprintf(newformat+len, sizeof(newformat)-len-1,
			 ": [err %d] ", error_code);
	len += pj_ansi_strlen(newformat+len);

	errstr = pj_strerror( error_code, newformat+len, 
			      sizeof(newformat)-len-1);

	len += errstr.slen;
	newformat[len] = '\0';

	/* pj_log() consumes the caller's variadic args via 'marker'. */
	pj_log(sender, 1, newformat, marker);
    } else {
	pj_log(sender, 1, format, marker);
    }

    va_end(marker);
#else
    PJ_UNUSED_ARG(format);
    PJ_UNUSED_ARG(error_code);
    PJ_UNUSED_ARG(sender);
    PJ_UNUSED_ARG(endpt);
#endif
}


/*
 * Dump endpoint.
 */
/*
 * Dump endpoint diagnostics (pool factory, endpoint pool usage, resolver,
 * transports, timer heap) to the log at level 3.  'detail' requests
 * verbose output from the sub-dumpers.  Compiled to a stub when
 * PJ_LOG_MAX_LEVEL < 3.
 */
PJ_DEF(void) pjsip_endpt_dump( pjsip_endpoint *endpt, pj_bool_t detail )
{
#if PJ_LOG_MAX_LEVEL >= 3
    PJ_LOG(5, (THIS_FILE, "pjsip_endpt_dump()"));

    /* Lock mutex so the snapshot is taken while endpoint state is stable. */
    pj_mutex_lock(endpt->mutex);

    PJ_LOG(3, (THIS_FILE, "Dumping endpoint %p:", endpt));
    
    /* Dumping pool factory. */
    pj_pool_factory_dump(endpt->pf, detail);

    /* Pool health. */
    PJ_LOG(3, (THIS_FILE," Endpoint pool capacity=%u, used_size=%u",
	       pj_pool_get_capacity(endpt->pool),
	       pj_pool_get_used_size(endpt->pool)));

    /* Resolver (only when DNS resolver support is compiled in and one
     * has been set on the endpoint).
     */
#if PJSIP_HAS_RESOLVER
    if (pjsip_endpt_get_resolver(endpt)) {
	pj_dns_resolver_dump(pjsip_endpt_get_resolver(endpt), detail);
    }
#endif

    /* Transports. 
     */
    pjsip_tpmgr_dump_transports( endpt->transport_mgr );

    /* Timer. */
    PJ_LOG(3,(THIS_FILE, " Timer heap has %u entries", 
			pj_timer_heap_count(endpt->timer_heap)));

    /* Unlock mutex. */
    pj_mutex_unlock(endpt->mutex);
#else
    PJ_UNUSED_ARG(endpt);
    PJ_UNUSED_ARG(detail);
    PJ_LOG(3,(THIS_FILE, "pjsip_end_dump: can't dump because it's disabled."));
#endif
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -