/* sockets.c */
#if LWIP_CHECKSUM_ON_COPY && LWIP_NETIF_TX_SINGLE_PBUF
err = sock->conn->last_err = udp_sendto_chksum(sock->conn->pcb.udp, p,
remote_addr, remote_port, 1, chksum);
#else /* LWIP_CHECKSUM_ON_COPY && LWIP_NETIF_TX_SINGLE_PBUF */
err = sock->conn->last_err = udp_sendto(sock->conn->pcb.udp, p,
remote_addr, remote_port);
#endif /* LWIP_CHECKSUM_ON_COPY && LWIP_NETIF_TX_SINGLE_PBUF */
#else /* LWIP_UDP */
err = ERR_ARG;
#endif /* LWIP_UDP */
}
UNLOCK_TCPIP_CORE();
pbuf_free(p);
} else {
err = ERR_MEM;
}
}
#else /* LWIP_TCPIP_CORE_LOCKING */
/* initialize a buffer */
buf.p = buf.ptr = NULL;
#if LWIP_CHECKSUM_ON_COPY
buf.flags = 0;
#endif /* LWIP_CHECKSUM_ON_COPY */
if (to) {
inet_addr_to_ipaddr(&buf.addr, &to_in->sin_addr);
remote_port = ntohs(to_in->sin_port);
netbuf_fromport(&buf) = remote_port;
} else {
remote_port = 0;
ip_addr_set_any(&buf.addr);
netbuf_fromport(&buf) = 0;
}
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
s, data, short_size, flags));
ip_addr_debug_print(SOCKETS_DEBUG, &buf.addr);
LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
/* make the buffer point to the data that should be sent */
#if LWIP_NETIF_TX_SINGLE_PBUF
/* Allocate a new netbuf and copy the data into it. */
if (netbuf_alloc(&buf, short_size) == NULL) {
err = ERR_MEM;
} else {
#if LWIP_CHECKSUM_ON_COPY
if (sock->conn->type != NETCONN_RAW) {
u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
netbuf_set_chksum(&buf, chksum);
err = ERR_OK;
} else
#endif /* LWIP_CHECKSUM_ON_COPY */
{
err = netbuf_take(&buf, data, short_size);
}
}
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
err = netbuf_ref(&buf, data, short_size);
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
if (err == ERR_OK) {
/* send the data */
err = netconn_send(sock->conn, &buf);
}
/* deallocate the buffer */
netbuf_free(&buf);
#endif /* LWIP_TCPIP_CORE_LOCKING */
sock_set_errno(sock, err_to_errno(err));
return (err == ERR_OK ? short_size : -1);
}
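
/* Illustrative usage sketch, kept inside "#if 0" so it is not compiled into
 * this file: a minimal UDP sender built on lwip_socket()/lwip_sendto() above.
 * It assumes the BSD-style declarations from "lwip/sockets.h" and
 * "lwip/inet.h"; the peer address and port are hypothetical placeholders. */
#if 0
#include <string.h>
#include "lwip/sockets.h"
#include "lwip/inet.h"

static void udp_send_example(void)
{
  int s = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
  struct sockaddr_in dst;
  const char msg[] = "hello";

  if (s < 0) {
    return; /* errno already set (ENOBUFS/ENFILE), see lwip_socket() below */
  }
  memset(&dst, 0, sizeof(dst));
  dst.sin_family = AF_INET;
  dst.sin_port = htons(7);                          /* hypothetical port */
  dst.sin_addr.s_addr = inet_addr("192.168.1.100"); /* hypothetical peer */

  /* lwip_sendto() returns the number of bytes accepted or -1 on error. */
  (void)lwip_sendto(s, msg, sizeof(msg) - 1, 0,
                    (struct sockaddr *)&dst, sizeof(dst));
  lwip_close(s);
}
#endif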
int
lwip_socket(int domain, int type, int protocol)
{
struct netconn *conn;
int i;
LWIP_UNUSED_ARG(domain);
/* create a netconn */
switch (type) {
case SOCK_RAW:
conn = netconn_new_with_proto_and_callback(NETCONN_RAW, (u8_t)protocol, event_callback);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
break;
case SOCK_DGRAM:
conn = netconn_new_with_callback( (protocol == IPPROTO_UDPLITE) ?
NETCONN_UDPLITE : NETCONN_UDP, event_callback);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
break;
case SOCK_STREAM:
conn = netconn_new_with_callback(NETCONN_TCP, event_callback);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
if (conn != NULL) {
/* Prevent automatic window updates, we do this on our own! */
netconn_set_noautorecved(conn, 1);
}
break;
default:
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
domain, type, protocol));
set_errno(EINVAL);
return -1;
}
if (!conn) {
LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
set_errno(ENOBUFS);
return -1;
}
i = alloc_socket(conn, 0);
if (i == -1) {
netconn_delete(conn);
set_errno(ENFILE);
return -1;
}
conn->socket = i;
LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
set_errno(0);
return i;
}
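
/* Illustrative sketch (not compiled): how the (type, protocol) pairs accepted
 * by lwip_socket() above map onto netconn types. The constants are the usual
 * ones from "lwip/sockets.h"; protocol 1 for the raw socket is ICMP. */
#if 0
int tcp_s  = lwip_socket(AF_INET, SOCK_STREAM, 0);               /* NETCONN_TCP */
int udp_s  = lwip_socket(AF_INET, SOCK_DGRAM,  0);               /* NETCONN_UDP */
int lite_s = lwip_socket(AF_INET, SOCK_DGRAM,  IPPROTO_UDPLITE); /* NETCONN_UDPLITE */
int raw_s  = lwip_socket(AF_INET, SOCK_RAW,    1 /* ICMP */);    /* NETCONN_RAW */
#endif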
int
lwip_write(int s, const void *data, size_t size)
{
return lwip_send(s, data, size, 0);
}
/**
* Go through readset_in, writeset_in and exceptset_in and check which of the
* sockets set in them currently have an event pending. On return, readset_out,
* writeset_out and exceptset_out contain exactly the sockets that had events:
*   read   - lastdata != NULL or rcvevent > 0
*   write  - sendevent != 0
*   except - errevent != 0
*
* Note: exceptset only reports sockets with a pending error event.
*
* @param maxfdp1 the highest socket index in the sets
* @param readset_in: set of sockets to check for read events
* @param writeset_in: set of sockets to check for write events
* @param exceptset_in: set of sockets to check for error events
* @param readset_out: set of sockets that had read events
* @param writeset_out: set of sockets that had write events
* @param exceptset_out: set of sockets that had error events
* @return number of sockets that had events (read/write/exception) (>= 0)
*/
static int
lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
{
int i, nready = 0;
fd_set lreadset, lwriteset, lexceptset;
struct lwip_sock *sock;
SYS_ARCH_DECL_PROTECT(lev);
FD_ZERO(&lreadset);
FD_ZERO(&lwriteset);
FD_ZERO(&lexceptset);
/* Go through each socket in each list to count the number of sockets which
currently match */
for(i = 0; i < maxfdp1; i++) {
void* lastdata = NULL;
s16_t rcvevent = 0;
u16_t sendevent = 0;
u16_t errevent = 0;
/* First get the socket's status (protected)... */
SYS_ARCH_PROTECT(lev);
sock = tryget_socket(i);
if (sock != NULL) {
lastdata = sock->lastdata;
rcvevent = sock->rcvevent;
sendevent = sock->sendevent;
errevent = sock->errevent;
}
SYS_ARCH_UNPROTECT(lev);
/* ... then examine it: */
/* See if netconn of this socket is ready for read */
if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
FD_SET(i, &lreadset);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
nready++;
}
/* See if netconn of this socket is ready for write */
if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
FD_SET(i, &lwriteset);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
nready++;
}
/* See if netconn of this socket had an error */
if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
FD_SET(i, &lexceptset);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
nready++;
}
}
/* copy local sets to the ones provided as arguments */
*readset_out = lreadset;
*writeset_out = lwriteset;
*exceptset_out = lexceptset;
LWIP_ASSERT("nready >= 0", nready >= 0);
return nready;
}
/**
* Processing exceptset is limited: only sockets with a pending error event
* (NETCONN_EVT_ERROR) are reported.
*/
int
lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
struct timeval *timeout)
{
u32_t waitres = 0;
int nready;
fd_set lreadset, lwriteset, lexceptset;
u32_t msectimeout;
struct lwip_select_cb select_cb;
err_t err;
int i;
SYS_ARCH_DECL_PROTECT(lev);
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
timeout ? (s32_t)timeout->tv_sec : (s32_t)-1,
timeout ? (s32_t)timeout->tv_usec : (s32_t)-1));
/* Go through each socket in each list to count the number of sockets which
currently match */
nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
/* If we don't have any current events, then suspend if we are supposed to */
if (!nready) {
if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
/* This is OK as the local fdsets are empty and nready is zero,
or we would have returned earlier. */
goto return_copy_fdsets;
}
/* None ready: add our semaphore to list:
We don't actually need any dynamic memory. Our entry on the
list is only valid while we are in this function, so it's ok
to use local variables. */
select_cb.next = NULL;
select_cb.prev = NULL;
select_cb.readset = readset;
select_cb.writeset = writeset;
select_cb.exceptset = exceptset;
select_cb.sem_signalled = 0;
err = sys_sem_new(&select_cb.sem, 0);
if (err != ERR_OK) {
/* failed to create semaphore */
set_errno(ENOMEM);
return -1;
}
/* Protect the select_cb_list */
SYS_ARCH_PROTECT(lev);
/* Put this select_cb on top of list */
select_cb.next = select_cb_list;
if (select_cb_list != NULL) {
select_cb_list->prev = &select_cb;
}
select_cb_list = &select_cb;
/* Increasing this counter tells event_callback that the list has changed. */
select_cb_ctr++;
/* Now we can safely unprotect */
SYS_ARCH_UNPROTECT(lev);
/* Increase select_waiting for each socket we are interested in */
for(i = 0; i < maxfdp1; i++) {
if ((readset && FD_ISSET(i, readset)) ||
(writeset && FD_ISSET(i, writeset)) ||
(exceptset && FD_ISSET(i, exceptset))) {
struct lwip_sock *sock = tryget_socket(i);
LWIP_ASSERT("sock != NULL", sock != NULL);
SYS_ARCH_PROTECT(lev);
sock->select_waiting++;
LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
SYS_ARCH_UNPROTECT(lev);
}
}
/* Call lwip_selscan again: there could have been events between
the last scan (without us on the list) and putting us on the list! */
nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
if (!nready) {
/* Still none ready, just wait to be woken */
if (timeout == 0) {
/* Wait forever */
msectimeout = 0;
} else {
msectimeout = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500)/1000));
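/* e.g. tv_sec=2, tv_usec=600000 gives 2*1000 + 600500/1000 = 2600 ms;
   tv_sec=0, tv_usec=400 rounds to 0 and is bumped to 1 ms just below */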
if (msectimeout == 0) {
/* Wait 1ms at least (0 means wait forever) */
msectimeout = 1;
}
}
waitres = sys_arch_sem_wait(&select_cb.sem, msectimeout);
}
/* Decrease select_waiting for each socket we are interested in */
for(i = 0; i < maxfdp1; i++) {
if ((readset && FD_ISSET(i, readset)) ||
(writeset && FD_ISSET(i, writeset)) ||
(exceptset && FD_ISSET(i, exceptset))) {
struct lwip_sock *sock = tryget_socket(i);
LWIP_ASSERT("sock != NULL", sock != NULL);
SYS_ARCH_PROTECT(lev);
sock->select_waiting--;
LWIP_ASSERT("sock->select_waiting >= 0", sock->select_waiting >= 0);
SYS_ARCH_UNPROTECT(lev);
}
}
/* Take us off the list */
SYS_ARCH_PROTECT(lev);
if (select_cb.next != NULL) {
select_cb.next->prev = select_cb.prev;
}
if (select_cb_list == &select_cb) {
LWIP_ASSERT("select_cb.prev == NULL", select_cb.prev == NULL);
select_cb_list = select_cb.next;
} else {
LWIP_ASSERT("select_cb.prev != NULL", select_cb.prev != NULL);
select_cb.prev->next = select_cb.next;
}
/* Increasing this counter tells event_callback that the list has changed. */
select_cb_ctr++;
SYS_ARCH_UNPROTECT(lev);
sys_sem_free(&select_cb.sem);
if (waitres == SYS_ARCH_TIMEOUT) {
/* Timeout */
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
/* This is OK as the local fdsets are empty and nready is zero,
or we would have returned earlier. */
goto return_copy_fdsets;
}
/* See what's set */
nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
}
LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
return_copy_fdsets:
set_errno(0);
if (readset) {
*readset = lreadset;
}
if (writeset) {
*writeset = lwriteset;
}
if (exceptset) {
*exceptset = lexceptset;
}
return nready;
}
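
/* Illustrative usage sketch (not compiled): waiting for a single socket to
 * become readable via lwip_select() above. It assumes the fd_set/timeval
 * definitions provided by "lwip/sockets.h"; 's' is any valid socket
 * descriptor and 'timeout_ms' a hypothetical caller-chosen timeout. */
#if 0
static int wait_readable(int s, u32_t timeout_ms)
{
  fd_set rset;
  struct timeval tv;
  int nready;

  FD_ZERO(&rset);
  FD_SET(s, &rset);
  tv.tv_sec  = timeout_ms / 1000;
  tv.tv_usec = (timeout_ms % 1000) * 1000;

  /* maxfdp1 is the highest descriptor plus one, as in BSD select() */
  nready = lwip_select(s + 1, &rset, NULL, NULL, &tv);
  if (nready < 0) {
    return -1;                       /* error, errno set by lwip_select() */
  }
  return FD_ISSET(s, &rset) ? 1 : 0; /* 1 = readable, 0 = timeout */
}
#endif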
/**
* Callback registered in the netconn layer for each socket-netconn.
* Processes recvevent (data available) and wakes up tasks waiting for select.
*/
static void
event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
int s;
struct lwip_sock *sock;
struct lwip_select_cb *scb;
int last_select_cb_ctr;
SYS_ARCH_DECL_PROTECT(lev);
LWIP_UNUSED_ARG(len);
/* Get socket */
if (conn) {
s = conn->socket;
if (s < 0) {
/* Data comes in right away after an accept, even though
* the server task might not have created a new socket yet.
* Just count down (or up) if that's the case and we
* will use the data later. Note that only receive events
* can happen before the new socket is set up. */
SYS_ARCH_PROTECT(lev);
if (conn->socket < 0) {
if (evt == NETCONN_EVT_RCVPLUS) {
conn->socket--;
}
SYS_ARCH_UNPROTECT(lev);
return;
}
s = conn->socket;
SYS_ARCH_UNPROTECT(lev);
}
sock = get_socket(s);
if (!sock) {
return;
}
} else {
return;
}
SYS_ARCH_PROTECT(lev);
/* Set event as required */
switch (evt) {
case NETCONN_EVT_RCVPLUS:
sock->rcvevent++;
break;
case NETCONN_EVT_RCVMINUS:
sock->rcvevent--;
break;
case NETCONN_EVT_SENDPLUS:
sock->sendevent = 1;
break;
case NETCONN_EVT_SENDMINUS:
sock->sendevent = 0;
break;
case NETCONN_EVT_ERROR:
sock->errevent = 1;
break;
default:
LWIP_ASSERT("unknown event", 0);
break;