📄 inet_connection_sock.c
字号:
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
/* Printed by the ICSK timer code when a timer fires with an unknown event. */
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This array holds the first and last local port number.
 */
int sysctl_local_port_range[2] = { 32768, 61000 };
DEFINE_SEQLOCK(sysctl_port_range_lock);

/*
 * Snapshot the ephemeral port range under the seqlock so a concurrent
 * sysctl write cannot hand back a torn (mismatched low/high) pair.
 */
void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_port_range_lock);

		*low = sysctl_local_port_range[0];
		*high = sysctl_local_port_range[1];
	} while (read_seqretry(&sysctl_port_range_lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

/*
 * Walk the owners of a bind bucket and decide whether binding @sk to the
 * bucket's port would conflict with an existing socket.  Returns non-zero
 * on conflict (the loop 'break's with @node non-NULL when a conflicting
 * owner is found, so the walk terminating normally means no conflict).
 */
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		/* Only sockets on the same (or an unbound) device can clash;
		 * IPv6-only sockets never conflict with an IPv4 bind. */
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			/* SO_REUSEADDR on both sides permits sharing, except
			 * against a listening socket. */
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				/* Wildcard on either side, or identical local
				 * address => conflict. */
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/*
 * Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct inet_hashinfo *hashinfo,
		      struct sock *sk, unsigned short snum,
		      int (*bind_conflict)(const struct sock *sk,
					   const struct inet_bind_bucket *tb))
{
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (!snum) {
		/* Caller asked for any port: probe the ephemeral range from a
		 * random starting point, wrapping at the top. */
		int remaining, rover, low, high;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		rover = net_random() % remaining + low;

		do {
			head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->port == rover)
					goto next;
			/* No bucket owns this port: keep head->lock held and
			 * fall through to allocate it below. */
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold it's mutex.
		 */
		snum = rover;
	} else {
		/* Specific port requested: look up its bucket (if any). */
		head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		/* sk_reuse > 1 (SO_REUSEADDR forced) skips conflict checks. */
		if (sk->sk_reuse > 1)
			goto success;
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			/* Every owner set fastreuse and we allow reuse too. */
			goto success;
		} else {
			ret = 1;
			if (bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		/* First owner decides whether the bucket is fast-reusable. */
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		/* A non-reusing (or listening) joiner poisons fastreuse. */
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		/* Drop the socket lock while sleeping so the softirq path can
		 * queue new connections; re-take it before re-checking. */
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	init_timer(&icsk->icsk_retransmit_timer);
	init_timer(&icsk->icsk_delack_timer);
	init_timer(&sk->sk_timer);

	icsk->icsk_retransmit_timer.function = retransmit_handler;
	icsk->icsk_delack_timer.function = delack_handler;
	sk->sk_timer.function = keepalive_handler;

	/* All three timers get the socket itself as their callback cookie. */
	icsk->icsk_retransmit_timer.data =
		icsk->icsk_delack_timer.data =
			sk->sk_timer.data = (unsigned long)sk;

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

/* Clear any pending retransmit/delack/keepalive timers on @sk. */
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

/* (Re)arm the keepalive timer to fire @len jiffies from now. */
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

/*
 * Route the reply for a connection request: build a flow key from the
 * request (honouring any source-routing option) and look up an output
 * route.  Returns the dst entry, or NULL if no route exists or strict
 * source routing cannot be satisfied.
 */
struct dst_entry* inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  /* Source-route: send to the
						   * first hop, not the peer. */
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	/* Strict source routing requires the next hop to be the final
	 * destination; otherwise drop the route. */
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -