
tcp.c
Linux kernel source code (C)
Page 1 of 5
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
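tcp_get_info() fills the same struct tcp_info that the TCP_INFO getsockopt below copies out, so all of these counters are observable from an ordinary userspace socket. A minimal sketch of the caller side, assuming glibc's <netinet/tcp.h> definition of struct tcp_info (error handling trimmed):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* struct tcp_info, TCP_INFO */

/* Sample kernel-maintained TCP state for a connected socket fd. */
static void dump_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt=%uus rttvar=%uus snd_cwnd=%u retrans=%u\n",
		       info.tcpi_rtt, info.tcpi_rttvar,
		       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}

Note that tcpi_rtt and tcpi_rttvar arrive already converted to microseconds by the jiffies_to_usecs() calls above.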
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
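As the TCP_NODELAY and TCP_CORK cases above show, both options are just views of the same nonagle field. The classic userspace pattern these serve is corking a socket while assembling a response, then uncorking to flush; a minimal sketch, with error handling omitted:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* TCP_CORK */
#include <unistd.h>

/* Hold small writes back while a response is assembled, then flush. */
static void send_corked(int fd, const void *hdr, size_t hlen,
			const void *body, size_t blen)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(fd, hdr, hlen);	/* queued, not yet sent as a small segment */
	write(fd, body, blen);
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));	/* flush */
}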
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += len;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}

EXPORT_SYMBOL(tcp_tso_segment);
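One detail worth spelling out in tcp_tso_segment(): for a packet from an untrusted (SKB_GSO_DODGY) source, gso_segs is simply re-derived by ceiling division of the remaining payload by gso_size. A standalone illustration of that arithmetic, with made-up values:

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int payload = 4000;	/* skb->len after pulling the TCP header */
	unsigned int mss = 1448;	/* skb_shinfo(skb)->gso_size */

	/* 4000 bytes at 1448 per segment -> 3 segments (1448 + 1448 + 1104). */
	printf("gso_segs = %u\n", DIV_ROUND_UP(payload, mss));
	return 0;
}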
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
			p = NULL;
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif

void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that
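Returning to the CONFIG_TCP_MD5SIG block above: md5_parse() in do_tcp_setsockopt() and these per-cpu hash pools back the RFC 2385 TCP MD5 signature option used by BGP. From userspace, a key is bound to a peer with the TCP_MD5SIG socket option before connect() or listen(). A sketch assuming the struct tcp_md5sig layout from <linux/tcp.h>, with the peer address and key as placeholders:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>	/* struct tcp_md5sig, TCP_MD5SIG */

/* Bind an MD5 key to one peer; this is what md5_parse() receives.
 * Hypothetical helper: 'key' must fit in TCP_MD5SIG_MAXKEYLEN bytes. */
static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const char *key)
{
	struct tcp_md5sig sig;

	memset(&sig, 0, sizeof(sig));
	memcpy(&sig.tcpm_addr, peer, sizeof(*peer));
	sig.tcpm_keylen = strlen(key);
	memcpy(sig.tcpm_key, key, sig.tcpm_keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
}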
