⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 proto.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
		/* Each service code in the list is a 32-bit value. */
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	/* The caller's buffer must hold the primary code plus the list. */
	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	/* Copy the primary service code first, then the remaining list. */
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

/*
 * do_dccp_getsockopt - handle SOL_DCCP-level getsockopt() requests.
 * @sk:      socket being queried
 * @level:   option level (callers only dispatch here for SOL_DCCP)
 * @optname: DCCP_SOCKOPT_* selector
 * @optval:  user buffer receiving the option value
 * @optlen:  user in/out length for @optval
 *
 * Returns 0 on success or a negative errno.  Option numbers 128..191 are
 * forwarded to the RX CCID, 192..255 to the TX CCID (CCID-private options).
 */
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	/* All simple options below are at least int-sized. */
	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		/*
		 * Historical option kept only so old binaries keep working.
		 * NOTE(review): returns 0 without writing *optval/*optlen.
		 */
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		/* Current maximum packet size (cached MSS). */
		val = dp->dccps_mss_cache;
		len = sizeof(val);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		/* Checksum coverage applied to sent packets. */
		val = dp->dccps_pcslen;
		len = sizeof(val);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		/* Checksum coverage accepted on received packets. */
		val = dp->dccps_pcrlen;
		len = sizeof(val);
		break;
	case 128 ... 191:
		/* CCID-private options, receive half-connection. */
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		/* CCID-private options, transmit half-connection. */
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	/* Common exit for the int-valued options handled above. */
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * dccp_getsockopt - getsockopt() entry point for DCCP sockets.
 *
 * Non-SOL_DCCP levels are delegated to the address-family specific
 * handler (IPv4/IPv6); SOL_DCCP is handled by do_dccp_getsockopt().
 */
int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
/*
 * compat_dccp_getsockopt - 32-bit compat getsockopt() entry point.
 * Same split as dccp_getsockopt(), but non-DCCP levels go through the
 * inet compat helper.
 */
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

/*
 * dccp_sendmsg - sendmsg() entry point for DCCP sockets.
 * @iocb: kernel iocb (unused here)
 * @sk:   socket to send on
 * @msg:  message header describing the user iovec
 * @len:  payload length; must fit within the cached MSS (one packet)
 *
 * Queues a single skb on the write queue and kicks the output path.
 * Returns @len on success or a negative errno.
 */
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	/* DCCP is datagram based: no fragmentation across packets. */
	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	/* Optional sysctl cap on queued, not-yet-transmitted packets. */
	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process.
	 */
	/* Wait for a connection to finish. 
*/
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	/* Drop the socket lock while the allocator may sleep. */
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	/* Leave headroom for the protocol headers, then copy the payload. */
	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk,0);
out_release:
	release_sock(sk);
	/* rc == 0 means the skb was queued; report the full length. */
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

/*
 * dccp_recvmsg - recvmsg() entry point for DCCP sockets.
 * @iocb:     kernel iocb (unused here)
 * @sk:       socket to receive from
 * @msg:      destination message header / iovec
 * @len:      size of the caller's buffer
 * @nonblock: non-blocking flag
 * @flags:    MSG_* flags (MSG_PEEK honoured for data packets)
 * @addr_len: unused by DCCP
 *
 * Delivers exactly one queued DATA/DATAACK payload (possibly truncated,
 * setting MSG_TRUNC).  RESET/CLOSE packets yield a 0-byte read (EOF).
 * Returns the number of bytes copied or a negative errno.
 */
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	/* A listening socket has no data to read. */
	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		/* Only DATA/DATAACK carry user payload. */
		if (dh->dccph_type == DCCP_PKT_DATA ||
		    dh->dccph_type == DCCP_PKT_DATAACK)
			goto found_ok_skb;

		/* RESET/CLOSE act as end-of-stream: report a 0-byte read. */
		if (dh->dccph_type == DCCP_PKT_RESET ||
		    dh->dccph_type == DCCP_PKT_CLOSE) {
			dccp_pr_debug("found fin ok!\n");
			len = 0;
			goto found_fin_ok;
		}
		/* Any other packet type is consumed and skipped. */
		dccp_pr_debug("packet_type=%s\n",
			      dccp_packet_name(dh->dccph_type));
		sk_eat_skb(sk, skb, 0);
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		/* Sleep until data arrives or the timeout expires. */
		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		/* Truncate to the caller's buffer; flag any loss. */
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

/*
 * inet_dccp_listen - listen() entry point for DCCP sockets.
 * @sock:    socket being switched to listening mode
 * @backlog: maximum accept backlog
 *
 * Returns 0 on success or a negative errno.
 */
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 
*/
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

/*
 * State-transition table for dccp_close_state(): indexed by the current
 * sk_state, each entry packs the next state (DCCP_STATE_MASK bits) and
 * optionally DCCP_ACTION_FIN, which tells the caller to send a CLOSE.
 */
static const unsigned char dccp_new_state[] = {
	/* current state:   new state:      action:	*/
	[0]		  = DCCP_CLOSED,
	[DCCP_OPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_REQUESTING] = DCCP_CLOSED,
	[DCCP_PARTOPEN]	  = DCCP_CLOSING | DCCP_ACTION_FIN,
	[DCCP_LISTEN]	  = DCCP_CLOSED,
	[DCCP_RESPOND]	  = DCCP_CLOSED,
	[DCCP_CLOSING]	  = DCCP_CLOSED,
	[DCCP_TIME_WAIT]  = DCCP_CLOSED,
	[DCCP_CLOSED]	  = DCCP_CLOSED,
};

/*
 * dccp_close_state - advance the socket along the close state machine.
 *
 * Moves @sk to the next state per dccp_new_state[] and returns non-zero
 * iff the caller must transmit a CLOSE packet (DCCP_ACTION_FIN set).
 */
static int dccp_close_state(struct sock *sk)
{
	const int next = dccp_new_state[sk->sk_state];
	const int ns = next & DCCP_STATE_MASK;

	if (ns != sk->sk_state)
		dccp_set_state(sk, ns);

	return next & DCCP_ACTION_FIN;
}

/*
 * dccp_close - close() entry point for DCCP sockets.
 * @sk:      socket being closed
 * @timeout: linger time to wait for the close handshake
 *
 * Flushes unread data, initiates the CLOSE handshake where required,
 * orphans the socket and either destroys it immediately (already in
 * CLOSED) or leaves it to the protocol timers.
 */
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		/* State machine says we must announce the close. */
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/* Snapshot the state before we let go of the user lock. */
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);

	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. 
*/
out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

/*
 * dccp_shutdown - shutdown() entry point for DCCP sockets.
 * Currently a no-op apart from a debug trace; only logs entry.
 */
void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

/*
 * dccp_mib_init - allocate the two per-cpu SNMP/MIB counter blocks.
 * Returns 0 on success, -ENOMEM on allocation failure (nothing leaked).
 */
static int __init dccp_mib_init(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}

/* Release both per-cpu MIB blocks; free_percpu(NULL) is a no-op. */
static void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics[0]);
	free_percpu(dccp_statistics[1]);
	dccp_statistics[0] = dccp_statistics[1] = NULL;
}

/* Module parameter: requested number of established-hash buckets. */
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

/*
 * dccp_init - module init: set up slab caches and hash tables.
 *
 * Sizes the established (ehash) and bind (bhash) hash tables from the
 * amount of physical memory (or the thash_entries parameter), then
 * brings up the MIB counters, ack-vector and sysctl subsystems.
 * Unwinds everything already allocated on any failure.
 */
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc = -ENOBUFS;

	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	/* Smallest page order whose allocation covers the goal. */
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		/* Bucket count rounded down to a power of two. */
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
		/* On failure retry with half the size, down to order 1. */
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	/* The bind hash starts from the same order and shrinks further. */
	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		/* Cap the bind table at 64K buckets. */
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;
	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;
	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	dccp_timestamping_init();
out:
	return rc;

	/* Error unwind: release in reverse order of acquisition. */
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
	goto out;
}

/*
 * dccp_fini - module exit: tear down everything dccp_init() built.
 *
 * NOTE(review): the hash tables are freed here with
 * get_order(size * sizeof(bucket)) while dccp_init() allocated (and
 * its error path frees) by the raw ehash_order/bhash_order page order.
 * Since ehash_size is rounded DOWN to a power of two, the recomputed
 * order may differ from the allocation order — worth confirming
 * against the allocator's behavior.
 */
static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -