
myinet_connection_sock.c

Collection: An implementation of a TCP/IP protocol stack based on Linux
Language: C
/*
 * myinet_connection_sock.c - connection-oriented socket helpers for this
 * Linux-based TCP/IP stack implementation.
 *
 * The headers below are what this code is assumed to need (the original
 * listing begins at the first #define); project-local declarations such as
 * mysysctl_max_syn_backlog, mysysctl_local_port_range, myip_route_output_flow,
 * myinet_bind_bucket_create, myinet_bind_hash and MY_IPPROTO_UDP are expected
 * to come from the project's own headers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fcntl.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/request_sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/ip.h>

#define AF_INET_FAMILY(fam) 1

/* Hash a (remote address, remote port) pair into the listener's SYN table. */
static inline u32 myinet_synq_hash(const u32 raddr, const u16 rport,
				   const u32 rnd, const u16 synq_hsize)
{
	return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
}

/* Return nonzero if binding sk to the port owned by tb would conflict with an existing owner. */
int myinet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb)
{
	const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 && !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) {
				const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

/* Install the retransmit, delayed-ACK and keepalive timer handlers for sk. */
void myinet_csk_init_xmit_timers(struct sock *sk,
				 void (*retransmit_handler)(unsigned long),
				 void (*delack_handler)(unsigned long),
				 void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	init_timer(&icsk->icsk_retransmit_timer);
	init_timer(&icsk->icsk_delack_timer);
	init_timer(&sk->sk_timer);

	icsk->icsk_retransmit_timer.function = retransmit_handler;
	icsk->icsk_delack_timer.function = delack_handler;
	sk->sk_timer.function = keepalive_handler;

	icsk->icsk_retransmit_timer.data =
		icsk->icsk_delack_timer.data =
		sk->sk_timer.data = (unsigned long)sk;

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

/* Stop all transmit-side timers and clear any pending timer state. */
void myinet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

void myinet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	/* Stub: not implemented in this stack. */
}

/* (Re)arm the keepalive timer to fire after len jiffies. */
void myinet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

/* Final teardown of a closed, orphaned socket. */
void myinet_csk_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot have been hashed because it is in TCP_CLOSE state. */
	BUG_TRAP(sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->num, it must be bound. */
	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);
	//xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

/* Allocate and initialise the SYN (request) queue of a listening socket. */
int myreqsk_queue_alloc(struct request_sock_queue *queue, const int nr_table_entries)
{
	const int lopt_size = sizeof(struct listen_sock) +
			      nr_table_entries * sizeof(struct request_sock *);
	struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);

	if (lopt == NULL)
		return -ENOMEM;
	memset(lopt, 0, lopt_size);

	for (lopt->max_qlen_log = 6;
	     (1 << lopt->max_qlen_log) < mysysctl_max_syn_backlog;
	     lopt->max_qlen_log++)
		;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = queue->rskq_accept_tail = NULL;
	lopt->nr_table_entries = nr_table_entries;

	printk(KERN_INFO "%s:%d: the max_qlen_log: %d, nr_table_entries: %d\n",
	       __FUNCTION__, __LINE__, lopt->max_qlen_log, lopt->nr_table_entries);

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}

/* Move sk into TCP_LISTEN: allocate the accept queue, grab a local port and hash the socket. */
int myinet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = myreqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

/* Stop the keepalive timer. */
void myinet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

/* Tear down a listening socket, disposing of accepted but not yet claimed children. */
void myinet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	printk(KERN_INFO "%s:%d: myinet_csk_listen_stop!\n", __FUNCTION__, __LINE__);

	myinet_csk_delete_keepalive_timer(sk);

	/* Detach the accept queue, then free it along with the SYN table. */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(sk->sk_prot->orphan_count);

		myinet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}

/* Sleep until a completed connection shows up on the accept queue, or until timeout/signal. */
static int myinet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	for (;;) {
		printk(KERN_INFO "%s:%d: accept wait for connect!\n", __FUNCTION__, __LINE__);
		prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		printk(KERN_INFO "%s:%d: accept get a connect!\n", __FUNCTION__, __LINE__);
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/* Dequeue a completed connection, blocking if the socket's receive timeout allows it. */
struct sock *myinet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = myinet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

/* Find (or allocate) a local port and its bind bucket for sk. */
int myinet_csk_get_port(struct inet_hashinfo *hashinfo,
			struct sock *sk, unsigned short snum,
			int (*bind_conflict)(const struct sock *sk,
					     const struct inet_bind_bucket *tb))
{
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (!snum) {
		/* No port requested: search the local port range for a free one. */
		int low = mysysctl_local_port_range[0];
		int high = mysysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover = net_random() % (high - low) + low;

		do {
			head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold it's mutex.
		 */
		snum = rover;
	} else {
		head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse > 1)
			goto success;
		if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = myinet_bind_bucket_create(hashinfo->bind_bucket_cachep,
						   head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		myinet_bind_hash(sk, tb, snum);
	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
	ret = 0;
fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

/* Look up a pending connection request in the SYN table by remote addr/port and local addr. */
struct request_sock *myinet_csk_search_req(const struct sock *sk,
					   struct request_sock ***prevp,
					   const __u16 rport, const __u32 raddr,
					   const __u32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	printk(KERN_INFO "%s:%d: raddr: %u.%u.%u.%u, rport: %u, hash_rnd: %u, nrentries: %d\n",
	       __FUNCTION__, __LINE__, NIPQUAD(raddr), rport,
	       lopt->hash_rnd, lopt->nr_table_entries);

	for (prev = &lopt->syn_table[myinet_synq_hash(raddr, rport, lopt->hash_rnd,
						      lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport && ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			BUG_TRAP(!req->sk);
			*prevp = prev;
			break;
		}
	}
	return req;
}

/* Route the reply (SYN-ACK) for a pending connection request. */
struct dst_entry *myinet_csk_route_req(struct sock *sk, const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr : ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    /* map the protocol number into this stack's private range */
			    .proto = sk->sk_protocol + (MY_IPPROTO_UDP - IPPROTO_UDP),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };

	if (myip_route_output_flow(&rt, &fl, sk, 0)) {
		//MYIP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		//MYIP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}

/* Insert a connection request into the SYN table and account for it on the listener. */
void myinet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				     unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = myinet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				       lopt->hash_rnd, lopt->nr_table_entries);

	printk(KERN_INFO "%s:%d: raddr: %u.%u.%u.%u, rport: %u, hash_rnd: %u, nrentries: %d\n",
	       __FUNCTION__, __LINE__, NIPQUAD(inet_rsk(req)->rmt_addr),
	       inet_rsk(req)->rmt_port, lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Copy a parent socket into a fresh sock for a newly created connection. */
struct sock *mysk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		memcpy(newsk, sk, sk->sk_prot->obj_size);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		//if (unlikely(xfrm_sk_clone_policy(newsk))) {
		//	newsk->sk_destruct = NULL;
		//	sk_free(newsk);
		//	newsk = NULL;
		//	goto out;
		//}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		sk_refcnt_debug_inc(newsk);

		newsk->sk_socket = NULL;
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

/* Clone the listening socket into TCP_SYN_RECV state for the given connection request. */
struct sock *myinet_csk_clone(struct sock *sk, const struct request_sock *req,
			      const gfp_t priority)
{
	struct sock *newsk = mysk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
	}
	return newsk;
}
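For context, a connection-oriented protocol in this stack would enter these helpers from its socket-layer listen()/accept() callbacks, the same way the stock kernel wires inet_csk_listen_start() into inet_listen() (net/ipv4/af_inet.c) and points tcp_prot's accept at inet_csk_accept(). The sketch below is illustrative only and not part of the original file: the wrapper name mytcp_listen and the constant MYTCP_SYNQ_HSIZE are assumptions made up for this example.

/*
 * Minimal usage sketch, assuming a hypothetical mytcp_listen() entry point.
 * MYTCP_SYNQ_HSIZE is an assumed SYN-queue hash size; myinet_synq_hash()
 * masks with (size - 1), so it must be a power of two.
 */
#include <linux/net.h>
#include <net/sock.h>
#include <net/tcp_states.h>

int myinet_csk_listen_start(struct sock *sk, const int nr_table_entries);	/* from this file */

#define MYTCP_SYNQ_HSIZE 512

static int mytcp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	/* Enter TCP_LISTEN only the first time; later calls just refresh the backlog. */
	if (sk->sk_state != TCP_LISTEN) {
		err = myinet_csk_listen_start(sk, MYTCP_SYNQ_HSIZE);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;
out:
	release_sock(sk);
	return err;
}

On the accept side, the protocol's accept callback would similarly hand off to myinet_csk_accept(), which blocks in myinet_csk_wait_for_connect() until a request completes the handshake and lands on the accept queue.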
