
📄 sock.c

📁 Linux kernel source code, packaged as a compressed archive; this is the source code accompanying the book 《Linux内核》 (The Linux Kernel)
💻 Language: C
📖 Page 1 of 2
/* (fragment: the opening of this function is cut off by the page split;
   it is the tail of sk_alloc()) */
	}
	return sk;
}

void sk_free(struct sock *sk)
{
#ifdef CONFIG_FILTER
	struct sk_filter *filter;
#endif

	if (sk->destruct)
		sk->destruct(sk);

#ifdef CONFIG_FILTER
	filter = sk->filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->filter = NULL;
	}
#endif

	if (atomic_read(&sk->omem_alloc))
		printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n",
		       atomic_read(&sk->omem_alloc));

	kmem_cache_free(sk_cachep, sk);
}

void __init sk_init(void)
{
	sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
				      SLAB_HWCACHE_ALIGN, 0, 0);
	if (!sk_cachep)
		printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");

	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		/* the listing assigned sysctl_wmem_default twice here;
		   sysctl_rmem_default is clearly what was meant */
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->wmem_alloc);
	sk->write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->rmem_alloc);
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->omem_alloc);
}
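/*
 * [Editor's note: illustrative sketch, not part of the original sock.c.]
 * The option-memory pair above (sock_kmalloc/sock_kfree_s) is meant to be
 * used symmetrically, passing the same size to both calls so omem_alloc
 * balances out. A setsockopt handler might use it like this; the names
 * example_set_opt and struct example_opt are hypothetical.
 */
static int example_set_opt(struct sock *sk, char *optval, int optlen)
{
	struct example_opt *opt;

	if (optlen != sizeof(struct example_opt))
		return -EINVAL;
	opt = sock_kmalloc(sk, sizeof(struct example_opt), GFP_KERNEL);
	if (opt == NULL)
		return -ENOBUFS;
	if (copy_from_user(opt, optval, optlen)) {
		/* free with the same size, so omem_alloc stays balanced */
		sock_kfree_s(sk, opt, sizeof(struct example_opt));
		return -EFAULT;
	}
	/* ... attach opt to the protocol-private part of sk ... */
	return 0;
}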
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
	add_wait_queue(sk->sleep, &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->socket->flags);
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
			break;
		if (sk->shutdown & SEND_SHUTDOWN)
			break;
		if (sk->err)
			break;
		timeo = schedule_timeout(timeo);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    unsigned long fallback, int noblock, int *errcode)
{
	int err;
	struct sk_buff *skb;
	long timeo;

	timeo = sock_sndtimeo(sk, noblock);

	while (1) {
		unsigned long try_size = size;

		err = sock_error(sk);
		if (err != 0)
			goto failure;

		/*
		 *	We should send SIGPIPE in these cases according to
		 *	1003.1g draft 6.4. If we (the user) did a shutdown()
		 *	call however we should not.
		 *
		 *	Note: This routine isn't just used for datagrams and
		 *	anyway some datagram protocols have a notion of
		 *	close down.
		 */
		err = -EPIPE;
		if (sk->shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
			if (fallback) {
				/* The buffer get won't block, or use the atomic queue.
				 * It does produce annoying no free page messages still.
				 */
				skb = alloc_skb(size, GFP_BUFFER);
				if (skb)
					break;
				try_size = fallback;
			}
			skb = alloc_skb(try_size, sk->allocation);
			if (skb)
				break;
			err = -ENOBUFS;
			goto failure;
		}

		/*
		 *	This means we have too many buffers for this socket already.
		 */
		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
		set_bit(SOCK_NOSPACE, &sk->socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

void __lock_sock(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sk->lock.wq, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		spin_unlock_bh(&sk->lock.slock);
		schedule();
		spin_lock_bh(&sk->lock.slock);
		if (!sk->lock.users)
			break;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sk->lock.wq, &wait);
}

void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->backlog.head;

	do {
		sk->backlog.head = sk->backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->backlog_rcv(sk, skb);
			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->backlog.head) != NULL);
}
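/*
 * [Editor's note: illustrative sketch, not part of the original sock.c.]
 * __lock_sock() and __release_sock() are normally reached through the
 * lock_sock()/release_sock() wrappers in include/net/sock.h: lock_sock()
 * falls back to __lock_sock() when another context owns the socket, and
 * release_sock() uses __release_sock() to run any packets that bottom
 * halves queued on the backlog while the lock was held.
 */
static void example_locked_update(struct sock *sk)
{
	lock_sock(sk);		/* may sleep in __lock_sock() if contended */
	/* ... modify socket state, safe against bottom halves ... */
	release_sock(sk);	/* drains sk->backlog via __release_sock() */
}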
/*
 *	Generic socket manager library. Most of the simpler socket families
 *	use this to manage their socket lists. At some point we should
 *	hash these. By making this generic we get the lot hashed for free.
 *
 *	It is broken by design. All the protocols using it must be fixed.
 *							--ANK
 */

rwlock_t net_big_sklist_lock = RW_LOCK_UNLOCKED;

void sklist_remove_socket(struct sock **list, struct sock *sk)
{
	struct sock *s;

	write_lock_bh(&net_big_sklist_lock);

	while ((s = *list) != NULL) {
		if (s == sk) {
			*list = s->next;
			break;
		}
		list = &s->next;
	}

	write_unlock_bh(&net_big_sklist_lock);

	if (s)
		sock_put(s);
}

void sklist_insert_socket(struct sock **list, struct sock *sk)
{
	write_lock_bh(&net_big_sklist_lock);
	sk->next = *list;
	*list = sk;
	sock_hold(sk);
	write_unlock_bh(&net_big_sklist_lock);
}

/*
 *	This is only called from user mode. Thus it protects itself against
 *	interrupt users but doesn't worry about being called during work.
 *	Once it is removed from the queue no interrupt or bottom half will
 *	touch it and we are (fairly 8-) ) safe.
 */
void sklist_destroy_socket(struct sock **list, struct sock *sk);

/*
 *	Handler for deferred kills.
 */
static void sklist_destroy_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	sklist_destroy_socket(NULL, sk);
}

/*
 *	Destroy a socket. We pass NULL for a list if we know the
 *	socket is not on a list.
 */
void sklist_destroy_socket(struct sock **list, struct sock *sk)
{
	struct sk_buff *skb;

	if (list)
		sklist_remove_socket(list, sk);

	while ((skb = skb_dequeue(&sk->receive_queue)) != NULL)
		kfree_skb(skb);

	if (atomic_read(&sk->wmem_alloc) == 0 &&
	    atomic_read(&sk->rmem_alloc) == 0 &&
	    sk->dead) {
		sock_put(sk);
	} else {
		/*
		 *	Someone is still using our buffers.. defer
		 */
		init_timer(&sk->timer);
		sk->timer.expires = jiffies + SOCK_DESTROY_TIME;
		sk->timer.function = sklist_destroy_timer;
		sk->timer.data = (unsigned long)sk;
		add_timer(&sk->timer);
	}
}

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_release(struct socket *sock)
{
	return 0;
}

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char *optval, int *optlen)
{
	return -EOPNOTSUPP;
}
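/*
 * [Editor's note: illustrative sketch, not part of the original sock.c.]
 * A protocol wires these stubs into its struct proto_ops for whatever
 * operations it does not support, in the style of the 2.4-era address
 * families (compare net/packet/af_packet.c). PF_EXAMPLE and the
 * example_* handlers are hypothetical; datagram_poll() is the real
 * helper from net/core/datagram.c, and sock_no_fcntl() is defined just
 * below.
 */
static struct proto_ops example_ops = {
	family:		PF_EXAMPLE,
	release:	example_release,
	bind:		example_bind,
	connect:	sock_no_connect,	/* connectionless protocol */
	socketpair:	sock_no_socketpair,
	accept:		sock_no_accept,
	getname:	example_getname,
	poll:		datagram_poll,
	ioctl:		example_ioctl,
	listen:		sock_no_listen,
	shutdown:	sock_no_shutdown,
	setsockopt:	sock_no_setsockopt,
	getsockopt:	sock_no_getsockopt,
	fcntl:		sock_no_fcntl,
	sendmsg:	example_sendmsg,
	recvmsg:	example_recvmsg,
	mmap:		sock_no_mmap,
};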
/*
 * Note: if you add something that sleeps here then change sock_fcntl()
 *       to do proper fd locking.
 */
int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case F_SETOWN:
		/*
		 * This is a little restrictive, but it's the only
		 * way to make sure that you can't send a sigurg to
		 * another process.
		 */
		if (current->pgrp != -arg &&
		    current->pid != arg &&
		    !capable(CAP_KILL))
			return -EPERM;
		sk->proc = arg;
		return 0;
	case F_GETOWN:
		return sk->proc;
	default:
		return -EINVAL;
	}
}

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, int flags,
		    struct scm_cookie *scm)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, int len, int flags,
		    struct scm_cookie *scm)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

/*
 *	Default Socket Callbacks
 */

void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible_all(sk->sleep);
	read_unlock(&sk->callback_lock);
}

void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible(sk->sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->callback_lock);
}

void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible(sk->sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->callback_lock);
}

void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
		if (sk->sleep && waitqueue_active(sk->sleep))
			wake_up_interruptible(sk->sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->callback_lock);
}

void sock_def_destruct(struct sock *sk)
{
	if (sk->protinfo.destruct_hook)
		kfree(sk->protinfo.destruct_hook);
}

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->receive_queue);
	skb_queue_head_init(&sk->write_queue);
	skb_queue_head_init(&sk->error_queue);

	init_timer(&sk->timer);

	sk->allocation	= GFP_KERNEL;
	sk->rcvbuf	= sysctl_rmem_default;
	sk->sndbuf	= sysctl_wmem_default;
	sk->state	= TCP_CLOSE;
	sk->zapped	= 1;
	sk->socket	= sock;

	if (sock) {
		sk->type	= sock->type;
		sk->sleep	= &sock->wait;
		sock->sk	= sk;
	} else
		sk->sleep	= NULL;

	sk->dst_lock		= RW_LOCK_UNLOCKED;
	sk->callback_lock	= RW_LOCK_UNLOCKED;

	sk->state_change	= sock_def_wakeup;
	sk->data_ready		= sock_def_readable;
	sk->write_space		= sock_def_write_space;
	sk->error_report	= sock_def_error_report;
	sk->destruct		= sock_def_destruct;

	sk->peercred.pid	= 0;
	sk->peercred.uid	= -1;
	sk->peercred.gid	= -1;
	sk->rcvlowat		= 1;
	sk->rcvtimeo		= MAX_SCHEDULE_TIMEOUT;
	sk->sndtimeo		= MAX_SCHEDULE_TIMEOUT;

	atomic_set(&sk->refcnt, 1);
}
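/*
 * [Editor's note: illustrative sketch, not part of the original sock.c.]
 * sock_init_data() is the tail end of socket creation: a protocol's
 * create() routine allocates the sock with sk_alloc(), lets
 * sock_init_data() install the queues and the sock_def_* callbacks
 * above, and then overrides whatever it needs. PF_EXAMPLE and
 * example_data_ready are hypothetical.
 */
static int example_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	sk = sk_alloc(PF_EXAMPLE, GFP_KERNEL, 1);	/* zeroed sock */
	if (sk == NULL)
		return -ENOBUFS;

	sock_init_data(sock, sk);		/* queues, defaults, refcnt = 1 */
	sk->data_ready = example_data_ready;	/* replace sock_def_readable */
	return 0;
}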
