⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sock.c

📁 嵌入式系统设计与实例开发实验教材二源码 多线程应用程序设计 串行端口程序设计 AD接口实验 CAN总线通信实验 GPS通信实验 Linux内核移植与编译实验 IC卡读写实验 SD驱动使
💻 C
📖 第 1 页 / 共 2 页
字号:
	filter = sk->filter;	if (filter) {		sk_filter_release(sk, filter);		sk->filter = NULL;	}#endif	if (atomic_read(&sk->omem_alloc))		printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));	kmem_cache_free(sk_cachep, sk);}void __init sk_init(void){	sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,				      SLAB_HWCACHE_ALIGN, 0, 0);	if (!sk_cachep)		printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");	if (num_physpages <= 4096) {		sysctl_wmem_max = 32767;		sysctl_rmem_max = 32767;		sysctl_wmem_default = 32767;		sysctl_wmem_default = 32767;	} else if (num_physpages >= 131072) {		sysctl_wmem_max = 131071;		sysctl_rmem_max = 131071;	}}/* *	Simple resource managers for sockets. *//*  * Write buffer destructor automatically called from kfree_skb.  */void sock_wfree(struct sk_buff *skb){	struct sock *sk = skb->sk;	/* In case it might be waiting for more memory. */	atomic_sub(skb->truesize, &sk->wmem_alloc);	if (!sk->use_write_queue)		sk->write_space(sk);	sock_put(sk);}/*  * Read buffer destructor automatically called from kfree_skb.  */void sock_rfree(struct sk_buff *skb){	struct sock *sk = skb->sk;	atomic_sub(skb->truesize, &sk->rmem_alloc);}/* * Allocate a skb from the socket's send buffer. */struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority){	if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {		struct sk_buff * skb = alloc_skb(size, priority);		if (skb) {			skb_set_owner_w(skb, sk);			return skb;		}	}	return NULL;}/* * Allocate a skb from the socket's receive buffer. */ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority){	if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {		struct sk_buff *skb = alloc_skb(size, priority);		if (skb) {			skb_set_owner_r(skb, sk);			return skb;		}	}	return NULL;}/*  * Allocate a memory block from the socket's option memory buffer. 
*/
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
	/* Only allocate while the charge would stay inside the per-socket
	 * option-memory quota (sysctl_optmem_max). */
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->omem_alloc)+size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
 		 * might sleep.
		 */
		atomic_add(size, &sk->omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		/* kmalloc failed: roll the charge back. */
		atomic_sub(size, &sk->omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block and return its charge taken by
 * sock_kmalloc().  @size must match the original allocation size.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->omem_alloc);
}

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
/*
 * Sleep (interruptibly) until write memory becomes available, the
 * timeout expires, a signal arrives, the send side is shut down, or
 * a socket error is pending.  Returns the remaining timeout.
 */
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
	add_wait_queue(sk->sleep, &wait);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		/* Set NOSPACE and our task state BEFORE re-testing the
		 * condition, so a wakeup between test and schedule is
		 * not lost. */
		set_bit(SOCK_NOSPACE, &sk->socket->flags);
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->wmem_alloc) < sk->sndbuf)
			break;
		if (sk->shutdown & SEND_SHUTDOWN)
			break;
		if (sk->err)
			break;
		timeo = schedule_timeout(timeo);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

/*
 * Allocate an skb with @header_len linear bytes plus @data_len bytes
 * of page fragments, blocking (subject to the socket's send timeout)
 * while the write allocation is at sndbuf.  On failure returns NULL
 * and stores a negative errno in *errcode.
 */
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock, int *errcode)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		/* A pending socket error or a shut-down send side aborts
		 * the allocation immediately. */
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
			skb = alloc_skb(header_len, sk->allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done...
*/
				if (!data_len)
					break;

				/* Round the fragment data up to whole pages. */
				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					/* The final fragment may be partial. */
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		/* Send buffer full: flag the condition, then either fail
		 * (-EAGAIN for non-blocking / expired timeout) or wait. */
		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
		set_bit(SOCK_NOSPACE, &sk->socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

/*
 * Linear-only convenience wrapper: sock_alloc_send_pskb() with no
 * page-fragment data.
 */
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

/*
 * Slow path of lock_sock(): sleep uninterruptibly until the lock's
 * user count drops to zero.  Called with lock.slock held; the
 * spinlock is dropped around schedule() and retaken before the
 * re-check.
 */
void __lock_sock(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sk->lock.wq, &wait);
	for(;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		spin_unlock_bh(&sk->lock.slock);
		schedule();
		spin_lock_bh(&sk->lock.slock);
		if(!sk->lock.users)
			break;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sk->lock.wq, &wait);
}

/*
 * Drain the socket backlog through sk->backlog_rcv.  Each pass
 * detaches the whole queue and drops the BH lock while delivering,
 * then retakes it and re-checks for packets backlogged meanwhile.
 */
void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->backlog.head;

	do {
		sk->backlog.head = sk->backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->backlog_rcv(sk, skb);
			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while((skb = sk->backlog.head) != NULL);
}

/*
 *	Generic socket manager library. Most simpler socket families
 *	use this to manage their socket lists. At some point we should
 *	hash these.
By making this generic we get the lot hashed for free.
 *
 *	It is broken by design. All the protocols using it must be fixed. --ANK
 */

/* Guards every list manipulated by the sklist_* helpers below. */
rwlock_t net_big_sklist_lock = RW_LOCK_UNLOCKED;

/*
 * Unlink @sk from the singly linked list and drop the reference that
 * sklist_insert_socket() took.  A no-op if @sk is not on the list.
 */
void sklist_remove_socket(struct sock **list, struct sock *sk)
{
	struct sock *s;

	write_lock_bh(&net_big_sklist_lock);

	while ((s = *list) != NULL) {
		if (s == sk) {
			*list = s->next;
			break;
		}
		list = &s->next;
	}
	write_unlock_bh(&net_big_sklist_lock);
	/* s is non-NULL only if sk was found and unlinked. */
	if (s)
		sock_put(s);
}

/*
 * Push @sk onto the front of the list, taking a reference on it.
 */
void sklist_insert_socket(struct sock **list, struct sock *sk)
{
	write_lock_bh(&net_big_sklist_lock);
	sk->next= *list;
	*list=sk;
	sock_hold(sk);
	write_unlock_bh(&net_big_sklist_lock);
}

/*
 *	This is only called from user mode. Thus it protects itself against
 *	interrupt users but doesn't worry about being called during work.
 *	Once it is removed from the queue no interrupt or bottom half will
 *	touch it and we are (fairly 8-) ) safe.
 */
/* Forward declaration: needed by sklist_destroy_timer() below. */
void sklist_destroy_socket(struct sock **list, struct sock *sk);

/*
 *	Handler for deferred kills.
 */
static void sklist_destroy_timer(unsigned long data)
{
	struct sock *sk=(struct sock *)data;
	/* Not on any list by now (removed on the first attempt). */
	sklist_destroy_socket(NULL,sk);
}

/*
 *	Destroy a socket. We pass NULL for a list if we know the
 *	socket is not on a list.
 */
void sklist_destroy_socket(struct sock **list,struct sock *sk)
{
	if(list)
		sklist_remove_socket(list, sk);

	skb_queue_purge(&sk->receive_queue);

	if(atomic_read(&sk->wmem_alloc) == 0 &&
	   atomic_read(&sk->rmem_alloc) == 0 &&
	   sk->dead)
	{
		sock_put(sk);
	}
	else
	{
		/*
		 *	Someone is using our buffers still.. defer
		 */
		init_timer(&sk->timer);
		sk->timer.expires=jiffies+SOCK_DESTROY_TIME;
		sk->timer.function=sklist_destroy_timer;
		sk->timer.data = (unsigned long)sk;
		add_timer(&sk->timer);
	}
}

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
*/

/* The sock_no_* handlers below are default stubs for proto_ops slots
 * a protocol does not implement; most simply return -EOPNOTSUPP. */

int sock_no_release(struct socket *sock)
{
	return 0;
}

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

/* Default poll: no events ever reported. */
unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		    char *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		    char *optval, int *optlen)
{
	return -EOPNOTSUPP;
}

/*
 * Note: if you add something that sleeps here then change sock_fcntl()
 *       to do proper fd locking.
 */
/*
 * Default fcntl: implements only F_SETOWN/F_GETOWN on sk->proc.
 */
int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch(cmd)
	{
		case F_SETOWN:
			/*
			 * This is a little restrictive, but it's the only
			 * way to make sure that you can't send a sigurg to
			 * another process.
*/
			/* Only the owner's pgrp/pid, or CAP_KILL, may
			 * redirect SIGURG delivery. */
			if (current->pgrp != -arg &&
				current->pid != arg &&
				!capable(CAP_KILL)) return(-EPERM);
			sk->proc = arg;
			return(0);
		case F_GETOWN:
			return(sk->proc);
		default:
			return(-EINVAL);
	}
}

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, int flags,
		    struct scm_cookie *scm)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, int len, int flags,
		    struct scm_cookie *scm)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

/*
 * Default sendpage: kmap the page and push its bytes through
 * sock_sendmsg() with a kernel-space iovec; the address limit is
 * temporarily widened with set_fs(KERNEL_DS) and restored after.
 */
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg;
	struct iovec iov;
	mm_segment_t old_fs;
	char *kaddr;

	kaddr = kmap(page);

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = flags;
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = sock_sendmsg(sock, &msg, size);
	set_fs(old_fs);

	kunmap(page);
	return res;
}

/*
 *	Default Socket Callbacks
 */

/* State-change callback: wake everyone sleeping on the socket. */
void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible_all(sk->sleep);
	read_unlock(&sk->callback_lock);
}

/* Error-report callback: wake a sleeper and signal async POLL_ERR. */
void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible(sk->sleep);
	sk_wake_async(sk,0,POLL_ERR);
	read_unlock(&sk->callback_lock);
}

/* Data-ready callback: wake a reader and signal async POLL_IN. */
void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->callback_lock);
	if (sk->sleep && waitqueue_active(sk->sleep))
		wake_up_interruptible(sk->sleep);
	sk_wake_async(sk,1,POLL_IN);
	read_unlock(&sk->callback_lock);
}

void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.
--DaveM
	 */
	/* "Significant" = at least half the send buffer is free. */
	if((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf) {
		if (sk->sleep && waitqueue_active(sk->sleep))
			wake_up_interruptible(sk->sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->callback_lock);
}

/* Default destructor: free protocol-private data, if any was set. */
void sock_def_destruct(struct sock *sk)
{
	if (sk->protinfo.destruct_hook)
		kfree(sk->protinfo.destruct_hook);
}

/*
 * Initialise a freshly allocated struct sock: empty queues, default
 * buffer sizes/timeouts, default callbacks, and (when @sock is
 * non-NULL) the back-pointers tying sk and its struct socket together.
 */
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->receive_queue);
	skb_queue_head_init(&sk->write_queue);
	skb_queue_head_init(&sk->error_queue);

	init_timer(&sk->timer);

	sk->allocation	=	GFP_KERNEL;
	sk->rcvbuf	=	sysctl_rmem_default;
	sk->sndbuf	=	sysctl_wmem_default;
	sk->state 	= 	TCP_CLOSE;
	sk->zapped	=	1;
	sk->socket	=	sock;

	if(sock)
	{
		sk->type	=	sock->type;
		sk->sleep	=	&sock->wait;
		sock->sk	=	sk;
	} else
		sk->sleep	=	NULL;

	sk->dst_lock		=	RW_LOCK_UNLOCKED;
	sk->callback_lock	=	RW_LOCK_UNLOCKED;

	/* Install the default callbacks defined above. */
	sk->state_change	=	sock_def_wakeup;
	sk->data_ready		=	sock_def_readable;
	sk->write_space		=	sock_def_write_space;
	sk->error_report	=	sock_def_error_report;
	sk->destruct            =       sock_def_destruct;

	/* No peer credentials yet. */
	sk->peercred.pid 	=	0;
	sk->peercred.uid	=	-1;
	sk->peercred.gid	=	-1;

	sk->rcvlowat		=	1;
	sk->rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	atomic_set(&sk->refcnt, 1);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -