
📄 sock.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 3
			break;

		case SO_RCVBUF:
			v.val = sk->sk_rcvbuf;
			break;

		case SO_REUSEADDR:
			v.val = sk->sk_reuse;
			break;

		case SO_KEEPALIVE:
			v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
			break;

		case SO_TYPE:
			v.val = sk->sk_type;
			break;

		case SO_ERROR:
			v.val = -sock_error(sk);
			if (v.val == 0)
				v.val = xchg(&sk->sk_err_soft, 0);
			break;

		case SO_OOBINLINE:
			v.val = !!sock_flag(sk, SOCK_URGINLINE);
			break;

		case SO_NO_CHECK:
			v.val = sk->sk_no_check;
			break;

		case SO_PRIORITY:
			v.val = sk->sk_priority;
			break;

		case SO_LINGER:
			lv		= sizeof(v.ling);
			v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
			v.ling.l_linger	= sk->sk_lingertime / HZ;
			break;

		case SO_BSDCOMPAT:
			sock_warn_obsolete_bsdism("getsockopt");
			break;

		case SO_TIMESTAMP:
			v.val = sk->sk_rcvtstamp;
			break;

		case SO_RCVTIMEO:
			lv = sizeof(struct timeval);
			if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
				v.tm.tv_sec = 0;
				v.tm.tv_usec = 0;
			} else {
				v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
				v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
			}
			break;

		case SO_SNDTIMEO:
			lv = sizeof(struct timeval);
			if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
				v.tm.tv_sec = 0;
				v.tm.tv_usec = 0;
			} else {
				v.tm.tv_sec = sk->sk_sndtimeo / HZ;
				v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
			}
			break;

		case SO_RCVLOWAT:
			v.val = sk->sk_rcvlowat;
			break;

		case SO_SNDLOWAT:
			v.val = 1;
			break;

		case SO_PASSCRED:
			v.val = sock->passcred;
			break;

		case SO_PEERCRED:
			if (len > sizeof(sk->sk_peercred))
				len = sizeof(sk->sk_peercred);
			if (copy_to_user(optval, &sk->sk_peercred, len))
				return -EFAULT;
			goto lenout;

		case SO_PEERNAME:
		{
			char address[128];

			if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
				return -ENOTCONN;
			if (lv < len)
				return -EINVAL;
			if (copy_to_user(optval, address, len))
				return -EFAULT;
			goto lenout;
		}

		/* Dubious BSD thing... Probably nobody even uses it, but
		 * the UNIX standard wants it for whatever reason... -DaveM
		 */
		case SO_ACCEPTCONN:
			v.val = sk->sk_state == TCP_LISTEN;
			break;

		case SO_PEERSEC:
			return security_socket_getpeersec(sock, optval, optlen, len);

		default:
			return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

static kmem_cache_t *sk_cachep;

/**
 *	sk_alloc - All socket objects are allocated here
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@zero_it: zeroes the allocated sock
 *	@slab: alternate slab
 *
 *	All socket objects are allocated here. If @zero_it is non-zero
 *	it should have the size of the area to be zeroed, because the
 *	private slabcaches have different sizes of the generic struct sock.
 *	1 has been kept as a way to say sizeof(struct sock).
 */
struct sock *sk_alloc(int family, int priority, int zero_it, kmem_cache_t *slab)
{
	struct sock *sk = NULL;

	if (!slab)
		slab = sk_cachep;
	sk = kmem_cache_alloc(slab, priority);
	if (sk) {
		if (zero_it) {
			memset(sk, 0,
			       zero_it == 1 ? sizeof(struct sock) : zero_it);
			sk->sk_family = family;
			sock_lock_init(sk);
		}
		sk->sk_slab = slab;

		if (security_sk_alloc(sk, family, priority)) {
			kmem_cache_free(slab, sk);
			sk = NULL;
		}
	}
	return sk;
}
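/*
 * Illustrative sketch, not part of the original sock.c: a typical
 * caller of sk_alloc()/sk_free() is a protocol family's ->create()
 * routine. The name my_packet_create and the error handling below
 * are assumptions modeled on 2.6-era af_packet.c, kept under #if 0
 * so they have no effect on the listing itself.
 */
#if 0
static int my_packet_create(struct socket *sock, int protocol)
{
	/* zero_it == 1 zeroes sizeof(struct sock); the NULL slab
	 * argument selects the generic sk_cachep set up in sk_init()
	 * further down this file. */
	struct sock *sk = sk_alloc(PF_PACKET, GFP_KERNEL, 1, NULL);

	if (sk == NULL)
		return -ENOBUFS;
	sock_init_data(sock, sk);	/* tie sk and socket together */
	return 0;
}
#endif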
void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = sk->sk_filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);
	kmem_cache_free(sk->sk_slab, sk);
	module_put(owner);
}

void __init sk_init(void)
{
	sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
				      SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!sk_cachep)
		printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");

	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sk->sk_use_write_queue)
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
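/*
 * Illustrative sketch, not part of the original sock.c: option memory
 * charged by sock_kmalloc() must be released with sock_kfree_s()
 * passing the same size, so the sk_omem_alloc accounting balances
 * (sk_free() above complains about any leftover charge).
 * my_set_opt_buf is a hypothetical helper, kept under #if 0.
 */
#if 0
static int my_set_opt_buf(struct sock *sk, char __user *uval, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (buf == NULL)
		return -ENOBUFS;	/* would exceed sysctl_optmem_max */
	if (copy_from_user(buf, uval, len)) {
		sock_kfree_s(sk, buf, len);	/* uncharge sk_omem_alloc */
		return -EFAULT;
	}
	/* ... install buf on the socket; release it later with
	 * sock_kfree_s(sk, buf, len) ... */
	return 0;
}
#endif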
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock, int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);
			skb = next;
		} while (skb != NULL);
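As a usage note, here is a hedged sketch of how a datagram protocol's sendmsg path might call sock_alloc_send_skb() from the listing above. The function name my_dgram_alloc and the 16-byte hard-header reservation are illustrative assumptions, not code from this file:

	/* Blocks up to the socket's send timeout waiting for sk_wmem_alloc
	 * to drop below sk_sndbuf, unless MSG_DONTWAIT is set. */
	static struct sk_buff *my_dgram_alloc(struct sock *sk, size_t len,
					      int flags, int *err)
	{
		return sock_alloc_send_skb(sk, len + 16 /* assumed header room */,
					   flags & MSG_DONTWAIT, err);
	}

On success the skb is already charged to the socket via skb_set_owner_w(), so sock_wfree() credits sk_wmem_alloc back when the buffer is eventually freed.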
