
📄 sock.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff * skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
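/*
 * Illustrative sketch (not part of sock.c): how a protocol's setsockopt
 * handler might pair sock_kmalloc() with sock_kfree_s(), so a kernel copy
 * of user option data is charged against the socket's option memory and
 * uncharged on every exit path. The function name is hypothetical.
 */
#if 0
static int example_set_option(struct sock *sk, char __user *optval, int optlen)
{
	void *buf;

	if (optlen <= 0)
		return -EINVAL;

	buf = sock_kmalloc(sk, optlen, GFP_KERNEL);	/* charged to sk_omem_alloc */
	if (!buf)
		return -ENOBUFS;

	if (copy_from_user(buf, optval, optlen)) {
		sock_kfree_s(sk, buf, optlen);		/* uncharge on failure */
		return -EFAULT;
	}

	/* ... use buf; keep optlen around so it can be freed later ... */
	sock_kfree_s(sk, buf, optlen);
	return 0;
}
#endif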
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}
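/*
 * Illustrative sketch (not part of sock.c): the usual datagram sendmsg
 * pattern around sock_alloc_send_skb(). The call blocks, subject to the
 * socket's send timeout, until sk_wmem_alloc drops below sk_sndbuf;
 * on failure the allocator fills in err (-EAGAIN, -EPIPE, restart errno,
 * ...). The surrounding function is hypothetical.
 */
#if 0
static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int noblock)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		return err;			/* errcode set by the allocator */

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* ... hand skb to the transmit path ... */
	kfree_skb(skb);
	return len;
}
#endif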
static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
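/*
 * Illustrative sketch (not part of sock.c): a protocol that has no use
 * for some operations plugs the sock_no_* stubs straight into its
 * struct proto_ops instead of writing its own error returns. The table
 * is hypothetical and abbreviated; PF_EXAMPLE and example_bind do not
 * exist in the kernel.
 */
#if 0
static const struct proto_ops example_ops = {
	.family		= PF_EXAMPLE,		/* hypothetical family */
	.owner		= THIS_MODULE,
	.bind		= example_bind,		/* protocol-specific */
	.connect	= sock_no_connect,	/* connectionless: -EOPNOTSUPP */
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.mmap		= sock_no_mmap,		/* returns -ENODEV */
	.sendpage	= sock_no_sendpage,	/* falls back to kernel_sendmsg() */
	/* ... release, poll, sendmsg, recvmsg supplied by the protocol ... */
};
#endif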
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk,0,POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk,1,POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}

void sk_reset_timer(struct sock *sk, struct timer_list* timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
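/*
 * Illustrative sketch (not part of sock.c): the sk_reset_timer() /
 * sk_stop_timer() pair keeps a sock reference for as long as the timer
 * is pending, so the handler can safely dereference sk and must drop the
 * reference when it fires. Handler name and interval are hypothetical.
 */
#if 0
static void example_retrans_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... retransmit; possibly re-arm with sk_reset_timer() ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the reference taken by sk_reset_timer() */
}

static void example_arm(struct sock *sk)
{
	/* takes a sock reference unless the timer was already pending */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}

static void example_disarm(struct sock *sk)
{
	/* releases the reference if a pending timer was deleted */
	sk_stop_timer(sk, &sk->sk_timer);
}
#endif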
