sock.h

From "Linux Kernel 2.6.9 for OMAP1710" · C header file source · 1,342 lines total · page 1 of 3

H
1,342
字号
/* Ungrab socket and destroy it, if it was the last reference. */static inline void sock_put(struct sock *sk){	if (atomic_dec_and_test(&sk->sk_refcnt))		sk_free(sk);}/* Detach socket from process context. * Announce socket dead, detach it from wait queue and inode. * Note that parent inode held reference count on this struct sock, * we do not release it in this function, because protocol * probably wants some additional cleanups or even continuing * to work with this socket (TCP). */static inline void sock_orphan(struct sock *sk){	write_lock_bh(&sk->sk_callback_lock);	sock_set_flag(sk, SOCK_DEAD);	sk->sk_socket = NULL;	sk->sk_sleep  = NULL;	write_unlock_bh(&sk->sk_callback_lock);}static inline void sock_graft(struct sock *sk, struct socket *parent){	write_lock_bh(&sk->sk_callback_lock);	sk->sk_sleep = &parent->wait;	parent->sk = sk;	sk->sk_socket = parent;	write_unlock_bh(&sk->sk_callback_lock);}extern int sock_i_uid(struct sock *sk);extern unsigned long sock_i_ino(struct sock *sk);static inline struct dst_entry *__sk_dst_get(struct sock *sk){	return sk->sk_dst_cache;}static inline struct dst_entry *sk_dst_get(struct sock *sk){	struct dst_entry *dst;	read_lock(&sk->sk_dst_lock);	dst = sk->sk_dst_cache;	if (dst)		dst_hold(dst);	read_unlock(&sk->sk_dst_lock);	return dst;}static inline void__sk_dst_set(struct sock *sk, struct dst_entry *dst){	struct dst_entry *old_dst;	old_dst = sk->sk_dst_cache;	sk->sk_dst_cache = dst;	dst_release(old_dst);}static inline voidsk_dst_set(struct sock *sk, struct dst_entry *dst){	write_lock(&sk->sk_dst_lock);	__sk_dst_set(sk, dst);	write_unlock(&sk->sk_dst_lock);}static inline void__sk_dst_reset(struct sock *sk){	struct dst_entry *old_dst;	old_dst = sk->sk_dst_cache;	sk->sk_dst_cache = NULL;	dst_release(old_dst);}static inline voidsk_dst_reset(struct sock *sk){	write_lock(&sk->sk_dst_lock);	__sk_dst_reset(sk);	write_unlock(&sk->sk_dst_lock);}static inline struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie){	struct dst_entry 
*dst = sk->sk_dst_cache;	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {		sk->sk_dst_cache = NULL;		return NULL;	}	return dst;}static inline struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie){	struct dst_entry *dst = sk_dst_get(sk);	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {		sk_dst_reset(sk);		return NULL;	}	return dst;}static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb){	sk->sk_wmem_queued   += skb->truesize;	sk->sk_forward_alloc -= skb->truesize;}static inline int skb_copy_to_page(struct sock *sk, char __user *from,				   struct sk_buff *skb, struct page *page,				   int off, int copy){	if (skb->ip_summed == CHECKSUM_NONE) {		int err = 0;		unsigned int csum = csum_and_copy_from_user(from,						     page_address(page) + off,							    copy, 0, &err);		if (err)			return err;		skb->csum = csum_block_add(skb->csum, csum, skb->len);	} else if (copy_from_user(page_address(page) + off, from, copy))		return -EFAULT;	skb->len	     += copy;	skb->data_len	     += copy;	skb->truesize	     += copy;	sk->sk_wmem_queued   += copy;	sk->sk_forward_alloc -= copy;	return 0;}/* * 	Queue a received datagram if it will fit. Stream and sequenced *	protocols can't normally use this as they need to fit buffers in *	and play with them. * * 	Inlined as it's very short and called for pretty much every *	packet ever received. 
 */
/* Charge an skb to the socket's write allocation: take a reference on the
 * socket (released by sock_wfree via skb->destructor) and account the
 * skb's truesize against sk_wmem_alloc. */
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

/* Receive-side counterpart of skb_set_owner_w(); note that no socket
 * reference is taken here, unlike the write-side variant. */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
			   unsigned long expires);
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would be deadlock, if sock_queue_rcv_skb is used
	   with socket lock! We assume that users of this
	   function are lock free.
	*/
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}

/* Queue an skb on the socket's error queue, or -ENOMEM if it would push
 * receive memory past sk_rcvbuf. */
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */
static inline int sock_error(struct sock *sk)
{
	/* xchg clears sk_err, so each error is reported exactly once. */
	int err = xchg(&sk->sk_err, 0);
	return -err;
}

/* Remaining send-buffer space, clamped to >= 0; always 0 after
 * SEND_SHUTDOWN. */
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

/* Wake async (SIGIO/poll) waiters, but only if someone registered for
 * fasync notification on the parent socket. */
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

/* Shrink sk_sndbuf toward half the queued write memory under pressure,
 * unless the user pinned the size via SO_SNDBUF (SOCK_SNDBUF_LOCK);
 * never drops below SOCK_MIN_SNDBUF. */
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

/* Allocate an skb for a stream socket with room reserved for the
 * protocol's maximum header, charging 'mem' extra bytes of truesize.
 * Returns NULL if the charge is refused by the stream memory scheduler
 * or the allocation fails (the latter also signals memory pressure). */
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem, int gfp)
{
	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->sk_forward_alloc >= (int)skb->truesize ||
		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size, int gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

/* Allocate one page for stream data, subject to the stream memory
 * scheduler; NULL (plus memory-pressure signalling) if refused. */
static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
		page = alloc_pages(sk->sk_allocation, 0);
	else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/* Iterate skbs from the head of sk_write_queue up to (excluding)
 * sk_send_head, i.e. the retransmit queue. */
#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

/* GFP flags safe for the current context: atomic in softirq. */
static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

/* Number of bytes a receive should wait for; at least 1. */
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

/* Deliver the packet timestamp to user space as an SO_TIMESTAMP cmsg if
 * the socket asked for it; otherwise just remember it in sk_stamp
 * (presumably for a later SIOCGSTAMP — see sock_get_timestamp below). */
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval *stamp = &skb->stamp;
	if (sk->sk_rcvtstamp) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now.
		 */
		if (stamp->tv_sec == 0)
			do_gettimeofday(stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 stamp);
	} else
		sk->sk_stamp = *stamp;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk - socket to eat this skb from
 * @skb - socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern atomic_t netstamp_needed;
extern void sock_enable_timestamp(struct sock *sk);
extern void sock_disable_timestamp(struct sock *sk);

/* Record the current time in 'stamp' if any socket wants timestamps,
 * otherwise zero it (tv_sec == 0 means "no timestamp taken"; see the
 * race comment in sock_recv_timestamp above). */
static inline void net_timestamp(struct timeval *stamp)
{
	if (atomic_read(&netstamp_needed))
		do_gettimeofday(stamp);
	else {
		stamp->tv_sec = 0;
		stamp->tv_usec = 0;
	}
}

extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */
#if 0
#define NETDEBUG(x)	do { } while (0)
#define LIMIT_NETDEBUG(x) do {} while(0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 * 	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */
/* NOTE: SOCK_SLEEP_PRE opens a brace/scope that SOCK_SLEEP_POST closes;
 * the pair must always be used together.  The wait queue entry is added
 * and the socket released before sleeping, then undone afterwards. */
#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

/* Set or clear sock flag 'bit' according to the boolean 'valbool'. */
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/* Global socket buffer-size maxima, tunable via sysctl. */
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);

#endif	/* _SOCK_H */

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?