/*
 * sock.h — Linux kernel core socket definitions.
 *
 * NOTE(extraction): this text was captured from a code-viewer page
 * ("1,417 lines total, page 1 of 3"); viewer UI chrome has been
 * replaced by this comment. The file continues beyond this excerpt.
 */
}	/* closes a definition that begins before this excerpt */

/*
 * Append @skb to the socket's backlog queue (a singly linked list
 * threaded through skb->next, tracked by sk_backlog.head/tail).
 * The per-socket spinlock must be held here.
 */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		/* Empty backlog: skb becomes both head and tail. */
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		/* Non-empty: link after the current tail. */
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;	/* terminate the list at the new tail */
}

/*
 * Drop the socket lock, sleep up to *(__timeo) jiffies unless
 * @__condition already holds, then re-take the lock and evaluate to the
 * final truth value of @__condition.
 * NOTE(review): @__condition is evaluated twice (before and after the
 * sleep), so it must be side-effect free.
 */
#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

/* Stream-socket wait/error/teardown helpers (defined elsewhere). */
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);
extern int sk_wait_data(struct sock *sk, long *timeo);

/* Forward declarations; full definitions live in other headers. */
struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	/* Connection setup / teardown entry points. */
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
				        struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	/* compat_* entry points: 32-bit userland on a 64-bit kernel. */
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	/* Deliver one queued skb from the backlog to the protocol. */
	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods.
	 */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

#ifdef CONFIG_SMP
	/* Keeping track of sockets in use */
	void			(*inuse_add)(struct proto *prot, int inc);
	int			(*inuse_getval)(const struct proto *prot);
	int			*inuse_ptr;
#else
	int			inuse;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache		*slab;	/* slab cache for this proto's socks */
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;	/* membership in the global proto list */
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;	/* live-socket count for debug prints */
#endif
};

/*
 * Special macros to let protos use a fast version of inuse{get|add}
 * using a static percpu variable per proto instead of an allocated one,
 * saving one dereference.
 * This might be changed if/when dynamic percpu vars become fast.
 */
#ifdef CONFIG_SMP
/*
 * DEFINE_PROTO_INUSE(NAME) emits a static per-cpu counter plus the
 * add/getval helpers operating on it; REF_PROTO_INUSE(NAME) wires those
 * helpers into a struct proto initializer.
 */
# define DEFINE_PROTO_INUSE(NAME)			\
static DEFINE_PER_CPU(int, NAME##_inuse);		\
static void NAME##_inuse_add(struct proto *prot, int inc)	\
{							\
	__get_cpu_var(NAME##_inuse) += inc;		\
}							\
							\
static int NAME##_inuse_getval(const struct proto *prot)\
{							\
	int res = 0, cpu;				\
							\
	for_each_possible_cpu(cpu)			\
		res += per_cpu(NAME##_inuse, cpu);	\
	return res;					\
}
# define REF_PROTO_INUSE(NAME)				\
	.inuse_add = NAME##_inuse_add,			\
	.inuse_getval = NAME##_inuse_getval,
#else
/* UP kernels use the plain proto->inuse field instead. */
# define DEFINE_PROTO_INUSE(NAME)
# define REF_PROTO_INUSE(NAME)
#endif

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
/* Debug accounting of live sockets per protocol (see proto->socks). */
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
#ifdef CONFIG_SMP
	prot->inuse_add(prot, 1);
#else
	prot->inuse++;
#endif
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
#ifdef CONFIG_SMP
	prot->inuse_add(prot, -1);
#else
	prot->inuse--;
#endif
}

static __inline__ int sock_prot_inuse(struct proto *proto)
{
#ifdef CONFIG_SMP
	return proto->inuse_getval(proto);
#else
	return proto->inuse;
#endif
}

/* With per-bucket locks this operation is not-atomic, so that
 * this
 * version is not worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

/* Bits for sk_userlocks-style "user pinned this setting" flags. */
#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

/* The sock_iocb is stashed in the kiocb's private pointer. */
static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

/* A socket and its VFS inode are allocated together in one object. */
struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

/* Map an embedded vfs_inode back to its containing socket. */
static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

/* Map an embedded socket back to its containing VFS inode. */
static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

/* Number of memory quanta (pages) needed to cover @amt bytes. */
static inline int sk_stream_pages(int amt)
{
	return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	/* Only bother reclaiming once at least one full quantum is free. */
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

/* Charge @skb against receive memory; kind==1 selects the rcv path. */
static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

/* Charge @size bytes against send memory; kind==0 selects the snd path. */
static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket
 * state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
do {									\
	sk->sk_lock.owned = 0;					\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
		       	(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));

/* Take the socket lock with the default (0) lockdep subclass. */
static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface.
 */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

/* Socket allocation/cloning and buffer accounting (defined elsewhere). */
extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);

/* Socket-charged kmalloc/kfree (accounted against sk_omem_alloc). */
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);
extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 * (NOTE(review): a stale "@needlock" kernel-doc line was removed; the
 * function signature has no such parameter.)
 */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/* LSM hook may veto the packet before the BPF filter runs. */
	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	/* NOTE(review): function body continues on the next page of this
	 * excerpt; do not treat this as the end of sk_filter(). */

/*
 * (End of page 1/3 of this extract. Code-viewer keyboard-shortcut help
 * text removed; the remainder of sock.h continues on the next page.)
 */