sock.h

来自「Linux Kernel 2.6.9 for OMAP1710」· C头文件 代码 · 共 1,342 行 · 第 1/3 页

H
1,342
字号
/*
 * NOTE(review): the opening brace below belongs to a function whose
 * signature lies before this chunk. By its body it charges a receive
 * skb to the socket's rmem accounting and installs the stream
 * destructor — presumably sk_stream_set_owner_r; confirm against the
 * preceding page of the file.
 */
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

/*
 * Free a stream-socket write-queue skb: return its truesize to the
 * socket's forward-allocation pool, shrink the queued-wmem accounting
 * and release the buffer itself.
 */
static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_queue_shrunk   = 1;	/* flag to the write path that the queue shrank */
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)				\
do {	if (!(__sk)->sk_backlog.tail) {				\
		(__sk)->sk_backlog.head =			\
		     (__sk)->sk_backlog.tail = (__skb);		\
	} else {						\
		((__sk)->sk_backlog.tail)->next = (__skb);	\
		(__sk)->sk_backlog.tail = (__skb);		\
	}							\
	(__skb)->next = NULL;					\
} while(0)

/*
 * Release the socket lock, evaluate __condition, and if it does not
 * yet hold sleep for up to *(__timeo) jiffies (schedule_timeout
 * updates the remaining time in place) before re-evaluating.  The
 * lock is re-acquired before the final truth value is yielded.
 */
#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
				        struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	/* Called to process packets queued on the backlog while the
	 * socket lock was owned by a process. */
	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;	/* per-protocol sysctl limit arrays —
						 * presumably min/pressure/max triples;
						 * confirm against sysctl registration */
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;		/* SLAB cache this protocol's socks come from */
	int			slab_obj_size;

	char			name[32];

	/* Per-CPU use counter, padded out to a cache line to avoid
	 * false sharing between CPUs. */
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int sk_alloc_slab(struct proto *prot, char *name);
extern void sk_free_slab(struct proto *prot);

/* Report (loudly) that sk_alloc_slab() failed for this protocol. */
static inline void sk_alloc_slab_error(struct proto *proto)
{
	printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", proto->name);
}

/* Record the module that owns this sock and pin it in memory. */
static __inline__ void sk_set_owner(struct sock *sk, struct module *owner)
{
	/*
	 * One should use sk_set_owner just once, after struct sock creation,
	 * be it shortly after sk_alloc or after a function that returns a new
	 * struct sock (and that down the call chain called sk_alloc), e.g. the
	 * IPv4 and IPv6 modules share tcp_create_openreq_child, so if
	 * tcp_create_openreq_child called sk_set_owner IPv6 would have to
	 * change the ownership of this struct sock, with one not needed
	 * transient sk_set_owner call.
	 */
	BUG_ON(sk->sk_owner != NULL);
	sk->sk_owner = owner;
	__module_get(owner);
}

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

/* sk_userlocks bits: which parts of the socket the user has pinned. */
#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

/* The sock_iocb rides in the kiocb's private field. */
static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb
 *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

/* A socket and its VFS inode are allocated as one object so either
 * can be recovered from the other with container_of(). */
struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

/* Number of memory quanta (pages) needed to cover amt bytes, rounded up. */
static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	/* Only worth the call once at least one full quantum can be
	 * handed back. */
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

/* Free every skb still sitting on the write queue, then reclaim any
 * forward-allocated memory that freed up. */
static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

/* May skb be accounted as receive memory?  True if the forward-alloc
 * pool already covers it, or more can be scheduled (kind 1). */
static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);

/* Non-zero while a process context owns the socket lock. */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

extern struct sock *		sk_alloc(int family, int priority, int zero_it,
					 kmem_cache_t *slab);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);

/* Option-memory allocation, charged against sk->sk_omem_alloc. */
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_release(struct socket *);
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by caller.
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	/* Give the security module a chance to veto the packet first. */
	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		/* Take the BH spinlock (unless the caller already holds
		 * the sock) so the filter cannot be swapped out from
		 * under us; re-read sk_filter under the lock. */
		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;	/* filter verdict: drop */
			else
				skb_trim(skb, pkt_len);
		}
		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release: Release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	/* Return the filter's bytes to the socket's option-memory
	 * accounting, then free the filter on its last reference. */
	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

/* Take a reference on a filter and charge its size to the socket's
 * option-memory accounting — the inverse of sk_filter_release(). */
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (an hash table bucket, reference from a list,
 *   running timer, skb in flight MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhasing is made by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets
 *   use separate SMP lock, so that they are prone too.
 */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?