📄 sock.h
字号:
* * union or struct { * ax25; * } ll_pinfo; * * union { * ipv4; * ipv6; * ipx; * netrom; * rose; * x25; * } net_pinfo; * * union { * tcp; * udp; * spx; * netrom; * } tp_pinfo; * * } * * The idea failed because IPv6 transition asssumes dual IP/IPv6 sockets. * So, net_pinfo is IPv6 are really, and protinfo unifies all another * private areas. *//* Define this to get the sk->debug debugging facility. */#define SOCK_DEBUGGING#ifdef SOCK_DEBUGGING#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)#else#define SOCK_DEBUG(sk, msg...) do { } while (0)#endif/* This is the per-socket lock. The spinlock provides a synchronization * between user contexts and software interrupt processing, whereas the * mini-semaphore synchronizes multiple users amongst themselves. */typedef struct { spinlock_t slock; unsigned int users; wait_queue_head_t wq;} socket_lock_t;#define sock_lock_init(__sk) \do { spin_lock_init(&((__sk)->lock.slock)); \ (__sk)->lock.users = 0; \ init_waitqueue_head(&((__sk)->lock.wq)); \} while(0);struct sock { /* Socket demultiplex comparisons on incoming packets. */ __u32 daddr; /* Foreign IPv4 addr */ __u32 rcv_saddr; /* Bound local IPv4 addr */ __u16 dport; /* Destination port */ unsigned short num; /* Local port */ int bound_dev_if; /* Bound device index if != 0 */ /* Main hash linkage for various protocol lookup tables. */ struct sock *next; struct sock **pprev; struct sock *bind_next; struct sock **bind_pprev; volatile unsigned char state, /* Connection state */ zapped; /* In ax25 & ipx means not linked */ __u16 sport; /* Source port */ unsigned short family; /* Address family */ unsigned char reuse; /* SO_REUSEADDR setting */ unsigned char shutdown; atomic_t refcnt; /* Reference count */ socket_lock_t lock; /* Synchronizer... 
*/ int rcvbuf; /* Size of receive buffer in bytes */ wait_queue_head_t *sleep; /* Sock wait queue */ struct dst_entry *dst_cache; /* Destination cache */ rwlock_t dst_lock; atomic_t rmem_alloc; /* Receive queue bytes committed */ struct sk_buff_head receive_queue; /* Incoming packets */ atomic_t wmem_alloc; /* Transmit queue bytes committed */ struct sk_buff_head write_queue; /* Packet sending queue */ atomic_t omem_alloc; /* "o" is "option" or "other" */ int wmem_queued; /* Persistent queue size */ int forward_alloc; /* Space allocated forward. */ __u32 saddr; /* Sending source */ unsigned int allocation; /* Allocation mode */ int sndbuf; /* Size of send buffer in bytes */ struct sock *prev; /* Not all are volatile, but some are, so we might as well say they all are. * XXX Make this a flag word -DaveM */ volatile char dead, done, urginline, keepopen, linger, destroy, no_check, broadcast, bsdism; unsigned char debug; unsigned char rcvtstamp; unsigned char use_write_queue; unsigned char userlocks; /* Hole of 3 bytes. Try to pack. */ int route_caps; int proc; unsigned long lingertime; int hashent; struct sock *pair; /* The backlog queue is special, it is always used with * the per-socket spinlock held and requires low latency * access. Therefore we special case it's implementation. */ struct { struct sk_buff *head; struct sk_buff *tail; } backlog; rwlock_t callback_lock; /* Error queue, rarely used. 
*/ struct sk_buff_head error_queue; struct proto *prot;#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) union { struct ipv6_pinfo af_inet6; } net_pinfo;#endif union { struct tcp_opt af_tcp;#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE) struct raw_opt tp_raw4;#endif#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) struct raw6_opt tp_raw;#endif /* CONFIG_IPV6 */#if defined(CONFIG_SPX) || defined (CONFIG_SPX_MODULE) struct spx_opt af_spx;#endif /* CONFIG_SPX */ } tp_pinfo; int err, err_soft; /* Soft holds errors that don't cause failure but are the cause of a persistent failure not just 'timed out' */ unsigned short ack_backlog; unsigned short max_ack_backlog; __u32 priority; unsigned short type; unsigned char localroute; /* Route locally only */ unsigned char protocol; struct ucred peercred; int rcvlowat; long rcvtimeo; long sndtimeo;#ifdef CONFIG_FILTER /* Socket Filtering Instructions */ struct sk_filter *filter;#endif /* CONFIG_FILTER */ /* This is where all the private (optional) areas that don't * overlap will eventually live. 
*/ union { void *destruct_hook; struct unix_opt af_unix;#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE) struct inet_opt af_inet;#endif#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE) struct atalk_sock af_at;#endif#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE) struct ipx_opt af_ipx;#endif#if defined (CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE) struct dn_scp dn;#endif#if defined (CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE) struct packet_opt *af_packet;#endif#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE) x25_cb *x25;#endif#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) ax25_cb *ax25;#endif#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) nr_cb *nr;#endif#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE) rose_cb *rose;#endif#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE) struct pppox_opt *pppox;#endif struct netlink_opt *af_netlink;#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE) struct econet_opt *af_econet;#endif#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE) struct atm_vcc *af_atm;#endif#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE) struct irda_sock *irda;#endif#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE) struct wanpipe_opt *af_wanpipe;#endif } protinfo; /* This part is used for the timeout functions. */ struct timer_list timer; /* This is the sock cleanup timer. */ struct timeval stamp; /* Identd and reporting IO signals */ struct socket *socket; /* RPC layer private data */ void *user_data; /* Callbacks */ void (*state_change)(struct sock *sk); void (*data_ready)(struct sock *sk,int bytes); void (*write_space)(struct sock *sk); void (*error_report)(struct sock *sk); int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); void (*destruct)(struct sock *sk);};/* The per-socket spinlock must be held here. 
 */

/* Append skb to sk's backlog queue; caller holds sk->lock.slock. */
#define sk_add_backlog(__sk, __skb) \
do {	if((__sk)->backlog.tail == NULL) { \
		(__sk)->backlog.head = \
			(__sk)->backlog.tail = (__skb); \
	} else { \
		((__sk)->backlog.tail)->next = (__skb); \
		(__sk)->backlog.tail = (__skb); \
	} \
	(__skb)->next = NULL; \
} while(0)

/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	/* Connection lifecycle operations. */
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	/* Control and setup operations. */
	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char *optval,
					int *option);

	/* Data path. */
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					int len, int noblock, int flags,
					int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	char			name[32];

	/* Per-CPU in-use counters, padded to avoid cacheline sharing. */
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

/* Bits for sk->userlocks. */
#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);

/* Acquire the process-context ownership of sk; may sleep. */
#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

/* Release ownership: drain the backlog first, then wake any waiter. */
#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	(__sk)->lock.users = 0; \
	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

/* BH context may only use the following locking interface.
 */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))

/* Socket allocation / destruction. */
extern struct sock *		sk_alloc(int family, int priority, int zero_it);
extern void			sk_free(struct sock *sk);

/* Accounted skb allocation against the send/receive buffers. */
extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

/* Generic SOL_SOCKET option handling. */
extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char *optval,
						int *optlen);

/* Blocking (unless noblock) skb allocation for transmit. */
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);

/* Option-memory (omem) accounted kmalloc/kfree. */
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_release(struct socket *);
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char *, int *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char *, int);
extern int 			sock_no_fcntl(struct socket *,
					      unsigned int, unsigned long);
extern int                      sock_no_sendmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int                      sock_no_recvmsg(struct socket *,
						struct msghdr *, int, int,
						struct scm_cookie *);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -