sock.h
From the Linux kernel source tree · C header · 1,417 lines · page 1 of 3
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/net_namespace.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
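/*
 * Editor's sketch (not part of the original header): the "mini-semaphore"
 * above is driven by lock_sock()/release_sock(), which are declared later
 * in this file.  A typical process-context user looks roughly like:
 *
 *	lock_sock(sk);
 *	... sleepable critical section; socket state is stable because
 *	    softirq input is parked on sk->sk_backlog while owned != 0 ...
 *	SOCK_DEBUG(sk, "sk %p: doing slow-path work\n", sk);
 *	release_sock(sk);	(this also runs the parked backlog)
 *
 * Softirq context takes only the slock spinlock and never sleeps.
 */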
/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
	struct net		*skc_net;
};
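/*
 * Editor's note (illustrative, not in the original source): because both
 * struct sock and struct inet_timewait_sock start with a struct sock_common,
 * lookup code can examine a hash-table entry through the common header
 * before deciding which full type it holds, e.g.:
 *
 *	struct sock_common *skc = (struct sock_common *)sk;
 *	if (skc->skc_state == TCP_TIME_WAIT)
 *		... the entry is really an inet_timewait_sock ...
 */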
/**
  * struct sock - network layer representation of sockets
  * @__sk_common: shared layout with inet_timewait_sock
  * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  * @sk_lock: synchronizer
  * @sk_rcvbuf: size of receive buffer in bytes
  * @sk_sleep: sock wait queue
  * @sk_dst_cache: destination cache
  * @sk_dst_lock: destination cache lock
  * @sk_policy: flow policy
  * @sk_rmem_alloc: receive queue bytes committed
  * @sk_receive_queue: incoming packets
  * @sk_wmem_alloc: transmit queue bytes committed
  * @sk_write_queue: Packet sending queue
  * @sk_async_wait_queue: DMA copied packets
  * @sk_omem_alloc: "o" is "option" or "other"
  * @sk_wmem_queued: persistent queue size
  * @sk_forward_alloc: space allocated forward
  * @sk_allocation: allocation mode
  * @sk_sndbuf: size of send buffer in bytes
  * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  * @sk_lingertime: %SO_LINGER l_linger setting
  * @sk_backlog: always used with the per-socket spinlock held
  * @sk_callback_lock: used with the callbacks in the end of this struct
  * @sk_error_queue: rarely used
  * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
  * @sk_err: last error
  * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
  * @sk_ack_backlog: current listen backlog
  * @sk_max_ack_backlog: listen backlog set in listen()
  * @sk_priority: %SO_PRIORITY setting
  * @sk_type: socket type (%SOCK_STREAM, etc)
  * @sk_protocol: which protocol this socket belongs in this network family
  * @sk_peercred: %SO_PEERCRED setting
  * @sk_rcvlowat: %SO_RCVLOWAT setting
  * @sk_rcvtimeo: %SO_RCVTIMEO setting
  * @sk_sndtimeo: %SO_SNDTIMEO setting
  * @sk_filter: socket filtering instructions
  * @sk_protinfo: private area, net family specific, when not using slab
  * @sk_timer: sock cleanup timer
  * @sk_stamp: time stamp of last packet received
  * @sk_socket: Identd and reporting IO signals
  * @sk_user_data: RPC layer private data
  * @sk_sndmsg_page: cached page for sendmsg
  * @sk_sndmsg_off: cached offset for sendmsg
  * @sk_send_head: front of stuff to transmit
  * @sk_security: used by security modules
  * @sk_write_pending: a write to stream socket waits to start
  * @sk_state_change: callback to indicate change in the state of the sock
  * @sk_data_ready: callback to indicate there is data to be processed
  * @sk_write_space: callback to indicate there is buffer sending space available
  * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  * @sk_backlog_rcv: callback to process the backlog
  * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
  */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}
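/*
 * Editor's sketch (not in the original header): walking one hash chain with
 * the helpers above.  Real lookup code holds the table lock for the walk and
 * takes a reference (sock_hold(), below) before using the socket it found:
 *
 *	struct sock *sk;
 *
 *	for (sk = sk_head(head); sk; sk = sk_next(sk))
 *		if (sk->sk_hash == hash)
 *			break;
 */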
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
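/*
 * Editor's sketch (not in the original header): the accept-queue helpers
 * above account for the listen() backlog.  A listener's connection-request
 * handling does roughly the following, calling sk_acceptq_added() when a
 * child socket is queued and sk_acceptq_removed() when accept() takes it:
 *
 *	if (sk_acceptq_is_full(sk))
 *		goto drop;	the new connection request is discarded
 */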
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}
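/*
 * Editor's sketch (assumption, not in the original source): how the stream
 * write-space helpers fit together.  A sender may keep queueing data only
 * while sk_stream_memory_free() holds; sk_stream_write_space() (defined in
 * net/core/stream.c) wakes blocked writers once sk_stream_wspace() grows
 * back to at least sk_stream_min_wspace(), i.e. half of what is queued:
 *
 *	while (more_data_to_send) {
 *		if (!sk_stream_memory_free(sk))
 *			... sleep until sk->sk_write_space fires ...
 *		... queue up to sk_stream_wspace(sk) more bytes ...
 *	}
 */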