sock.h
来自「Linux Kernel 2.6.9 for OMAP1710」· C头文件 代码 · 共 1,342 行 · 第 1/3 页
H
1,342 行
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the AF_INET socket handler. * * Version: @(#)sock.h 1.0.4 05/13/93 * * Authors: Ross Biro, <bir7@leland.Stanford.Edu> * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche <flla@stud.uni-sb.de> * * Fixes: * Alan Cox : Volatiles in skbuff pointers. See * skbuff comments. May be overdone, * better to prove they can be removed * than the reverse. * Alan Cox : Added a zapped field for tcp to note * a socket is reset and must stay shut up * Alan Cox : New fields for options * Pauline Middelink : identd support * Alan Cox : Eliminate low level recv/recvfrom * David S. Miller : New socket lookup architecture. * Steve Whitehouse: Default routines for sock_ops * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made * protinfo be just a void pointer, as the * protocol specific parts were moved to * respective headers and ipv4/v6, etc now * use private slabcaches for its socks * Pedro Hortas : New flags field for socket options * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */#ifndef _SOCK_H#define _SOCK_H#include <linux/config.h>#include <linux/list.h>#include <linux/timer.h>#include <linux/cache.h>#include <linux/module.h>#include <linux/netdevice.h>#include <linux/skbuff.h> /* struct sk_buff */#include <linux/security.h>#include <linux/filter.h>#include <asm/atomic.h>#include <net/dst.h>#include <net/checksum.h>/* * This structure really needs to be cleaned up. * Most of it is for TCP, and not used by any of * the other protocols. *//* Define this to get the sk->sk_debug debugging facility. 
 */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
/* Per-socket debug printk: fires only when the socket exists and has
 * SO_DEBUG set (sk->sk_debug); compiles away entirely otherwise.
 */
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;	/* user <-> softirq synchronization */
	struct sock_iocb	*owner;	/* user context currently owning the lock */
	wait_queue_head_t	wq;	/* waiters for the mini-semaphore */
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;

/**
  *	struct sock_common - minimal network layer representation of sockets
  *	@skc_family - network address family
  *	@skc_state - Connection state
  *	@skc_reuse - %SO_REUSEADDR setting
  *	@skc_bound_dev_if - bound device index if != 0
  *	@skc_node - main hash linkage for various protocol lookup tables
  *	@skc_bind_node - bind hash linkage for various protocol lookup tables
  *	@skc_refcnt - reference count
  *
  *	This is the minimal network layer representation of sockets, the header
  *	for struct sock and struct tcp_tw_bucket.
  */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
};

/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common - shared layout with tcp_tw_bucket
  *	@sk_zapped - ax25 & ipx means !linked
  *	@sk_shutdown - mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_use_write_queue - whether to call sk->sk_write_space in sock_wfree
  *	@sk_userlocks - %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock - synchronizer
  *	@sk_rcvbuf - size of receive buffer in bytes
  *	@sk_sleep - sock wait queue
  *	@sk_dst_cache - destination cache
  *	@sk_dst_lock - destination cache lock
  *	@sk_policy - flow policy
  *	@sk_rmem_alloc - receive queue bytes committed
  *	@sk_receive_queue - incoming packets
  *	@sk_wmem_alloc - transmit queue bytes committed
  *	@sk_write_queue - Packet sending queue
  *	@sk_omem_alloc - "o" is "option" or "other"
  *	@sk_wmem_queued - persistent queue size
  *	@sk_forward_alloc - space allocated forward
  *	@sk_allocation - allocation mode
  *	@sk_sndbuf - size of send buffer in bytes
  *	@sk_flags - %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *	@sk_no_check - %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_debug - %SO_DEBUG setting
  *	@sk_rcvtstamp - %SO_TIMESTAMP setting
  *	@sk_no_largesend - whether to send large segments or not
  *	@sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_lingertime - %SO_LINGER l_linger setting
  *	@sk_hashent - hash entry in several tables (e.g. tcp_ehash)
  *	@sk_backlog - always used with the per-socket spinlock held
  *	@sk_callback_lock - used with the callbacks in the end of this struct
  *	@sk_error_queue - rarely used
  *	@sk_prot - protocol handlers inside a network family
  *	@sk_err - last error
  *	@sk_err_soft - errors that don't cause failure but are the cause of a
  *		       persistent failure not just 'timed out'
  *	@sk_ack_backlog - current listen backlog
  *	@sk_max_ack_backlog - listen backlog set in listen()
  *	@sk_priority - %SO_PRIORITY setting
  *	@sk_type - socket type (%SOCK_STREAM, etc)
  *	@sk_localroute - route locally only, %SO_DONTROUTE setting
  *	@sk_protocol - which protocol this socket belongs in this network family
  *	@sk_peercred - %SO_PEERCRED setting
  *	@sk_rcvlowat - %SO_RCVLOWAT setting
  *	@sk_rcvtimeo - %SO_RCVTIMEO setting
  *	@sk_sndtimeo - %SO_SNDTIMEO setting
  *	@sk_filter - socket filtering instructions
  *	@sk_protinfo - private area, net family specific, when not using slab
  *	@sk_slab - the slabcache this instance was allocated from
  *	@sk_timer - sock cleanup timer
  *	@sk_stamp - time stamp of last packet received
  *	@sk_socket - Identd and reporting IO signals
  *	@sk_user_data - RPC layer private data
  *	@sk_owner - module that owns this socket
  *	@sk_sndmsg_page - cached page for sendmsg
  *	@sk_sndmsg_off - cached offset for sendmsg
  *	@sk_send_head - front of stuff to transmit
  *	@sk_write_pending - a write to stream socket waits to start
  *	@sk_queue_shrunk - write queue has been shrunk recently
  *	@sk_state_change - callback to indicate change in the state of the sock
  *	@sk_data_ready - callback to indicate there is data to be processed
  *	@sk_write_space - callback to indicate there is buffer sending space available
  *	@sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv - callback to process the backlog
  *	@sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
  */
struct sock {
	/*
	 * Now struct tcp_tw_bucket also uses sock_common, so please just
	 * don't add nothing before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
/* Convenience aliases so users keep writing sk->sk_family etc. while the
 * fields actually live in the shared sock_common header above.
 */
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
	volatile unsigned char	sk_zapped;
	unsigned char		sk_shutdown;
	unsigned char		sk_use_write_queue;
	unsigned char		sk_userlocks;
	socket_lock_t		sk_lock;
	int			sk_rcvbuf;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	rwlock_t		sk_dst_lock;
	struct xfrm_policy	*sk_policy[2];
	atomic_t		sk_rmem_alloc;
	struct sk_buff_head	sk_receive_queue;
	atomic_t		sk_wmem_alloc;
	struct sk_buff_head	sk_write_queue;
	atomic_t		sk_omem_alloc;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	unsigned int		sk_allocation;
	int			sk_sndbuf;
	unsigned long		sk_flags;
	char			sk_no_check;
	unsigned char		sk_debug;
	unsigned char		sk_rcvtstamp;
	unsigned char		sk_no_largesend;
	int			sk_route_caps;
	unsigned long		sk_lingertime;
	int			sk_hashent;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	rwlock_t		sk_callback_lock;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	unsigned short		sk_type;
	unsigned char		sk_localroute;
	unsigned char		sk_protocol;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	kmem_cache_t		*sk_slab;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct module		*sk_owner;
	struct page		*sk_sndmsg_page;
	__u32			sk_sndmsg_off;
	struct sk_buff		*sk_send_head;
	int			sk_write_pending;
	void			*sk_security;
	__u8			sk_queue_shrunk;
	/* three bytes hole, try to pack */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */

/* First socket on @head; caller must know the list is non-empty. */
static inline struct sock *__sk_head(struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

/* First socket on @head, or NULL when the list is empty. */
static inline struct sock *sk_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

/* Next socket after @sk on its hash list, or NULL at the end. */
static inline struct sock *sk_next(struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

/* Mark @node as not on any list (pprev is the hashed/unhashed sentinel). */
static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

/* Unlink @sk from its hash list; does NOT touch the refcount. */
static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* Unlink @sk and re-init its node if it was hashed.
 * Returns 1 if the socket was actually removed, 0 if it was not hashed.
 * Refcount handling is left to the sk_del_node_init() wrapper below.
 */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count.
   This operation is valid only when sk is ALREADY grabbed f.e. it is found
   in hash table or a list and the lookup is made
   under lock preventing hash table modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

/* Unhash @sk and drop the reference the hash table held on it.
 * Returns 1 if the socket was hashed (and a reference was dropped), else 0.
 */
static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

/* Link @sk at the head of @list; does NOT take a reference. */
static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

/* Link @sk at the head of @list, taking a reference on behalf of the list. */
static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

/* Bind-hash linkage is not refcounted, unlike the main hash above. */
static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

/* Iterators over the main and bind hash chains.  The _from/_continue
 * variants guard against a NULL cursor before dereferencing it.
 */
#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags: bit numbers within sk->sk_flags. */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
};

/* Non-atomic bit ops: callers serialize access to sk_flags themselves. */
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

/* Accounting for the accept queue of a listening socket. */
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

/* Free space left in the send buffer (may be negative when over-committed). */
static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?