📄 skbuff.h
字号:
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

/* Values for skb->ip_summed; see the long comment below for semantics. */
#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 1
#define CHECKSUM_UNNECESSARY 2
#define CHECKSUM_COMPLETE 3

/* Round X up to the next SMP cacheline boundary. */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/*
 * Largest cacheline-aligned payload that fits in a page-order-ORDER
 * allocation once X bytes of headroom and the trailing
 * struct skb_shared_info are accounted for.
 */
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed packet and would have verified the checksum.
 *		skb->csum is undefined.
 *	      It is bad option, but, unfortunately, many of vendors do this.
 *	      Apparently with secret goal to sell you new device, when you
 *	      will add new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->h.raw to the end and to record the checksum
 *	at skb->h.raw+skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way by an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */

struct net_device;

#ifdef CONFIG_NETFILTER
/* Refcounted conntrack handle hung off skb->nfct. */
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
/* Per-skb state for bridged frames traversing netfilter. */
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;	/* physical bridge port the frame arrived on */
	struct net_device *physoutdev;	/* physical bridge port the frame leaves by */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;	/* number of buffers on the list */
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

/* One paged fragment of an skb's data. */
struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

/* Arrival timestamp, split into seconds/microseconds. */
struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};

/* Values for skb->fclone (fast-clone bookkeeping). */
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

/* Bit flags for skb_shinfo(skb)->gso_type. */
enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/** 
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer.
Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct  dst_entry	*dst;
	struct	sec_path	*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len;
	union {
		__wsum		csum;
		__u32		csum_offset;
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);

/*
 * Allocate a plain (non-fclone) skb on the local node.
 * See __alloc_skb() for the meaning of @size and @priority.
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

/*
 * Allocate an skb from the fclone cache, so a later skb_clone()
 * can be satisfied without a separate allocation.
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

/*
 * Sanity-check skb->truesize against the actual data length; complain
 * via skb_truesize_bug() when the accounting looks too small.
 */
static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

/* Iterator state for skb_prepare_seq_read()/skb_seq_read(). */
struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal: the shared info lives directly past the data buffer. */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	/* The head doubles as the list terminator sentinel. */
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic change.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	/* Low 16 bits of dataref count the full-data references;
	 * exactly 1 means we are the sole owner of skb->data. */
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	/* Subtract payload-only references (high half) from total
	 * references (low half); 1 left over means the header part
	 * has a single owner and may be modified in place. */
	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	/* Bump the payload half (upper 16 bits) of dataref. */
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -