skbuff.h
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (unlikely(skb->data_len)) {
                WARN_ON(1);
                return;
        }
        skb->len = len;
        skb->tail = skb->data + len;
}

/**
 * skb_trim - remove end from a buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * Cut the length of a buffer down by removing data from the tail. If
 * the buffer is already under the length specified it is not modified.
 * The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->len > len)
                __skb_trim(skb, len);
}

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->data_len)
                return ___pskb_trim(skb, len);
        __skb_trim(skb, len);
        return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * This is identical to pskb_trim except that the caller knows that
 * the skb is not cloned, so we should never get an error due to
 * out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
        int err = pskb_trim(skb, len);
        BUG_ON(err);
}

/**
 * skb_orphan - orphan a buffer
 * @skb: buffer to orphan
 *
 * If a buffer currently has an owner then we call the owner's
 * destructor function and make the @skb unowned. The buffer continues
 * to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
        if (skb->destructor)
                skb->destructor(skb);
        skb->destructor = NULL;
        skb->sk = NULL;
}

/**
 * __skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function does not take the
 * list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = __skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}

/**
 * __dev_alloc_skb - allocate an skbuff for receiving
 * @length: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                              gfp_t gfp_mask)
{
        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
        if (likely(skb))
                skb_reserve(skb, NET_SKB_PAD);
        return skb;
}
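/*
 * Usage sketch (illustrative, not part of this header): trimming a
 * received frame down to the length claimed by a protocol header. The
 * function name and the "payload_len" parameter are assumptions made
 * for the example. pskb_trim() is chosen over skb_trim() because a
 * receive-path skb may be non-linear (skb->data_len != 0), and the
 * call can therefore fail with -ENOMEM.
 */
static inline int example_trim_to_payload(struct sk_buff *skb,
                                          unsigned int payload_len)
{
        /* Already short enough: pskb_trim() would be a no-op anyway. */
        if (skb->len <= payload_len)
                return 0;

        return pskb_trim(skb, payload_len);
}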
/**
 * dev_alloc_skb - allocate an skbuff for receiving
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
        return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask);

/**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
                unsigned int length)
{
        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is an skb with a writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
        int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
                        skb_headroom(skb);

        if (delta < 0)
                delta = 0;

        if (delta || skb_cloned(skb))
                return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
                                ~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
        return 0;
}

/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
        unsigned int size = skb->len;
        if (likely(size >= len))
                return 0;
        return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
                               char __user *from, int copy)
{
        const int off = skb->len;

        if (skb->ip_summed == CHECKSUM_NONE) {
                int err = 0;
                __wsum csum = csum_and_copy_from_user(from,
                                                      skb_put(skb, copy),
                                                      copy, 0, &err);
                if (!err) {
                        skb->csum = csum_block_add(skb->csum, csum, off);
                        return 0;
                }
        } else if (!copy_from_user(skb_put(skb, copy), from, copy))
                return 0;

        __skb_trim(skb, off);
        return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
                                   struct page *page, int off)
{
        if (i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
                return page == frag->page &&
                       off == frag->page_offset + frag->size;
        }
        return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
        return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) || skb_cloned(skb) ?
               __skb_linearize(skb) : 0;
}
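/*
 * Usage sketch (illustrative, not part of this header): the classic
 * driver receive pattern built on netdev_alloc_skb(). The helper
 * already reserves NET_SKB_PAD bytes of headroom; the extra
 * skb_reserve(skb, 2) is the common trick that aligns the IP header
 * on a 16-byte boundary behind the 14-byte Ethernet header. The
 * function name, "hw_buf" and "pkt_len" are assumptions standing in
 * for whatever the hardware actually provides; eth_type_trans() comes
 * from <linux/etherdevice.h>.
 */
static inline struct sk_buff *example_rx_build(struct net_device *dev,
                                               const void *hw_buf,
                                               unsigned int pkt_len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

        if (!skb)
                return NULL;                    /* out of memory: drop */

        skb_reserve(skb, 2);                    /* align the IP header */
        memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
        skb->protocol = eth_type_trans(skb, dev);
        return skb;
}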
/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
        if (likely(len >= skb->len))
                return 0;
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;
        return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
        for (skb = (queue)->next; \
             prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
             skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
        for (skb = (queue)->prev; \
             prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
             skb = skb->prev)

extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                         int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
                                  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
                                   struct iovec *to, int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
                                            struct iovec *iov);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
                              unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
                           int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
                         void *to, int len);
extern int skb_store_bits(const struct sk_buff *skb, int offset,
                          void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                     u8 *to, int len, __wsum csum);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1,
                      const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
{
        int hlen = skb_headlen(skb);

        if (hlen - offset >= len)
                return skb->data + offset;

        if (skb_copy_bits(skb, offset, buffer, len) < 0)
                return NULL;

        return buffer;
}

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
                                     struct timeval *stamp)
{
        stamp->tv_sec = skb->tstamp.off_sec;
        stamp->tv_usec = skb->tstamp.off_usec;
}
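/*
 * Usage sketch (illustrative, not part of this header): reading a UDP
 * header from a possibly non-linear skb with skb_header_pointer(). If
 * the bytes live in the linear area a direct pointer is returned;
 * otherwise they are copied into the on-stack "_uh" and that buffer
 * is returned instead. The function name and the "thoff" (transport
 * header offset) parameter are assumptions for the example; struct
 * udphdr comes from <linux/udp.h>.
 */
static inline int example_udp_dest_port(const struct sk_buff *skb, int thoff)
{
        struct udphdr _uh;
        const struct udphdr *uh;

        uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
        if (uh == NULL)
                return -EINVAL;         /* packet too short */

        return ntohs(uh->dest);
}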
/**
 * skb_set_timestamp - set timestamp of a skb
 * @skb: skb to set stamp of
 * @stamp: pointer to struct timeval to get stamp from
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts a struct timeval to an offset and stores
 * it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb,
                                     const struct timeval *stamp)
{
        skb->tstamp.off_sec = stamp->tv_sec;
        skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
        return skb->ip_summed != CHECKSUM_UNNECESSARY &&
                __skb_checksum_complete(skb);
}

#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
        if (nfct && atomic_dec_and_test(&nfct->use))
                nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
        if (nfct)
                atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
        if (skb)
                atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
        if (skb)
                kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
        if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
                kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
        if (nf_bridge)
                atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
        nf_conntrack_put(skb->nfct);
        skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(skb->nfct_reasm);
        skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
        skb->nf_bridge = NULL;
#endif
}
#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
        to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
        skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size;
}

#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
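/*
 * Usage sketch (illustrative, not part of this header): software
 * verification of a UDP checksum, the way a receive path would use
 * skb_checksum_complete(). skb->csum is seeded with the pseudo-header
 * sum via csum_tcpudp_nofold() from <net/checksum.h>; the helper then
 * folds in the packet itself, or is a no-op when the hardware already
 * verified it (CHECKSUM_UNNECESSARY). The function name and parameters
 * are assumptions; for brevity this skips the CHECKSUM_COMPLETE fast
 * path that real protocols also check.
 */
static inline int example_udp_csum_valid(struct sk_buff *skb,
                                         __be32 saddr, __be32 daddr)
{
        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
                                               IPPROTO_UDP, 0);

        /* Non-zero means the checksum did not verify. */
        return skb_checksum_complete(skb) == 0;
}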