/* skbuff.h — C header from the Xen virtual-machine (vnet) source package. */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _VNET_SKBUFF_H
#define _VNET_SKBUFF_H

#include "sys_kernel.h"
#include "spinlock.h"

struct sk_buff;

/* Head of a doubly-linked queue of sk_buffs.  The list is circular:
 * an empty queue has next == prev == (struct sk_buff *)&head, which is
 * why the first two members must mirror struct sk_buff's layout. */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;	/* number of buffers currently queued */
	spinlock_t	lock;	/* guards the locked (non-__) queue ops */
};

#define MAX_SKB_FRAGS 8 // (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

/* One page fragment of a non-linear buffer. */
struct skb_frag_struct {
	//struct page *page;
	void *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned int	nr_frags;
	unsigned short	tso_size;
	unsigned short	tso_segs;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

struct sk_buff {
	/* These two members must be first (see struct sk_buff_head). */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sk_buff_head	*list;	/* queue we are on, or NULL */
	struct net_device	*dev;

	/* Transport-layer header. */
	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	/* Network-layer header. */
	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	/* Link-layer header. */
	union {
		unsigned char	*raw;
	} mac;

	unsigned int		len,		/* total bytes (linear + frags) */
				data_len;	/* bytes in fragments only */
	unsigned char		pkt_type;
	unsigned short		protocol;

	void			(*destructor)(struct sk_buff *skb);

	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;		/* reference count */
	unsigned char		*head,		/* start of allocated buffer */
				*data,		/* start of valid data */
				*tail,		/* end of valid data */
				*end;		/* end of buffer; shinfo lives here */
};

extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void skb_under_panic(struct sk_buff *skb, int len, void *here);

/* The shared info area sits immediately after the data buffer. */
#define skb_shinfo(SKB)	((struct skb_shared_info *)((SKB)->end))

extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *alloc_skb(unsigned int size, int priority);
extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, int priority);
extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);

/* Drop one reference; free the buffer when the last reference goes.
 * The "== 1" fast path avoids the atomic dec for the common
 * single-owner case. */
static inline void kfree_skb(struct sk_buff *skb)
{
	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}

static inline void dev_kfree_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}

/* NOTE: stubbed — this port never reports a buffer as cloned. */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return 0;
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/* Initialise a queue head to the empty (self-linked) state. */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = result->prev = NULL;
		result->list = NULL;
	}
	return result;
}

/*
 *	Insert a packet on a list.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk);
static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}

/*
 *	remove sk_buff from list. _Must_ be called atomically, and with
 *	the list known..
 */
extern void skb_unlink(struct sk_buff *skb);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}

/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	skb->tail += len;
	skb->len += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head)) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	//BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

/* NOTE: unlike the mainline kernel this only checks the total length;
 * it does not actually pull fragment data into the linear area. */
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	return (len <= skb->len);
}

/* Bytes of data in the linear (non-fragment) part of the buffer. */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, 16);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

/* True if x (network byte order) is an IPv4 multicast address (224/4). */
#define MULTICAST(x)	(((x) & htonl(0xf0000000)) == htonl(0xe0000000))

#endif /* _VNET_SKBUFF_H */