skbuff.h
来自「此工具是arm-linux-GCC交叉编译工具(cross-3.4.4)」· C头文件 代码 · 共 1,310 行 · 第 1/3 页
H
1,310 行
if (!skb->cloned)
	return 0;
dataref = atomic_read(&skb_shinfo(skb)->dataref);
/* Low bits count header references, high bits count payload-only
 * references; treat the skb as cloned unless exactly one header
 * reference remains. */
dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	/* Converting the header ref into a payload ref is what marks the
	 * header as no longer readable. */
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);	/* drop our reference on the shared copy */
		skb = nskb;	/* may be NULL if skb_clone() failed */
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
*//** * skb_unshare - make a copy of a shared buffer * @skb: buffer to check * @pri: priority for memory allocation * * If the socket buffer is a clone then this function creates a new * copy of the data, drops a reference count on the old copy and returns * the new copy with the reference count at 1. If the buffer is not a clone * the original buffer is returned. When called with a spinlock held or * from interrupt state @pri must be %GFP_ATOMIC * * %NULL is returned on a memory allocation failure. */static inline struct sk_buff *skb_unshare(struct sk_buff *skb, gfp_t pri){ might_sleep_if(pri & __GFP_WAIT); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); kfree_skb(skb); /* Free our shared copy */ skb = nskb; } return skb;}/** * skb_peek * @list_: list to peek at * * Peek an &sk_buff. Unlike most other operations you _MUST_ * be careful with this one. A peek leaves the buffer on the * list and someone else may run off with it. You must hold * the appropriate locks or have a private queue to do this. * * Returns %NULL for an empty list or a pointer to the head element. * The reference count is not incremented and the reference is therefore * volatile. Use with caution. */static inline struct sk_buff *skb_peek(struct sk_buff_head *list_){ struct sk_buff *list = ((struct sk_buff *)list_)->next; if (list == (struct sk_buff *)list_) list = NULL; return list;}/** * skb_peek_tail * @list_: list to peek at * * Peek an &sk_buff. Unlike most other operations you _MUST_ * be careful with this one. A peek leaves the buffer on the * list and someone else may run off with it. You must hold * the appropriate locks or have a private queue to do this. * * Returns %NULL for an empty list or a pointer to the tail element. * The reference count is not incremented and the reference is therefore * volatile. Use with caution. 
*/static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_){ struct sk_buff *list = ((struct sk_buff *)list_)->prev; if (list == (struct sk_buff *)list_) list = NULL; return list;}/** * skb_queue_len - get queue length * @list_: list to measure * * Return the length of an &sk_buff queue. */static inline __u32 skb_queue_len(const struct sk_buff_head *list_){ return list_->qlen;}static inline void skb_queue_head_init(struct sk_buff_head *list){ spin_lock_init(&list->lock); list->prev = list->next = (struct sk_buff *)list; list->qlen = 0;}/* * Insert an sk_buff at the start of a list. * * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. *//** * __skb_queue_head - queue a buffer at the list head * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the start of a list. This function takes no locks * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. */extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk){ struct sk_buff *prev, *next; list->qlen++; prev = (struct sk_buff *)list; next = prev->next; newsk->next = next; newsk->prev = prev; next->prev = prev->next = newsk;}/** * __skb_queue_tail - queue a buffer at the list tail * @list: list to use * @newsk: buffer to queue * * Queue a buffer at the end of a list. This function takes no locks * and you must therefore hold required locks before calling it. * * A buffer cannot be placed on two lists at the same time. 
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	/* Splice newsk in just before the head sentinel, i.e. at the
	 * tail of the ring. */
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	/* next == prev means the ring only contains the head sentinel,
	 * i.e. the queue is empty. */
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		/* Detach the removed skb completely from the list. */
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* Link newsk between prev and next; caller guarantees both are
	 * adjacent members of @list. */
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	/* Fully detach skb, then close the gap between its neighbours. */
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


/* Nonzero when some data lives in page fragments rather than in the
 * linear skb->data area. */
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

/* Bytes available in the linear data area (total minus paged data). */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

/* Linear length plus the sizes of all page fragments. */
static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

/* Fill in fragment slot @i and bump nr_frags; caller must fill slots
 * in order so that nr_frags = i + 1 stays correct. */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	/* No bounds check here: callers must know len fits (see skb_put). */
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends
the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	/* Unlike __skb_put(), overrunning the buffer end panics rather
	 * than silently corrupting memory. */
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	/* No headroom check here; see skb_push() for the checked variant. */
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	/* Must never pull below the paged portion of the data. */
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	/* If the pull reaches into paged data, linearize enough of it
	 * first; failure leaves the skb untouched. */
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	/* Try to make the first len bytes linear without consuming them. */
	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?