📄 skbuff.h
字号:
/* End of a function whose definition begins above this excerpt
 * (presumably __skb_dequeue -- confirm against the full header). */
	return result;
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/*
 *	Insert a packet on a list. Caller must hold the list lock.
 */
static inline void __skb_insert(struct sk_buff *newsk,
	struct sk_buff * prev, struct sk_buff *next,
	struct sk_buff_head * list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 *	Place a packet after a given packet in a list.
 *	Caller must hold the list lock.
 */
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff * next, * prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */
static inline void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if(list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		/* Re-check under the lock: someone else may have unlinked
		 * skb (or moved it to another list) before we got here. */
		if(skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}

/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/* Nonzero iff some of the data lives outside the linear buffer
 * (skb->data_len counts the non-linear bytes). */
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

/* Number of bytes in the linear (directly addressable) part. */
static inline int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

/* Debug assertions: trip out_of_line_bug() if the skb has page
 * fragments / a frag list / any non-linear data, respectively. */
#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)

/*
 *	Add data to an sk_buff. Unchecked variant of skb_put():
 *	caller guarantees the tail room is sufficient.
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp=skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail+=len;
	skb->len+=len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp=skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail+=len;
	skb->len+=len;
	if(skb->tail>skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}

/* Unchecked variant of skb_push(): caller guarantees the headroom. */
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data-=len;
	skb->len+=len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data-=len;
	skb->len+=len;
	if(skb->data<skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}

/* Unchecked pull: pulling past the linear data is a bug here. */
static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len-=len;
	if (skb->len < skb->data_len)
		out_of_line_bug();
	return skb->data+=len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb,len);
}

extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);

/* Pull that copes with non-linear skbs: if @len reaches into the
 * fragments, linearize enough of them first via __pskb_pull_tail(). */
static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

/* Checked version of __pskb_pull(); %NULL if @len exceeds skb->len. */
static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __pskb_pull(skb,len);
}

/* Ensure at least @len bytes are available in the linear part,
 * pulling from fragments as needed. Returns 1 on success, 0 on failure. */
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (len <= skb_headlen(skb))
		return 1;
	if (len > skb->len)
		return 0;
	return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data-skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff.
 *	A non-linear skb reports zero tailroom.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data+=len;
	skb->tail+=len;
}

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);

/* Unchecked trim; non-linear skbs are handed to ___pskb_trim()
 * with realloc disabled. */
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len = len;
		skb->tail = skb->data+len;
	} else {
		___pskb_trim(skb, len, 0);
	}
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}

/* Like __skb_trim() but may reallocate; returns 0 or an error code. */
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len = len;
		skb->tail = skb->data+len;
		return 0;
	} else {
		return ___pskb_trim(skb, len, 1);
	}
}

/* Checked trim for possibly non-linear skbs; no-op if already short enough. */
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (len < skb->len)
		return __pskb_trim(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
static inline void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb=skb_dequeue(list))!=NULL)
		kfree_skb(skb);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb=__skb_dequeue(list))!=NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      int gfp_mask)
{
	struct sk_buff *skb;

	/* 16 extra bytes are allocated and reserved as the "built in"
	 * headroom mentioned above (room for link-level headers). */
	skb = alloc_skb(length+16, gfp_mask);
	if (skb)
		skb_reserve(skb,16);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */

static inline int
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	/* Demand at least 16 bytes of headroom; round the expansion
	 * up to a multiple of 16 below. */
	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
	return 0;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *	@gfp: allocation mode
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
int skb_linearize(struct sk_buff *skb, int gfp);

/* Map a paged fragment for CPU access. With CONFIG_HIGHMEM this
 * disables bottom halves and must not be called from hard IRQ. */
static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	if (in_irq())
		out_of_line_bug();

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

/* Undo kmap_skb_frag(). */
static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

/* Iterate over every buffer on @queue with @skb as the cursor; the
 * queue head itself acts as the list terminator. */
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;			\
		     (skb != (struct sk_buff *)(queue));	\
		     skb=skb->next)

extern struct sk_buff *	skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
extern int		skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
extern int		skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
extern int		skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
extern void		skb_free_datagram(struct sock * sk, struct sk_buff *skb);

extern unsigned int	skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
extern int		skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
extern unsigned int	skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
extern void		skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

#ifdef CONFIG_NETFILTER
/* Drop a conntrack reference; destroy the master when it hits zero. */
static inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->master->use))
		nfct->master->destroy(nfct->master);
}
/* Take a conntrack reference. */
static inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
	if (nfct)
		atomic_inc(&nfct->master->use);
}
#endif

#endif	/* __KERNEL__ */

#endif	/* _LINUX_SKBUFF_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -