skbuff.c
end:
	/* Bug trap for callers */
	i->data = NULL;
	return 0;
}

void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
{
	/* Unmap previous, if not head fragment. */
	if (i->data && i->nextfrag)
		kunmap_skb_frag(i->data);

	/* Bug trap for callers */
	i->data = NULL;
}

/* Checksum skb data. */
unsigned int skb_checksum(const struct sk_buff *skb, int offset,
			  int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}
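/*
 * Example usage (a minimal sketch, not from the original file; the
 * helper name is hypothetical): folding an entire packet into a 16-bit
 * Internet checksum with skb_checksum().
 */
static inline unsigned short example_csum_whole_skb(const struct sk_buff *skb)
{
	/* Walks linear data, page frags and the frag_list in one pass. */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}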
/* Both of the above combined: copy the data and checksum it in one pass. */
unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos     = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start,
							  to, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb_headlen(skb);

	if (csstart > skb_headlen(skb))
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item
 *	is returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item
 *	is returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
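/*
 * Example usage (a minimal sketch, not from the original file; the queue
 * and helper names are hypothetical): draining a receive queue from
 * process context. skb_dequeue() takes the queue lock with IRQs
 * disabled, so this is safe even while an interrupt handler feeds the
 * same queue.
 */
static void example_rx_drain(struct sk_buff_head *rxq)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(rxq)) != NULL) {
		/* ... hand the buffer to a real consumer here ... */
		kfree_skb(skb);		/* drop our reference when done */
	}
}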
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */
void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}

/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls. A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

/**
 *	skb_insert - insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls. A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

#if 0
/*
 * Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif
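/*
 * Example usage (a minimal sketch, not from the original file; the
 * helper name is hypothetical): moving a buffer to the front of a
 * queue. skb_unlink() and skb_queue_head() each take the list lock
 * themselves, so no extra locking is needed, though the buffer is
 * briefly on no list between the two calls.
 */
static void example_requeue_front(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_unlink(skb);	/* finds the current list via skb->list */
	skb_queue_head(q, skb);	/* a buffer may sit on only one list */
}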
static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

	/* And move the paged data as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		  += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb->tail		   = skb->data + len;
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move the whole frag to the second
				 *    part, if possible. E.g. this approach
				 *    is mandatory for TUX, where splitting
				 *    is expensive.
				 * 2. Split accurately, which is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 *	skb_split - Split a fragmented skb into two parts at length len.
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_iter_first);
EXPORT_SYMBOL(skb_iter_next);
EXPORT_SYMBOL(skb_iter_abort);
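/*
 * Example usage (a minimal sketch, not from the original file; the
 * helper name and GFP_ATOMIC choice are assumptions): splitting a
 * buffer at byte len, as a TSO-style segmentation path might. The
 * second buffer is allocated with enough tailroom for the worst case
 * where the split point falls inside the linear header.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!skb1)
		return NULL;
	/* skb keeps bytes [0, len); everything past len moves to skb1. */
	skb_split(skb, skb1, len);
	return skb1;
}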