skbuff.c

linux kernel source code (C)
Page 1 of 4
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */
__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos     = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;

				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
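The two routines above pair naturally: skb_store_bits() writes through the linear area, the paged fragments and the fragment list, and skb_checksum() folds a partial checksum over the same geometry. Below is a minimal usage sketch, not part of skbuff.c; the function name, error convention and out-parameter are assumptions for illustration.

/* Illustrative sketch only (assumes <linux/skbuff.h>); not kernel code. */
static int example_store_and_checksum(struct sk_buff *skb, int offset,
				      const void *patch, int len,
				      __wsum *csump)
{
	/* skb_store_bits() returns -EFAULT if the write would run
	 * past skb->len; it handles non-linear skbs for us. */
	if (skb_store_bits(skb, offset, patch, len))
		return -EFAULT;

	/* Checksum only the bytes just written, from a zero seed. */
	*csump = skb_checksum(skb, offset, len, 0);
	return 0;
}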
/* Both of above in one bottle. */
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos     = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
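skb_dequeue(), skb_dequeue_tail() and skb_queue_purge() are the locked counterparts of the __skb_* helpers, so they mix freely with the locked enqueue functions that follow. A hypothetical lifecycle sketch, not part of skbuff.c:

/* Illustrative sketch only: a private queue, one buffer in, all out. */
static void example_queue_lifecycle(void)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);

	skb = alloc_skb(128, GFP_ATOMIC);
	if (skb)
		skb_queue_tail(&q, skb);	/* locked enqueue at the tail */

	/* Locked dequeue; returns NULL once the queue is empty. */
	while ((skb = skb_dequeue(&q)) != NULL)
		kfree_skb(skb);
}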
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
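Because a buffer can sit on only one list at a time, moving an skb between queues is always an unlink followed by a re-queue. A sketch under that assumption, not part of skbuff.c; both list heads are hypothetical:

/* Illustrative sketch only: move one buffer between two queues. */
static void example_requeue(struct sk_buff *skb,
			    struct sk_buff_head *from,
			    struct sk_buff_head *to)
{
	skb_unlink(skb, from);		/* caller must know skb is on "from" */
	skb_queue_tail(to, skb);	/* now safe to place on the new list */
}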
static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);

	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - split a fragmented skb into two parts at the given length
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
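A typical caller of skb_split() hands in a freshly allocated skb1 and lets the helpers above decide whether the split point falls in the header or in the paged data. The sketch below is hypothetical and not part of skbuff.c; the allocation size is chosen only to cover the linear bytes that skb_split_inside_header() may skb_put() into skb1.

/* Illustrative sketch only: keep the first len bytes in skb and return
 * the remainder in a new buffer, or NULL on allocation failure. */
static struct sk_buff *example_split(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);	/* skb := [0, len), skb1 := the rest */
	return skb1;
}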
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at &consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to &data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. &consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non-linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
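skb_seq_read() is cut short here by the page break and continues on the next page, but the calling convention is already visible: prepare the state, loop until a zero length comes back, then abort to release any still-mapped fragment. A minimal, hypothetical consumer, not part of skbuff.c:

/* Illustrative sketch only: walk [from, to) of an skb zerocopy-style. */
static void example_seq_read(struct sk_buff *skb, unsigned int from,
			     unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, from, to, &st);

	/* Each iteration yields a pointer into the skb; block sizes are
	 * arbitrary, so we track the running total ourselves. */
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* ... process len bytes at data ... */
		consumed += len;
	}

	skb_abort_seq_read(&st);	/* unmap any fragment still mapped */
}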
