⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 skbuff.c

📁 《嵌入式系统设计与实例开发实验教材二源码》Linux内核移植与编译实验
💻 C
📖 第 1 页 / 共 2 页
字号:
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success, or a negative error
 *	code if expansion failed; in that case the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
	int i;
	u8 *data;
	/* New linear area: current head..end span plus the requested slack. */
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	/* Caller must hold the only reference; relocating the data of a
	 * shared skb would pull it out from under the other holders. */
	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* struct skb_shared_info lives immediately past the data area, so
	 * it is allocated together with the buffer and copied along below. */
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	/* The copied shared-info references the same frag pages; take an
	 * extra reference on each before the old data area is released. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	/* Shift every pointer that aimed into the old header by 'off'. */
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->end  = data + size;
	skb->data += off;
	skb->tail += off;
	skb->mac.raw += off;
	skb->h.raw += off;
	skb->nh.raw += off;
	skb->cloned = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	/* Already enough headroom: a plain private copy suffices. */
	if (delta <= 0)
		return pskb_copy(skb, GFP_ATOMIC);

	/* Otherwise clone and grow the clone's head by the missing room.
	 * pskb_expand_head() returns 0 on success, so the negated call
	 * below returns skb2 exactly when the expansion worked. */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL ||
	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		return skb2;

	kfree_skb(skb2);
	return NULL;
}

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom,
				int newtailroom,
				int gfp_mask)
{
	struct sk_buff *n;

	/*
	 *	Allocate the copy buffer
	 */
	n = alloc_skb(newheadroom + skb->len + newtailroom,
		      gfp_mask);
	if (n == NULL)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	/* Copy the data only; skb_copy_bits() walks frags and the
	 * frag_list too, so nonlinear buffers are handled as well. */
	if (skb_copy_bits(skb, 0, n->data, skb->len))
		BUG();

	copy_skb_header(n, skb);

	return n;
}

/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
 * If realloc==0 and trimming is impossible without change of data,
 * it is BUG().
*/int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc){	int offset = skb_headlen(skb);	int nfrags = skb_shinfo(skb)->nr_frags;	int i;	for (i=0; i<nfrags; i++) {		int end = offset + skb_shinfo(skb)->frags[i].size;		if (end > len) {			if (skb_cloned(skb)) {				if (!realloc)					BUG();				if (!pskb_expand_head(skb, 0, 0, GFP_ATOMIC))					return -ENOMEM;			}			if (len <= offset) {				put_page(skb_shinfo(skb)->frags[i].page);				skb_shinfo(skb)->nr_frags--;			} else {				skb_shinfo(skb)->frags[i].size = len-offset;			}		}		offset = end;	}	if (offset < len) {		skb->data_len -= skb->len - len;		skb->len = len;	} else {		if (len <= skb_headlen(skb)) {			skb->len = len;			skb->data_len = 0;			skb->tail = skb->data + len;			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))				skb_drop_fraglist(skb);		} else {			skb->data_len -= skb->len - len;			skb->len = len;		}	}	return 0;}/** *	__pskb_pull_tail - advance tail of skb header  *	@skb: buffer to reallocate *	@delta: number of bytes to advance tail * *	The function makes a sense only on a fragmented &sk_buff, *	it expands header moving its tail forward and copying necessary *	data from fragmented part. * *	&sk_buff MUST have reference count of 1. * *	Returns %NULL (and &sk_buff does not change) if pull failed *	or value of new tail of skb in the case of success. * *	All the pointers pointing into skb header may change and must be *	reloaded after call to this function. *//* Moves tail of skb head forward, copying data from fragmented part, * when it is necessary. * 1. It may fail due to malloc failure. * 2. It may change skb pointers. * * It is pretty complicated. Luckily, it is called only in exceptional cases. */unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta){	int i, k, eat;	/* If skb has not enough free space at tail, get new one	 * plus 128 bytes for future expansions. If we have enough	 * room at tail, reallocate without expansion only if skb is cloned.	 
*/	eat = (skb->tail+delta) - skb->end;	if (eat > 0 || skb_cloned(skb)) {		if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))			return NULL;	}	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))		BUG();	/* Optimization: no fragments, no reasons to preestimate	 * size of pulled pages. Superb.	 */	if (skb_shinfo(skb)->frag_list == NULL)		goto pull_pages;	/* Estimate size of pulled pages. */	eat = delta;	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {		if (skb_shinfo(skb)->frags[i].size >= eat)			goto pull_pages;		eat -= skb_shinfo(skb)->frags[i].size;	}	/* If we need update frag list, we are in troubles.	 * Certainly, it possible to add an offset to skb data,	 * but taking into account that pulling is expected to	 * be very rare operation, it is worth to fight against	 * further bloating skb head and crucify ourselves here instead.	 * Pure masohism, indeed. 8)8)	 */	if (eat) {		struct sk_buff *list = skb_shinfo(skb)->frag_list;		struct sk_buff *clone = NULL;		struct sk_buff *insp = NULL;		do {			if (list == NULL)				BUG();			if (list->len <= eat) {				/* Eaten as whole. */				eat -= list->len;				list = list->next;				insp = list;			} else {				/* Eaten partially. */				if (skb_shared(list)) {					/* Sucks! We need to fork list. :-( */					clone = skb_clone(list, GFP_ATOMIC);					if (clone == NULL)						return NULL;					insp = list->next;					list = clone;				} else {					/* This may be pulled without					 * problems. */					insp = list;				}				if (pskb_pull(list, eat) == NULL) {					if (clone)						kfree_skb(clone);					return NULL;				}				break;			}		} while (eat);		/* Free pulled out fragments. */		while ((list = skb_shinfo(skb)->frag_list) != insp) {			skb_shinfo(skb)->frag_list = list->next;			kfree_skb(list);		}		/* And insert new clone at head. */		if (clone) {			clone->next = list;			skb_shinfo(skb)->frag_list = clone;		}	}	/* Success! Now we may commit changes to skb data. 
*/pull_pages:	eat = delta;	k = 0;	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {		if (skb_shinfo(skb)->frags[i].size <= eat) {			put_page(skb_shinfo(skb)->frags[i].page);			eat -= skb_shinfo(skb)->frags[i].size;		} else {			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];			if (eat) {				skb_shinfo(skb)->frags[k].page_offset += eat;				skb_shinfo(skb)->frags[k].size -= eat;				eat = 0;			}			k++;		}	}	skb_shinfo(skb)->nr_frags = k;	skb->tail += delta;	skb->data_len -= delta;	return skb->tail;}/* Copy some data bits from skb to kernel buffer. */int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len){	int i, copy;	int start = skb->len - skb->data_len;	if (offset > (int)skb->len-len)		goto fault;	/* Copy header. */	if ((copy = start-offset) > 0) {		if (copy > len)			copy = len;		memcpy(to, skb->data + offset, copy);		if ((len -= copy) == 0)			return 0;		offset += copy;		to += copy;	}	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {		int end;		BUG_TRAP(start <= offset+len);		end = start + skb_shinfo(skb)->frags[i].size;		if ((copy = end-offset) > 0) {			u8 *vaddr;			if (copy > len)				copy = len;			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);			memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+			       offset-start, copy);			kunmap_skb_frag(vaddr);			if ((len -= copy) == 0)				return 0;			offset += copy;			to += copy;		}		start = end;	}	if (skb_shinfo(skb)->frag_list) {		struct sk_buff *list;		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {			int end;			BUG_TRAP(start <= offset+len);			end = start + list->len;			if ((copy = end-offset) > 0) {				if (copy > len)					copy = len;				if (skb_copy_bits(list, offset-start, to, copy))					goto fault;				if ((len -= copy) == 0)					return 0;				offset += copy;				to += copy;			}			start = end;		}	}	if (len == 0)		return 0;fault:	return -EFAULT;}/* Checksum skb data. 
 */
unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;	/* bytes in linear head */
	int pos = 0;	/* byte parity fed to csum_block_add() */

	/* Checksum header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	/* Checksum the page fragments. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			/* Fold in the partial sum; 'pos' tells the helper
			 * whether this chunk starts on an odd byte. */
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	/* Recurse into the frag list. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;

				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}

	if (len == 0)
		return csum;

	/* Request ran past the end of the skb: caller bug. */
	BUG();
	return csum;
}

/* Both of above in one bottle. */
unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;	/* bytes in linear head */
	int pos = 0;	/* byte parity fed to csum_block_add() */

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	/* Copy-and-checksum the page fragments. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
						      offset - start, to, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	/* Recurse into the frag list. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list, offset - start, to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}

	if (len == 0)
		return csum;

	/* Request ran past the end of the skb: caller bug. */
	BUG();
	return csum;
}

/* Copy an skb into a flat buffer, checksumming from csstart onward and
 * (for CHECKSUM_HW) folding the result into the transport header slot
 * at csstart + skb->csum. */
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	/* For hardware-checksum skbs, checksumming starts at the transport
	 * header; otherwise copy the whole linear head verbatim. */
	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb->len - skb->data_len;

	/* csstart must lie within the linear head. */
	if (csstart > skb->len - skb->data_len)
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
				skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}

#if 0
/*
 * 	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif

/* Boot-time initialization: create the sk_buff head slab cache and the
 * per-CPU hot lists. */
void __init skb_init(void)
{
	int i;

	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      skb_headerinit, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");

	for (i = 0; i < NR_CPUS; i++)
		skb_queue_head_init(&skb_head_pool[i].list);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -