📄 skbuff.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 3
	}

	copy_skb_header(n, skb);
out:
	return n;
}

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->end      = data + size;
	skb->data    += off;
	skb->tail    += off;
	skb->mac.raw += off;
	skb->h.raw   += off;
	skb->nh.raw  += off;
	skb->cloned   = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
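
Below is a minimal usage sketch (an editorial addition, not part of skbuff.c) of how a driver might combine skb_headroom() with skb_realloc_headroom() above before prepending an encapsulation header. The wrapper name example_push_encap and the 8-byte header size are assumptions for illustration only.

#include <linux/skbuff.h>

/* Hypothetical example: make sure there is room to prepend an 8-byte
 * header, taking a private copy when the buffer is cloned or cramped.
 */
static struct sk_buff *example_push_encap(struct sk_buff *skb)
{
	unsigned int needed = 8;	/* assumed encapsulation header size */

	if (skb_headroom(skb) < needed || skb_cloned(skb)) {
		/* skb_realloc_headroom() hands back a private copy with at
		 * least 'needed' bytes of headroom, or NULL on allocation
		 * failure (the original skb is untouched in that case).
		 */
		struct sk_buff *nskb = skb_realloc_headroom(skb, needed);

		if (!nskb)
			return NULL;	/* caller still owns the original skb */
		kfree_skb(skb);		/* drop our reference to the old copy */
		skb = nskb;
	}

	/* Headroom is now guaranteed, so the push cannot underflow. */
	memset(skb_push(skb, needed), 0, needed);
	return skb;
}
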
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 *
 *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom, int gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = skb_headroom(skb);
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
	skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return NULL in out of memory cases.
 */

struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *nskb;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return skb;
	}

	nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
	kfree_skb(skb);
	if (nskb)
		memset(nskb->data+nskb->len, 0, pad);
	return nskb;
}

/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
 * If realloc==0 and trimming is impossible without change of data,
 * it is BUG().
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i = 0; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;
		if (end > len) {
			if (skb_cloned(skb)) {
				if (!realloc)
					BUG();
				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					return -ENOMEM;
			}
			if (len <= offset) {
				put_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb)->nr_frags--;
			} else {
				skb_shinfo(skb)->frags[i].size = len - offset;
			}
		}
		offset = end;
	}

	if (offset < len) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		if (len <= skb_headlen(skb)) {
			skb->len      = len;
			skb->data_len = 0;
			skb->tail     = skb->data + len;
			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
				skb_drop_fraglist(skb);
		} else {
			skb->data_len -= skb->len - len;
			skb->len       = len;
		}
	}

	return 0;
}
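
The skb_pad() comment above describes the classic use case: padding a runt frame before DMA. A short sketch follows (an editorial addition, not from this file), where example_pad_to_min is a hypothetical helper and ETH_ZLEN comes from <linux/if_ether.h>.

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical example: pad a short Ethernet frame to the minimum length
 * before handing it to hardware that transmits skb->len bytes.
 */
static struct sk_buff *example_pad_to_min(struct sk_buff *skb)
{
	if (skb->len < ETH_ZLEN) {
		unsigned int pad = ETH_ZLEN - skb->len;

		/* skb_pad() may return the same buffer, a reallocated copy
		 * (the old one is freed in that case), or NULL on OOM.  The
		 * padding bytes are zeroed but skb->len is not extended.
		 */
		skb = skb_pad(skb, pad);
		if (!skb)
			return NULL;

		/* Account for the already-zeroed padding in len/tail. */
		__skb_put(skb, pad);
	}
	return skb;
}
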
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need update frag list, we are in troubles.
	 * Certainly, it possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masohism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (!list)
				BUG();

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}

				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}

				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb->tail;
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to     += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
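
Because skb_copy_bits() above walks the linear area, the page fragments and the frag_list, it is the usual way to peek at data that may not be linear. A small sketch follows (an editorial addition, not part of this file); the helper name and the idea of reading a UDP header at a caller-supplied offset are assumptions.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hypothetical example: copy a UDP header out of a possibly fragmented
 * skb so it can be inspected without linearizing the buffer.
 */
static int example_peek_udp_header(const struct sk_buff *skb, int offset,
				   struct udphdr *uh)
{
	/* Returns 0 on success, -EFAULT if offset/len fall outside the skb. */
	return skb_copy_bits(skb, offset, uh, sizeof(*uh));
}
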
/* Keep iterating until skb_iter_next returns false. */
void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
{
	i->len = skb_headlen(skb);
	i->data = (unsigned char *)skb->data;
	i->nextfrag = 0;
	i->fraglist = NULL;
}

int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
{
	/* Unmap previous, if not head fragment. */
	if (i->nextfrag)
		kunmap_skb_frag(i->data);

	if (i->fraglist) {
	fraglist:
		/* We're iterating through fraglist. */
		if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
			i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
						->frags[i->nextfrag]);
			i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
				.size;
			i->nextfrag++;
			return 1;
		}

		/* Fragments with fragments?  Too hard! */
		BUG_ON(skb_shinfo(i->fraglist)->frag_list);

		i->fraglist = i->fraglist->next;
		if (!i->fraglist)
			goto end;

		i->len = skb_headlen(i->fraglist);
		i->data = i->fraglist->data;
		i->nextfrag = 0;
		return 1;
	}

	if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
		i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
		i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
		i->nextfrag++;
		return 1;
	}

	i->fraglist = skb_shinfo(skb)->frag_list;
	if (i->fraglist)
		goto fraglist;
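
The remainder of skb_iter_next() falls on the next page of the listing. The comment before skb_iter_first() gives the calling convention: prime the iterator with skb_iter_first() and keep calling skb_iter_next() until it returns false. A sketch of that loop follows (an editorial addition, not part of skbuff.c), assuming struct skb_iter is declared in this tree's <linux/skbuff.h> and that the summing helper is hypothetical.

#include <linux/skbuff.h>

/* Hypothetical example: visit every byte of an skb (head, page fragments
 * and frag_list) without linearizing it, summing the bytes as we go.
 */
static unsigned int example_sum_bytes(const struct sk_buff *skb)
{
	struct skb_iter iter;
	unsigned int sum = 0;
	unsigned int j;

	/* skb_iter_first() exposes the head fragment; each successful
	 * skb_iter_next() call exposes the next fragment (kmapping page
	 * fragments as needed) and, per the comment above, returns false
	 * once the buffer is exhausted.
	 */
	skb_iter_first(skb, &iter);
	do {
		for (j = 0; j < iter.len; j++)
			sum += iter.data[j];
	} while (skb_iter_next(skb, &iter));

	return sum;
}
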
