
skbuff.h

Collection: Axis 221 camera embedded programming interface
Page 1 of 3
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt state or with spinlocks held @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
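
/*
 * Example usage (editor's illustrative sketch, not part of this header):
 * take exclusive ownership of a buffer before modifying it, using the
 * COW helpers above. The function name and calling context are
 * assumptions for the example.
 */
static inline int example_take_ownership(struct sk_buff *skb)
{
	/* %GFP_ATOMIC because this may run in softirq context. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;	/* clone failed; original already freed */

	/* The data may still be shared with a clone; copy before writing. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* ... skb->data is now safe to modify ... */
	kfree_skb(skb);
	return 0;
}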
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;

	list->qlen++;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}

/*
 *	Insert a packet on a list.
 */
extern void	skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}
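
/*
 * Example usage (editor's illustrative sketch, not part of this header):
 * a driver-private FIFO. skb_queue_head_init() gives the queue its own
 * spinlock, so the locked wrappers skb_queue_tail()/skb_dequeue() need
 * no extra locking from the caller; the __-prefixed variants above
 * require the caller to hold list->lock. Names are assumptions.
 */
static struct sk_buff_head example_rxq;	/* init once: skb_queue_head_init(&example_rxq) */

static void example_rx_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);	/* takes example_rxq.lock */
}

static void example_rx_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);			/* skb_dequeue returns NULL when empty */
}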
/*
 *	Place a packet after a given packet in a list.
 */
extern void	skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}
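
/*
 * Example usage (editor's illustrative sketch, not part of this header):
 * building an outgoing frame with skb_put(). The function name and
 * payload handling are assumptions; dev_alloc_skb() already reserves
 * the default headroom mentioned in the comments below.
 */
static struct sk_buff *example_build_frame(const void *payload, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);
	unsigned char *p;

	if (!skb)
		return NULL;

	p = skb_put(skb, len);	/* grow the tail; p points at the new bytes */
	memcpy(p, payload, len);

	/* A header added later goes in front of the data:
	 *	hdr = skb_push(skb, sizeof(struct example_hdr));
	 */
	return skb;
}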
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
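
/*
 * Example usage (editor's illustrative sketch, not part of this header):
 * a receive-path allocation that applies the NET_IP_ALIGN shift
 * described above. pkt_len and the function name are assumptions.
 * Before parsing, pskb_may_pull(skb, header_len) should be used to
 * guarantee the header bytes sit in the linear area.
 */
static struct sk_buff *example_rx_build(unsigned int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);	/* 14-byte MAC header -> aligned IP header */
	skb_put(skb, pkt_len);		/* ... then DMA or copy pkt_len bytes in ... */
	return skb;
}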
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
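
/*
 * Example usage (editor's illustrative sketch, not part of this header):
 * grow a header, reallocating only when the reserved headroom discussed
 * above is insufficient. hdr_len and the function name are assumptions;
 * pskb_expand_head() is declared elsewhere in skbuff.h.
 */
static int example_prepend_header(struct sk_buff *skb, unsigned int hdr_len)
{
	if (skb_headroom(skb) < hdr_len &&
	    pskb_expand_head(skb, hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;	/* could not grow the buffer */

	skb_push(skb, hdr_len);	/* headroom is now guaranteed */
	return 0;
}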
