skbuff.h

From "Linux kernel source code" · C header file · 1,809 lines total · page 1/4

H
1,809
Font size
 *	first byte of the extra data is returned. */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	/* Tail extension is only valid on a linear skb. */
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

/*
 *	__skb_push - unchecked variant of skb_push(): extends the used data
 *	area at the buffer start without verifying headroom. Caller must
 *	guarantee at least @len bytes of headroom.
 */
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data<skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

/*
 *	__skb_pull - unchecked variant of skb_pull(): removes @len bytes from
 *	the front of the buffer. BUG()s if the pull would leave less linear
 *	data than the paged data_len accounts for.
 */
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

/*
 *	__pskb_pull - pull that copes with non-linear buffers. When @len
 *	exceeds the linear header, __pskb_pull_tail() is used to make the
 *	needed bytes available (presumably copying from paged data — defined
 *	elsewhere); returns NULL if that fails.
 */
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

/* Length-checked wrapper around __pskb_pull(). */
static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

/*
 *	pskb_may_pull - check that @len bytes are (or can be made) available
 *	in the linear header. Returns 1 on success, 0 if the buffer is too
 *	short or the data cannot be brought into the linear area.
 */
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	(zero for non-linear buffers).
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
/*
 * In this configuration the transport/network/mac header fields are
 * offsets relative to skb->head rather than raw pointers.
 */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	/* ~0U is treated as the "not set" sentinel in offset mode. */
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
/*
 * In this configuration the header fields hold raw pointers into the
 * buffer.
 */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/* Offset of the transport header from the start of the data area. */
static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

/* Network header length: distance from network to transport header. */
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

/* Offset of the network header from the start of the data area. */
static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

/*
 *	__skb_trim - trim a purely linear buffer down to @len bytes.
 *	Warns and does nothing if the buffer has paged data.
 */
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}

/*
 *	__pskb_trim - trim that also handles paged data by deferring to
 *	___pskb_trim(); returns 0 on success or ___pskb_trim()'s error.
 */
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

/* Length-checked trim: no-op unless @len is shorter than the buffer. */
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?