⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 skbuff.h

📁 Linux内核源代码（压缩文件），是《Linux内核》一书中的源代码
💻 H
📖 第 1 页 / 共 2 页
字号:
/** *	__skb_dequeue - remove from the head of the queue *	@list: list to dequeue from * *	Remove the head of the list. This function does not take any locks *	so must be used with appropriate locks held only. The head item is *	returned or %NULL if the list is empty. */static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list){	struct sk_buff *next, *prev, *result;	prev = (struct sk_buff *) list;	next = prev->next;	result = NULL;	if (next != prev) {		result = next;		next = next->next;		list->qlen--;		next->prev = prev;		prev->next = next;		result->next = NULL;		result->prev = NULL;		result->list = NULL;	}	return result;}/** *	skb_dequeue - remove from the head of the queue *	@list: list to dequeue from * *	Remove the head of the list. The list lock is taken so the function *	may be used safely with other locking list functions. The head item is *	returned or %NULL if the list is empty. */static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list){	long flags;	struct sk_buff *result;	spin_lock_irqsave(&list->lock, flags);	result = __skb_dequeue(list);	spin_unlock_irqrestore(&list->lock, flags);	return result;}/* *	Insert a packet on a list. */static inline void __skb_insert(struct sk_buff *newsk,	struct sk_buff * prev, struct sk_buff *next,	struct sk_buff_head * list){	newsk->next = next;	newsk->prev = prev;	next->prev = newsk;	prev->next = newsk;	newsk->list = list;	list->qlen++;}/** *	skb_insert	-	insert a buffer *	@old: buffer to insert before *	@newsk: buffer to insert * *	Place a packet before a given packet in a list. The list locks are taken *	and this function is atomic with respect to other list locked calls *	A buffer cannot be placed on two lists at the same time. 
*/static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk){	unsigned long flags;	spin_lock_irqsave(&old->list->lock, flags);	__skb_insert(newsk, old->prev, old, old->list);	spin_unlock_irqrestore(&old->list->lock, flags);}/* *	Place a packet after a given packet in a list. */static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk){	__skb_insert(newsk, old, old->next, old->list);}/** *	skb_append	-	append a buffer *	@old: buffer to insert after *	@newsk: buffer to insert * *	Place a packet after a given packet in a list. The list locks are taken *	and this function is atomic with respect to other list locked calls. *	A buffer cannot be placed on two lists at the same time. */static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk){	unsigned long flags;	spin_lock_irqsave(&old->list->lock, flags);	__skb_append(old, newsk);	spin_unlock_irqrestore(&old->list->lock, flags);}/* * remove sk_buff from list. _Must_ be called atomically, and with * the list known.. */ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list){	struct sk_buff * next, * prev;	list->qlen--;	next = skb->next;	prev = skb->prev;	skb->next = NULL;	skb->prev = NULL;	skb->list = NULL;	next->prev = prev;	prev->next = next;}/** *	skb_unlink	-	remove a buffer from a list *	@skb: buffer to remove * *	Place a packet after a given packet in a list. The list locks are taken *	and this function is atomic with respect to other list locked calls *	 *	Works even without knowing the list it is sitting on, which can be  *	handy at times. It also means that THE LIST MUST EXIST when you  *	unlink. Thus a list must have its contents unlinked before it is *	destroyed. 
*/static inline void skb_unlink(struct sk_buff *skb){	struct sk_buff_head *list = skb->list;	if(list) {		unsigned long flags;		spin_lock_irqsave(&list->lock, flags);		if(skb->list == list)			__skb_unlink(skb, skb->list);		spin_unlock_irqrestore(&list->lock, flags);	}}/* XXX: more streamlined implementation *//** *	__skb_dequeue_tail - remove from the tail of the queue *	@list: list to dequeue from * *	Remove the tail of the list. This function does not take any locks *	so must be used with appropriate locks held only. The tail item is *	returned or %NULL if the list is empty. */static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list){	struct sk_buff *skb = skb_peek_tail(list); 	if (skb)		__skb_unlink(skb, list);	return skb;}/** *	skb_dequeue - remove from the head of the queue *	@list: list to dequeue from * *	Remove the head of the list. The list lock is taken so the function *	may be used safely with other locking list functions. The tail item is *	returned or %NULL if the list is empty. */static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list){	long flags;	struct sk_buff *result;	spin_lock_irqsave(&list->lock, flags);	result = __skb_dequeue_tail(list);	spin_unlock_irqrestore(&list->lock, flags);	return result;}/* *	Add data to an sk_buff */ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len){	unsigned char *tmp=skb->tail;	skb->tail+=len;	skb->len+=len;	return tmp;}/** *	skb_put - add data to a buffer *	@skb: buffer to use  *	@len: amount of data to add * *	This function extends the used data area of the buffer. If this would *	exceed the total buffer size the kernel will panic. A pointer to the *	first byte of the extra data is returned. 
*/ static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len){	unsigned char *tmp=skb->tail;	skb->tail+=len;	skb->len+=len;	if(skb->tail>skb->end) {		skb_over_panic(skb, len, current_text_addr());	}	return tmp;}static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len){	skb->data-=len;	skb->len+=len;	return skb->data;}/** *	skb_push - add data to the start of a buffer *	@skb: buffer to use  *	@len: amount of data to add * *	This function extends the used data area of the buffer at the buffer *	start. If this would exceed the total buffer headroom the kernel will *	panic. A pointer to the first byte of the extra data is returned. */static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len){	skb->data-=len;	skb->len+=len;	if(skb->data<skb->head) {		skb_under_panic(skb, len, current_text_addr());	}	return skb->data;}static inline char *__skb_pull(struct sk_buff *skb, unsigned int len){	skb->len-=len;	return 	skb->data+=len;}/** *	skb_pull - remove data from the start of a buffer *	@skb: buffer to use  *	@len: amount of data to remove * *	This function removes data from the start of a buffer, returning *	the memory to the headroom. A pointer to the next data in the buffer *	is returned. Once the data has been pulled future pushes will overwrite *	the old data. */static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len){		if (len > skb->len)		return NULL;	return __skb_pull(skb,len);}/** *	skb_headroom - bytes at buffer head *	@skb: buffer to check * *	Return the number of bytes of free space at the head of an &sk_buff. 
*/ static inline int skb_headroom(const struct sk_buff *skb){	return skb->data-skb->head;}/** *	skb_tailroom - bytes at buffer end *	@skb: buffer to check * *	Return the number of bytes of free space at the tail of an sk_buff */static inline int skb_tailroom(const struct sk_buff *skb){	return skb->end-skb->tail;}/** *	skb_reserve - adjust headroom *	@skb: buffer to alter *	@len: bytes to move * *	Increase the headroom of an empty &sk_buff by reducing the tail *	room. This is only allowed for an empty buffer. */static inline void skb_reserve(struct sk_buff *skb, unsigned int len){	skb->data+=len;	skb->tail+=len;}static inline void __skb_trim(struct sk_buff *skb, unsigned int len){	skb->len = len;	skb->tail = skb->data+len;}/** *	skb_trim - remove end from a buffer *	@skb: buffer to alter *	@len: new length * *	Cut the length of a buffer down by removing data from the tail. If *	the buffer is already under the length specified it is not modified. */static inline void skb_trim(struct sk_buff *skb, unsigned int len){	if (skb->len > len) {		__skb_trim(skb, len);	}}/** *	skb_orphan - orphan a buffer *	@skb: buffer to orphan * *	If a buffer currently has an owner then we call the owner's *	destructor function and make the @skb unowned. The buffer continues *	to exist but is no longer charged to its former owner. */static inline void skb_orphan(struct sk_buff *skb){	if (skb->destructor)		skb->destructor(skb);	skb->destructor = NULL;	skb->sk = NULL;}/** *	skb_purge - empty a list *	@list: list to empty * *	Delete all buffers on an &sk_buff list. Each buffer is removed from *	the list and one reference dropped. This function takes the list *	lock and is atomic with respect to other list locking functions. */static inline void skb_queue_purge(struct sk_buff_head *list){	struct sk_buff *skb;	while ((skb=skb_dequeue(list))!=NULL)		kfree_skb(skb);}/** *	__skb_purge - empty a list *	@list: list to empty * *	Delete all buffers on an &sk_buff list. 
Each buffer is removed from *	the list and one reference dropped. This function does not take the *	list lock and the caller must hold the relevant locks to use it. */static inline void __skb_queue_purge(struct sk_buff_head *list){	struct sk_buff *skb;	while ((skb=__skb_dequeue(list))!=NULL)		kfree_skb(skb);}/** *	dev_alloc_skb - allocate an skbuff for sending *	@length: length to allocate * *	Allocate a new &sk_buff and assign it a usage count of one. The *	buffer has unspecified headroom built in. Users should allocate *	the headroom they think they need without accounting for the *	built in space. The built in space is used for optimisations. * *	%NULL is returned in there is no free memory. Although this function *	allocates memory it can be called from an interrupt. */ static inline struct sk_buff *dev_alloc_skb(unsigned int length){	struct sk_buff *skb;	skb = alloc_skb(length+16, GFP_ATOMIC);	if (skb)		skb_reserve(skb,16);	return skb;}/** *	skb_cow - copy a buffer if need be *	@skb: buffer to copy *	@headroom: needed headroom * *	If the buffer passed lacks sufficient headroom or is a clone then *	it is copied and the additional headroom made available. If there *	is no free memory %NULL is returned. The new buffer is returned if *	a copy was made (and the old one dropped a reference). The existing *	buffer is returned otherwise. * *	This function primarily exists to avoid making two copies when making *	a writable copy of a buffer and then growing the headroom. 
 */
static inline struct sk_buff *skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	/* Round the requested headroom up to a 16-byte multiple. */
	headroom = (headroom+15)&~15;
	if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		/* Our reference to the old buffer is dropped either way;
		 * on allocation failure skb2 (and thus the return) is NULL. */
		kfree_skb(skb);
		skb = skb2;
	}
	return skb;
}

/* Iterate @skb over every buffer on @queue; the queue head itself is
 * the termination sentinel.  NOTE(review): not safe if the current
 * buffer is unlinked during the walk -- confirm callers don't do that. */
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;			\
		     (skb != (struct sk_buff *)(queue));	\
		     skb=skb->next)

/* Datagram helpers -- declared here, defined elsewhere in the kernel. */
extern struct sk_buff *		skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern unsigned int		datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int			skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
extern int			skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
extern void			skb_free_datagram(struct sock * sk, struct sk_buff *skb);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

#ifdef CONFIG_NETFILTER
/* Drop a reference on @nfct's master conntrack entry; destroy it when
 * the last reference goes away.  NULL-safe. */
static inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->master->use))
		nfct->master->destroy(nfct->master);
}
/* Take an extra reference on @nfct's master conntrack entry.  NULL-safe. */
static inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
	if (nfct)
		atomic_inc(&nfct->master->use);
}
#endif

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -