/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS TCP/IP protocol driver
* FILE: include/tcpcore.h
* PURPOSE: Transmission Control Protocol definitions
* REVISIONS:
* CSH 01/01-2003 Ported from linux kernel 2.4.20
*/
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the TCP module.
*
* Version: @(#)tcp.h 1.0.5 05/23/93
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __TCPCORE_H
#define __TCPCORE_H
#include "tcpdef.h"
struct socket;
#if 1 /* skbuff */
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
#define SLAB_SKB /* Slabified skbuffs */
#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2
#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
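/*
 * Worked example of the alignment arithmetic above (assuming
 * SMP_CACHE_BYTES == 32 and PAGE_SIZE == 4096):
 *
 *   SKB_DATA_ALIGN(100) == (100 + 31) & ~31 == 128
 *   SKB_DATA_ALIGN(128) == (128 + 31) & ~31 == 128 (already aligned)
 *   SKB_MAX_HEAD(0)     == (4096 - sizeof(struct skb_shared_info)) & ~31,
 *                          i.e. the largest payload that fits in one
 *                          page alongside the shared info block.
 */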
/* A. Checksumming of received packets by device.
 *
 * NONE: device failed to checksum this packet.
 * skb->csum is undefined.
 *
 * UNNECESSARY: device parsed the packet and verified the checksum.
 * skb->csum is undefined.
 * It is a bad option, but, unfortunately, many vendors do this.
 * Apparently with the secret goal of selling you a new device
 * when you add a new protocol to your host. E.g. IPv6. 8)
 *
 * HW: the most generic way. Device supplied the checksum of the
 * _whole_ packet as seen by netif_rx in skb->csum.
 * NOTE: Even if a device supports only some protocols but is
 * able to produce some skb->csum, it MUST use HW,
 * not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 * NONE: skb is checksummed by the protocol, or a csum is not required.
 *
 * HW: device is required to csum the packet as seen by hard_start_xmit
 * from skb->h.raw to the end, and to record the checksum
 * at skb->h.raw+skb->csum.
 *
 * Device must show its capabilities in dev->features, set
 * at device setup time.
 * NETIF_F_HW_CSUM - a clever device, able to checksum everything.
 * NETIF_F_NO_CSUM - loopback or reliable single-hop media.
 * NETIF_F_IP_CSUM - device is dumb, able to csum only
 * TCP/UDP over IPv4. Sigh. Vendors like it this
 * way for an unknown reason. Though, see the comment above
 * about CHECKSUM_UNNECESSARY. 8)
 *
 * Any questions? No questions, good. --ANK
 */
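/*
 * Illustrative sketch of rule A above: how a receive path might set
 * skb->ip_summed. The example_rx_set_csum() helper is hypothetical,
 * not part of this driver.
 */
#if 0 /* illustrative sketch only */
static __inline void example_rx_set_csum(struct sk_buff *skb,
                                         int hw_csum_valid,
                                         unsigned int hw_csum)
{
    if (hw_csum_valid) {
        /* Device checksummed the whole packet as seen by netif_rx. */
        skb->ip_summed = CHECKSUM_HW;
        skb->csum = hw_csum;
    } else {
        /* Stack must verify the checksum itself; skb->csum is undefined. */
        skb->ip_summed = CHECKSUM_NONE;
    }
}
#endif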
#ifdef __i386__
#define NET_CALLER(arg) (*(((void**)&arg)-1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
#ifdef CONFIG_NETFILTER
struct nf_conntrack {
atomic_t use;
void (*destroy)(struct nf_conntrack *);
};
struct nf_ct_info {
struct nf_conntrack *master;
};
#endif
struct sk_buff_head {
/* These two members must be first. */
struct sk_buff * next;
struct sk_buff * prev;
__u32 qlen;
spinlock_t lock;
};
struct sk_buff;
#define MAX_SKB_FRAGS 6
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct
{
struct page *page;
__u16 page_offset;
__u16 size;
};
/* This data is invariant across clones and lives at
* the end of the header data, i.e. at skb->end.
*/
struct skb_shared_info {
atomic_t dataref;
unsigned int nr_frags;
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
};
struct sk_buff {
/* These two members must be first. */
struct sk_buff * next; /* Next buffer in list */
struct sk_buff * prev; /* Previous buffer in list */
struct sk_buff_head * list; /* List we are on */
struct sock *sk; /* Socket we are owned by */
struct timeval stamp; /* Time we arrived */
struct net_device *dev; /* Device we arrived on/are leaving by */
/* Transport layer header */
union
{
struct tcphdr *th;
struct udphdr *uh;
struct icmphdr *icmph;
struct igmphdr *igmph;
struct iphdr *ipiph;
struct spxhdr *spxh;
unsigned char *raw;
} h;
/* Network layer header */
union
{
struct iphdr *iph;
struct ipv6hdr *ipv6h;
struct arphdr *arph;
struct ipxhdr *ipxh;
unsigned char *raw;
} nh;
/* Link layer header */
union
{
struct ethhdr *ethernet;
unsigned char *raw;
} mac;
struct dst_entry *dst;
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48];
unsigned int len; /* Length of actual data */
unsigned int data_len;
unsigned int csum; /* Checksum */
unsigned char __unused, /* Dead field, may be reused */
cloned, /* head may be cloned (check refcnt to be sure). */
pkt_type, /* Packet class */
ip_summed; /* Driver fed us an IP checksum */
__u32 priority; /* Packet queueing priority */
atomic_t users; /* User count - see datagram.c,tcp.c */
unsigned short protocol; /* Packet protocol from driver. */
unsigned short security; /* Security level of packet */
unsigned int truesize; /* Buffer size */
unsigned char *head; /* Head of buffer */
unsigned char *data; /* Data head pointer */
unsigned char *tail; /* Tail pointer */
unsigned char *end; /* End pointer */
void (*destructor)(struct sk_buff *); /* Destruct function */
#ifdef CONFIG_NETFILTER
/* Can be used for communication between hooks. */
unsigned long nfmark;
/* Cache info */
__u32 nfcache;
/* Associated connection, if any */
struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
unsigned int nf_debug;
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
union{
__u32 ifield;
} private;
#endif
#ifdef CONFIG_NET_SCHED
__u32 tc_index; /* traffic control index */
#endif
};
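/*
 * Illustrative sketch of the cb[] convention described above: a layer
 * overlays a private struct on the 48-byte control buffer instead of
 * extending sk_buff itself. The example_layer_cb type and EXAMPLE_CB
 * macro are hypothetical; TCP uses the same pattern for its own
 * control block.
 */
#if 0 /* illustrative sketch only */
struct example_layer_cb {
    __u32 seq;          /* hypothetical per-layer state */
    __u32 flags;
};
#define EXAMPLE_CB(skb) ((struct example_layer_cb *)&((skb)->cb[0]))
/* Valid only while this layer owns the skb; skb_clone() first to keep it. */
#endif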
#define SK_WMEM_MAX 65535
#define SK_RMEM_MAX 65535
#if 1
//#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
*/
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff * alloc_skb(unsigned int size, int priority);
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
int newheadroom,
int newtailroom,
int priority);
#define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
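/*
 * Illustrative sketch: because the shared info lives at skb->end,
 * clones see the same fragment list. The linear part of the data is
 * skb->len minus skb->data_len; the rest is spread over the frags
 * array. The example_* helpers are hypothetical.
 */
#if 0 /* illustrative sketch only */
static __inline unsigned int example_linear_len(const struct sk_buff *skb)
{
    /* Bytes directly addressable at skb->data. */
    return skb->len - skb->data_len;
}

static __inline unsigned int example_frag_bytes(struct sk_buff *skb)
{
    unsigned int i, bytes = 0;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
        bytes += skb_shinfo(skb)->frags[i].size;
    return bytes;       /* equals skb->data_len when there is no frag_list */
}
#endif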
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
*
* Returns true if the queue is empty, false otherwise.
*/
static __inline int skb_queue_empty(struct sk_buff_head *list)
{
return (list->next == (struct sk_buff *) list);
}
/**
* skb_get - reference buffer
* @skb: buffer to reference
*
* Makes another reference to a socket buffer and returns a pointer
* to the buffer.
*/
static __inline struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
return skb;
}
/*
* If users==1, we are the only owner and can avoid a redundant
* atomic change.
*/
/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
*
* Drop a reference to the buffer and free it if the usage count has
* hit zero.
*/
static __inline void kfree_skb(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
__kfree_skb(skb);
}
/* Use this if you didn't touch the skb state [for fast switching] */
static __inline void kfree_skb_fast(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
kfree_skbmem(skb);
}
/**
* skb_cloned - is the buffer a clone
* @skb: buffer to check
*
* Returns true if the buffer was generated with skb_clone() and is
* one of multiple shared copies of the buffer. Cloned buffers are
* shared data so must not be written to under normal circumstances.
*/
static __inline int skb_cloned(struct sk_buff *skb)
{
return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
}
/**
* skb_shared - is the buffer shared
* @skb: buffer to check
*
* Returns true if more than one person has a reference to this
* buffer.
*/
static __inline int skb_shared(struct sk_buff *skb)
{
return (atomic_read(&skb->users) != 1);
}
/**
* skb_share_check - check if buffer is shared and if so clone it
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the buffer is shared the buffer is cloned and the old copy
* drops a reference. A new clone with a single reference is returned.
* If the buffer is not shared the original buffer is returned. When
* called from interrupt context or with spinlocks held, @pri must
* be %GFP_ATOMIC.
*
* NULL is returned on a memory allocation failure.
*/
static __inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
if (skb_shared(skb)) {
struct sk_buff *nskb;
nskb = skb_clone(skb, pri);
kfree_skb(skb);
return nskb;
}
return skb;
}
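/*
 * Illustrative sketch of typical skb_share_check() use on the receive
 * path. Running in softirq context, so the priority is GFP_ATOMIC
 * (assumed to be provided via tcpdef.h), and the returned pointer
 * replaces the original handle. example_rx_handler() is hypothetical.
 */
#if 0 /* illustrative sketch only */
static __inline int example_rx_handler(struct sk_buff *skb)
{
    skb = skb_share_check(skb, GFP_ATOMIC);
    if (skb == NULL)
        return -1;      /* clone failed; original reference already dropped */
    /* ... safe to modify the skb from here on ... */
    kfree_skb(skb);
    return 0;
}
#endif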
/*
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and a
* forwarding path, and a couple of other messy ones. The normal one is
* tcpdumping a packet that's being forwarded.
*/
/**
* skb_unshare - make a copy of a shared buffer
* @skb: buffer to check
* @pri: priority for memory allocation
*
* If the socket buffer is a clone then this function creates a new
* copy of the data, drops a reference count on the old copy and returns
* the new copy with the reference count at 1. If the buffer is not a clone
* the original buffer is returned. When called with a spinlock held or
* from interrupt context, @pri must be %GFP_ATOMIC.
*
* %NULL is returned on a memory allocation failure.
*/
static __inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
struct sk_buff *nskb;
if(!skb_cloned(skb))
return skb;
nskb=skb_copy(skb, pri);
kfree_skb(skb); /* Free our shared copy */
return nskb;
}
/**
* skb_peek
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the head element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static __inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
}
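/*
 * Illustrative sketch of peeking safely: hold the queue lock, as the
 * comment above requires, and finish with the buffer before dropping
 * the lock (or take a reference with skb_get()). Assumes the ported
 * environment supplies spin_lock_irqsave()/spin_unlock_irqrestore();
 * example_peek_len() is hypothetical.
 */
#if 0 /* illustrative sketch only */
static __inline __u32 example_peek_len(struct sk_buff_head *list)
{
    struct sk_buff *skb;
    __u32 len = 0;
    unsigned long flags;

    spin_lock_irqsave(&list->lock, flags);
    skb = skb_peek(list);
    if (skb != NULL)
        len = skb->len;     /* safe: buffer cannot vanish while locked */
    spin_unlock_irqrestore(&list->lock, flags);
    return len;
}
#endif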
/**
* skb_peek_tail
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
* list and someone else may run off with it. You must hold
* the appropriate locks or have a private queue to do this.
*
* Returns %NULL for an empty list or a pointer to the tail element.
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
static __inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
}
/**
* skb_queue_len - get queue length
* @list_: list to measure
*
* Return the length of an &sk_buff queue.
*/
static __inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
return(list_->qlen);
}
static __inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
list->prev = (struct sk_buff *)list;
list->next = (struct sk_buff *)list;
list->qlen = 0;
}