
📄 nf_conntrack_core.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

DEFINE_RWLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;

	a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
		   (tuple->src.l3num << 16) | tuple->dst.protonum);
	b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		   ((__force __u16)tuple->src.u.all << 16) |
		    (__force __u16)tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

int nf_ct_get_tuplepr(const struct sk_buff *skb,
		      unsigned int nhoff,
		      u_int16_t l3num,
		      struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return 0;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	nf_ct_ext_destroy(ct);

	rcu_read_unlock();

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, NULL);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &nf_conntrack_hash[hash]);
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
		       &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_node *n;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means noone else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	write_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, skb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
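The bucket selection in __hash_conntrack() above has a structure worth isolating: each half of the tuple is hashed together with its protocol metadata, the two results are combined with a random seed, and the output is reduced modulo the table size. The standalone userspace sketch below illustrates only that shape; struct tuple and mix32() are simplified stand-ins invented for this illustration, not the kernel's nf_conntrack_tuple or its jhash2()/jhash_2words() helpers.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for nf_conntrack_tuple: one 32-bit address per
 * side instead of the kernel's u3.all arrays. */
struct tuple {
	uint32_t src_addr, dst_addr;
	uint16_t src_port, dst_port;
	uint16_t l3num;     /* address family, e.g. 2 for AF_INET */
	uint8_t  protonum;  /* L4 protocol, e.g. 6 for TCP */
};

/* Placeholder 32-bit mixer; the kernel uses jhash2()/jhash_2words(). */
static uint32_t mix32(uint32_t x)
{
	x ^= x >> 16; x *= 0x7feb352d;
	x ^= x >> 15; x *= 0x846ca68b;
	x ^= x >> 16;
	return x;
}

/* Mirrors the shape of __hash_conntrack(): hash each tuple half with
 * its protocol metadata folded in, combine the halves with a random
 * seed, and reduce modulo the bucket count. */
static uint32_t hash_tuple(const struct tuple *t, uint32_t size, uint32_t rnd)
{
	uint32_t a = mix32(t->src_addr ^ (((uint32_t)t->l3num << 16) | t->protonum));
	uint32_t b = mix32(t->dst_addr ^ (((uint32_t)t->src_port << 16) | t->dst_port));
	return mix32(a ^ b ^ rnd) % size;
}

int main(void)
{
	/* 10.0.0.1:12345 -> 10.0.0.2:80, TCP over IPv4 */
	struct tuple t = { 0x0a000001, 0x0a000002, 12345, 80, 2, 6 };
	printf("bucket = %u\n", (unsigned)hash_tuple(&t, 16384, 0xdeadbeef));
	return 0;
}

The random seed (nf_conntrack_hash_rnd in the kernel code) is the important design choice: without it, an attacker who knows the hash function could craft tuples that all land in one bucket and degrade the table into a long linear chain.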
