
📄 nf_conntrack_core.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		write_unlock_bh(&nf_conntrack_lock);
		return;
	}

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - skb_network_offset(skb);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NLA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_DST_PORT]);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
EXPORT_SYMBOL_GPL(__nf_conntrack_attach);

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}
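
/*
 * The cleanup walk below works in two stages: get_next_corpse() scans every
 * hash bucket and the unconfirmed list under nf_conntrack_lock, returning the
 * first hashed entry the caller's predicate selects with its reference count
 * raised (matching unconfirmed entries are only marked IPS_DYING), and
 * nf_ct_iterate_cleanup() then fires that entry's timer early so it is torn
 * down through the normal death_by_timeout() path.
 */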
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_node *n;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	write_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daises... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	rcu_assign_pointer(nf_ct_destroy, NULL);

	kmem_cache_destroy(nf_conntrack_cachep);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_expect_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
{
	struct hlist_head *hash;
	unsigned int size, i;

	*vmalloced = 0;

	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
				       get_order(sizeof(struct hlist_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct hlist_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
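
/*
 * Runtime resizing of the conntrack hash: nf_conntrack_set_hashsize() below
 * allocates a new table, picks a fresh random seed, rehashes every entry into
 * the new buckets while holding nf_conntrack_lock for writing, and then frees
 * the old table.
 */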
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct hlist_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_empty(&nf_conntrack_hash[i])) {
			h = hlist_entry(nf_conntrack_hash[i].first,
					struct nf_conntrack_tuple_hash, hnode);
			hlist_del(&h->hnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_add_head(&h->hnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

int __init nf_conntrack_init(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
						  &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, 0, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_free_conntrack_slab;

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto out_fini_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto out_fini_expect;

	/* For use by REJECT target */
	rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and look it like as a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_fini_expect:
	nf_conntrack_expect_fini();
out_fini_proto:
	nf_conntrack_proto_fini();
err_free_conntrack_slab:
	kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}
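
For orientation, the listing above exports a small table-walking API: nf_ct_iterate_cleanup() plus a caller-supplied predicate, as kill_all() and nf_conntrack_flush() demonstrate. Below is a minimal sketch, not part of nf_conntrack_core.c, of how another kernel component might use it; the names kill_udp and flush_udp_conntracks are purely illustrative, and the header paths are assumed to match this kernel tree.

/* Sketch only: drop every UDP entry from the conntrack table by reusing the
 * iteration API exported by nf_conntrack_core.c. */
#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>

/* Predicate: return nonzero for entries that should be removed. */
static int kill_udp(struct nf_conn *ct, void *data)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
	       IPPROTO_UDP;
}

static void flush_udp_conntracks(void)
{
	/* Walks every bucket and the unconfirmed list; matching entries have
	 * their timers fired early, exactly as nf_conntrack_flush() does with
	 * kill_all() above. */
	nf_ct_iterate_cleanup(kill_udp, NULL);
}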
