
📄 nf_conntrack_core.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;

        read_lock_bh(&nf_conntrack_lock);
        h = __nf_conntrack_find(tuple, ignored_conntrack);
        read_unlock_bh(&nf_conntrack_lock);

        return h != NULL;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static int early_drop(unsigned int hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_node *n;
        unsigned int i, cnt = 0;
        int dropped = 0;

        read_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
                                ct = tmp;
                        cnt++;
                }
                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
                hash = (hash + 1) % nf_conntrack_htable_size;
        }
        if (ct)
                atomic_inc(&ct->ct_general.use);
        read_unlock_bh(&nf_conntrack_lock);

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC_ATOMIC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl)
{
        struct nf_conn *conntrack = NULL;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&nf_conntrack_count);

        if (nf_conntrack_max
            && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(hash)) {
                        atomic_dec(&nf_conntrack_count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
        if (conntrack == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                atomic_dec(&nf_conntrack_count);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&conntrack->ct_general.use, 1);
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&conntrack->timeout, death_by_timeout,
                    (unsigned long)conntrack);

        return conntrack;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *conntrack)
{
        nf_ct_ext_free(conntrack);
        kmem_cache_free(nf_conntrack_cachep, conntrack);
        atomic_dec(&nf_conntrack_count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
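/*
 * Note on the allocation path above: nf_conntrack_alloc() charges
 * nf_conntrack_count *before* testing it against nf_conntrack_max, so
 * the early_drop() scan runs with the prospective entry already
 * counted.  early_drop() walks at most NF_CT_EVICTION_RANGE entries,
 * starting at the bucket of the original tuple, and evicts a
 * non-assured conntrack if it finds one; both failure paths decrement
 * the counter again before returning ERR_PTR(-ENOMEM).
 */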
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *conntrack;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
        if (conntrack == NULL || IS_ERR(conntrack)) {
                pr_debug("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)conntrack;
        }

        if (!l4proto->new(conntrack, skb, dataoff)) {
                nf_conntrack_free(conntrack);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        write_lock_bh(&nf_conntrack_lock);
        exp = nf_ct_find_expectation(tuple);
        if (exp) {
                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                         conntrack, exp);
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
                conntrack->master = exp->master;
                if (exp->helper) {
                        help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, exp->helper);
                }
#ifdef CONFIG_NF_CONNTRACK_MARK
                conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                conntrack->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&conntrack->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
                struct nf_conntrack_helper *helper;

                helper = __nf_ct_helper_find(&repl_tuple);
                if (helper) {
                        help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
                        if (help)
                                rcu_assign_pointer(help->helper, helper);
                }
                NF_CT_STAT_INC(new);
        }

        /* Overload tuple linked list to put us in unconfirmed list. */
        hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
                       &unconfirmed);

        write_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(conntrack, exp);
                nf_ct_expect_put(exp);
        }

        return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}
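/*
 * Conntracks created by init_conntrack() start life on the
 * "unconfirmed" list (the ORIGINAL-direction hlist node is reused as
 * the list linkage) and have no running timeout timer; both are fixed
 * up by __nf_conntrack_confirm() once the first packet makes it
 * through the hooks.
 */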
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(&tuple);
        if (!h) {
                h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)?  Ignore. */
        if (skb->nfct) {
                NF_CT_STAT_INC_ATOMIC(ignore);
                return NF_ACCEPT;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

        /* It may be a special packet, error, unclean...
         * inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL &&
            (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC_ATOMIC(error);
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto,
                               &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT(skb->nfct);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, skb);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                         const struct nf_conntrack_tuple *orig)
{
        int ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
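/*
 * Sign convention used throughout nf_conntrack_in(): the l3proto and
 * l4proto handlers report problems as a negated netfilter verdict
 * (e.g. -NF_ACCEPT), so "return -ret" flips the value back into the
 * positive verdict the netfilter core expects.
 */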
/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        write_lock_bh(&nf_conntrack_lock);
        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        NF_CT_DUMP_TUPLE(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && help->expecting != 0))
                goto out;

        helper = __nf_ct_helper_find(newreply);
        if (helper == NULL) {
                if (help)
                        rcu_assign_pointer(help->helper, NULL);
                goto out;
        }

        if (help == NULL) {
                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                if (help == NULL)
                        goto out;
        } else {
                memset(&help->help, 0, sizeof(help->help));
        }

        rcu_assign_pointer(help->helper, helper);
out:
        write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
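(Listing truncated here; __nf_ct_refresh_acct() continues on page 2 of 3.)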
