
ip_conntrack_core.c (linux-2.6.15.6, C source, page 1 of 3)
ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
			 const struct ip_conntrack *ignored_conntrack)
{
	struct ip_conntrack_tuple_hash *h;

	read_lock_bh(&ip_conntrack_lock);
	h = __ip_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&ip_conntrack_lock);

	return h != NULL;
}

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
{
	return !(test_bit(IPS_ASSURED_BIT, &tuplehash_to_ctrack(i)->status));
}

static int early_drop(struct list_head *chain)
{
	/* Traverse backwards: gives us oldest, which is roughly LRU */
	struct ip_conntrack_tuple_hash *h;
	struct ip_conntrack *ct = NULL;
	int dropped = 0;

	read_lock_bh(&ip_conntrack_lock);
	h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
	if (h) {
		ct = tuplehash_to_ctrack(h);
		atomic_inc(&ct->ct_general.use);
	}
	read_unlock_bh(&ip_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		CONNTRACK_STAT_INC(early_drop);
	}
	ip_conntrack_put(ct);
	return dropped;
}

static inline int helper_cmp(const struct ip_conntrack_helper *i,
			     const struct ip_conntrack_tuple *rtuple)
{
	return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
}

static struct ip_conntrack_helper *
__ip_conntrack_helper_find(const struct ip_conntrack_tuple *tuple)
{
	return LIST_FIND(&helpers, helper_cmp,
			 struct ip_conntrack_helper *,
			 tuple);
}

struct ip_conntrack_helper *
ip_conntrack_helper_find_get(const struct ip_conntrack_tuple *tuple)
{
	struct ip_conntrack_helper *helper;

	/* need ip_conntrack_lock to assure that helper exists until
	 * try_module_get() is called */
	read_lock_bh(&ip_conntrack_lock);
	helper = __ip_conntrack_helper_find(tuple);
	if (helper) {
		/* need to increase module usage count to assure helper will
		 * not go away while the caller is e.g. busy putting a
		 * conntrack in the hash that uses the helper */
		if (!try_module_get(helper->me))
			helper = NULL;
	}
	read_unlock_bh(&ip_conntrack_lock);

	return helper;
}

void ip_conntrack_helper_put(struct ip_conntrack_helper *helper)
{
	module_put(helper->me);
}

struct ip_conntrack_protocol *__ip_conntrack_proto_find(u_int8_t protocol)
{
	return ip_ct_protos[protocol];
}

/* this is guaranteed to always return a valid protocol helper, since
 * it falls back to generic_protocol */
struct ip_conntrack_protocol *ip_conntrack_proto_find_get(u_int8_t protocol)
{
	struct ip_conntrack_protocol *p;

	preempt_disable();
	p = __ip_conntrack_proto_find(protocol);
	if (p) {
		if (!try_module_get(p->me))
			p = &ip_conntrack_generic_protocol;
	}
	preempt_enable();

	return p;
}

void ip_conntrack_proto_put(struct ip_conntrack_protocol *p)
{
	module_put(p->me);
}

struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
					struct ip_conntrack_tuple *repl)
{
	struct ip_conntrack *conntrack;

	if (!ip_conntrack_hash_rnd_initted) {
		get_random_bytes(&ip_conntrack_hash_rnd, 4);
		ip_conntrack_hash_rnd_initted = 1;
	}

	if (ip_conntrack_max
	    && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		/* Try dropping from this hash chain. */
		if (!early_drop(&ip_conntrack_hash[hash])) {
			if (net_ratelimit())
				printk(KERN_WARNING
				       "ip_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
	if (!conntrack) {
		DEBUGP("Can't allocate conntrack.\n");
		return ERR_PTR(-ENOMEM);
	}

	memset(conntrack, 0, sizeof(*conntrack));
	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->ct_general.destroy = destroy_conntrack;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;

	atomic_inc(&ip_conntrack_count);

	return conntrack;
}

void
ip_conntrack_free(struct ip_conntrack *conntrack)
{
	atomic_dec(&ip_conntrack_count);
	kmem_cache_free(ip_conntrack_cachep, conntrack);
}

/* Allocate a new conntrack: we return -ENOMEM if classification
 * failed due to stress.  Otherwise it really is unclassifiable */
static struct ip_conntrack_tuple_hash *
init_conntrack(struct ip_conntrack_tuple *tuple,
	       struct ip_conntrack_protocol *protocol,
	       struct sk_buff *skb)
{
	struct ip_conntrack *conntrack;
	struct ip_conntrack_tuple repl_tuple;
	struct ip_conntrack_expect *exp;

	if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) {
		DEBUGP("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = ip_conntrack_alloc(tuple, &repl_tuple);
	if (conntrack == NULL || IS_ERR(conntrack))
		return (struct ip_conntrack_tuple_hash *)conntrack;

	if (!protocol->new(conntrack, skb)) {
		ip_conntrack_free(conntrack);
		return NULL;
	}

	write_lock_bh(&ip_conntrack_lock);
	exp = find_expectation(tuple);

	if (exp) {
		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
			conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
#ifdef CONFIG_IP_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
		/* this is ugly, but there is no other place where to put it */
		conntrack->nat.masq_index = exp->master->nat.masq_index;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		CONNTRACK_STAT_INC(expect_new);
	} else {
		conntrack->helper = __ip_conntrack_helper_find(&repl_tuple);
		CONNTRACK_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

	write_unlock_bh(&ip_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		ip_conntrack_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct ip_conntrack *
resolve_normal_ct(struct sk_buff *skb,
		  struct ip_conntrack_protocol *proto,
		  int *set_reply,
		  unsigned int hooknum,
		  enum ip_conntrack_info *ctinfo)
{
	struct ip_conntrack_tuple tuple;
	struct ip_conntrack_tuple_hash *h;
	struct ip_conntrack *ct;

	IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);

	if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl * 4,
			     &tuple, proto))
		return NULL;

	/* look for tuple match */
	h = ip_conntrack_find_get(&tuple, NULL);
	if (!h) {
		h = init_conntrack(&tuple, proto, skb);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			DEBUGP("ip_conntrack_in: normal packet for %p\n",
			       ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			DEBUGP("ip_conntrack_in: related packet for %p\n",
			       ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			DEBUGP("ip_conntrack_in: new packet for %p\n",
			       ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

/* Netfilter hook itself. */
unsigned int ip_conntrack_in(unsigned int hooknum,
			     struct sk_buff **pskb,
			     const struct net_device *in,
			     const struct net_device *out,
			     int (*okfn)(struct sk_buff *))
{
	struct ip_conntrack *ct;
	enum ip_conntrack_info ctinfo;
	struct ip_conntrack_protocol *proto;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if ((*pskb)->nfct) {
		CONNTRACK_STAT_INC(ignore);
		return NF_ACCEPT;
	}

	/* Never happen */
	if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
		if (net_ratelimit()) {
			printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n",
			       (*pskb)->nh.iph->protocol, hooknum);
		}
		return NF_DROP;
	}

/* Doesn't cover locally-generated broadcast, so not worth it. */
#if 0
	/* Ignore broadcast: no `connection'. */
	if ((*pskb)->pkt_type == PACKET_BROADCAST) {
		printk("Broadcast packet!\n");
		return NF_ACCEPT;
	} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
		   == htonl(0x000000FF)) {
		printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
		       NIPQUAD((*pskb)->nh.iph->saddr),
		       NIPQUAD((*pskb)->nh.iph->daddr),
		       (*pskb)->sk, (*pskb)->pkt_type);
	}
#endif

	proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (proto->error != NULL
	    && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
		CONNTRACK_STAT_INC(error);
		CONNTRACK_STAT_INC(invalid);
		return -ret;
	}

	if (!(ct = resolve_normal_ct(*pskb, proto, &set_reply, hooknum, &ctinfo))) {
		/* Not valid part of a connection */
		CONNTRACK_STAT_INC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		CONNTRACK_STAT_INC(drop);
		return NF_DROP;
	}

	IP_NF_ASSERT((*pskb)->nfct);

	ret = proto->packet(ct, *pskb, ctinfo);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		CONNTRACK_STAT_INC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		ip_conntrack_event_cache(IPCT_STATUS, *pskb);

	return ret;
}

int invert_tuplepr(struct ip_conntrack_tuple *inverse,
		   const struct ip_conntrack_tuple *orig)
{
	return ip_ct_invert_tuple(inverse, orig,
				  __ip_conntrack_proto_find(orig->dst.protonum));
}

/* Would two expected things clash? */
static inline int expect_clash(const struct ip_conntrack_expect *a,
			       const struct ip_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct ip_conntrack_tuple intersect_mask
		= { { a->mask.src.ip & b->mask.src.ip,
		      { a->mask.src.u.all & b->mask.src.u.all } },
		    { a->mask.dst.ip & b->mask.dst.ip,
		      { a->mask.dst.u.all & b->mask.dst.u.all },
		      a->mask.dst.protonum & b->mask.dst.protonum } };

	return ip_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct ip_conntrack_expect *a,
				 const struct ip_conntrack_expect *b)
{
	return a->master == b->master
		&& ip_ct_tuple_equal(&a->tuple, &b->tuple)
		&& ip_ct_tuple_equal(&a->mask, &b->mask);
}

/* Generally a bad idea to call this: could have matched already. */
void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
{
	struct ip_conntrack_expect *i;

	write_lock_bh(&ip_conntrack_lock);
	/* choose the oldest expectation to evict */
	list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
		if (expect_matches(i, exp) && del_timer(&i->timeout)) {
			ip_ct_unlink_expect(i);
			write_unlock_bh(&ip_conntrack_lock);
			ip_conntrack_expect_put(i);
			return;
		}
	}
	write_unlock_bh(&ip_conntrack_lock);
}

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
{
	struct ip_conntrack_expect *new;

	new = kmem_cache_alloc(ip_conntrack_expect_cachep, GFP_ATOMIC);
	if (!new) {
		DEBUGP("expect_related: OOM allocating expect\n");
		return NULL;
	}
	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}

void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		kmem_cache_free(ip_conntrack_expect_cachep, exp);
}

static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
{
	atomic_inc(&exp->use);
	exp->master->expecting++;
	list_add(&exp->list, &ip_conntrack_expect_list);

	init_timer(&exp->timeout);
	exp->timeout.data = (unsigned long)exp;
	exp->timeout.function = expectation_timed_out;
	exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
	add_timer(&exp->timeout);

	exp->id = ++ip_conntrack_expect_next_id;
	atomic_inc(&exp->use);
	CONNTRACK_STAT_INC(expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct ip_conntrack *master)
{
	struct ip_conntrack_expect *i;

	list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
		if (i->master == master) {
			if (del_timer(&i->timeout)) {
				ip_ct_unlink_expect(i);
				ip_conntrack_expect_put(i);
			}
			break;
		}
	}
}

static inline int refresh_timer(struct ip_conntrack_expect *i)
{
	if (!del_timer(&i->timeout))
		return 0;

	i->timeout.expires = jiffies + i->master->helper->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}

int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
{
	struct ip_conntrack_expect *i;
	int ret;

	DEBUGP("ip_conntrack_expect_related %p\n", expect);
	DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
	DEBUGP("mask:  "); DUMP_TUPLE(&expect->mask);

	write_lock_bh(&ip_conntrack_lock);
	list_for_each_entry(i, &ip_conntrack_expect_list, list) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore.. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}

	/* Will be over limit? */
	if (expect->master->helper->max_expected &&
	    expect->master->expecting >= expect->master->helper->max_expected)
		evict_oldest_expect(expect->master);
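
The listing breaks off here; ip_conntrack_expect_related() continues on the next page. For context, the sketch below shows roughly how a protocol helper would drive the expectation API defined above (ip_conntrack_expect_alloc, ip_conntrack_expect_related, ip_conntrack_expect_put). It is an illustrative sketch, not part of ip_conntrack_core.c: the function name example_expect_data_channel, its parameters, and the particular tuple/mask values are assumptions chosen for the example.

/* Illustrative sketch (not from ip_conntrack_core.c): a helper expecting a
 * related TCP connection to peer_ip:peer_port on behalf of master conntrack
 * ct.  The field values below are assumptions for the example. */
static int example_expect_data_channel(struct ip_conntrack *ct,
				       u_int32_t peer_ip, u_int16_t peer_port)
{
	struct ip_conntrack_expect *exp;
	int ret;

	/* Allocation ties the expectation to its master and sets use = 1. */
	exp = ip_conntrack_expect_alloc(ct);
	if (exp == NULL)
		return -ENOMEM;

	/* Expect a TCP connection from the original client to
	 * peer_ip:peer_port; the source port is wildcarded by the mask. */
	exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
	exp->tuple.src.u.tcp.port = 0;
	exp->tuple.dst.ip = peer_ip;
	exp->tuple.dst.u.tcp.port = htons(peer_port);
	exp->tuple.dst.protonum = IPPROTO_TCP;

	exp->mask.src.ip = 0xFFFFFFFF;
	exp->mask.src.u.tcp.port = 0;
	exp->mask.dst.ip = 0xFFFFFFFF;
	exp->mask.dst.u.tcp.port = 0xFFFF;
	exp->mask.dst.protonum = 0xFF;

	exp->expectfn = NULL;

	/* ip_conntrack_expect_related() refreshes a matching expectation,
	 * rejects clashes with -EBUSY, evicts the oldest entry once the
	 * helper's max_expected limit is reached, and (in the part of the
	 * function shown on the next page) is expected to insert the new
	 * entry via ip_conntrack_expect_insert(), which takes its own
	 * references.  Drop our reference either way. */
	ret = ip_conntrack_expect_related(exp);
	ip_conntrack_expect_put(exp);
	return ret;
}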
