⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ip_conntrack_core.c

📁 该文件是rt_linux
💻 C
📖 第 1 页 / 共 3 页
字号:
		 enum ip_conntrack_info *ctinfo,
		 unsigned int hooknum)
{
	const struct iphdr *iph;
	struct icmphdr *hdr;
	struct ip_conntrack_tuple innertuple, origtuple;
	struct iphdr *inner;
	size_t datalen;
	struct ip_conntrack_protocol *innerproto;
	struct ip_conntrack_tuple_hash *h;

	/* Initialize the header pointers BEFORE the debug assertions:
	   the original code evaluated IP_NF_ASSERT(iph->protocol ...)
	   while iph was still uninitialized, which is undefined behavior
	   whenever the assertion macro expands to real code
	   (CONFIG_NETFILTER_DEBUG).  Hoisting the assignments is the
	   minimal fix; non-debug builds are unaffected. */
	iph = skb->nh.iph;
	hdr = (struct icmphdr *)((u_int32_t *)iph + iph->ihl);
	inner = (struct iphdr *)(hdr + 1);
	datalen = skb->len - iph->ihl*4 - sizeof(*hdr);

	IP_NF_ASSERT(iph->protocol == IPPROTO_ICMP);
	IP_NF_ASSERT(skb->nfct == NULL);

	/* Not enough room for an embedded IP header after the ICMP
	   header?  Then this cannot be a trackable ICMP error. */
	if (skb->len < iph->ihl * 4 + sizeof(*hdr) + sizeof(*iph)) {
		DEBUGP("icmp_error_track: too short\n");
		return NULL;
	}

	/* Only these ICMP types carry an embedded datagram. */
	if (hdr->type != ICMP_DEST_UNREACH
	    && hdr->type != ICMP_SOURCE_QUENCH
	    && hdr->type != ICMP_TIME_EXCEEDED
	    && hdr->type != ICMP_PARAMETERPROB
	    && hdr->type != ICMP_REDIRECT)
		return NULL;

	/* Ignore ICMP's containing fragments (shouldn't happen) */
	if (inner->frag_off & htons(IP_OFFSET)) {
		DEBUGP("icmp_error_track: fragment of proto %u\n",
		       inner->protocol);
		return NULL;
	}

	/* Ignore it if the checksum's bogus. */
	if (ip_compute_csum((unsigned char *)hdr, sizeof(*hdr) + datalen)) {
		DEBUGP("icmp_error_track: bad csum\n");
		return NULL;
	}

	innerproto = ip_ct_find_proto(inner->protocol);
	/* Are they talking about one of our connections? */
	if (inner->ihl * 4 + 8 > datalen
	    || !get_tuple(inner, datalen, &origtuple, innerproto)) {
		DEBUGP("icmp_error: ! get_tuple p=%u (%u*4+%u dlen=%u)\n",
		       inner->protocol, inner->ihl, 8,
		       datalen);
		return NULL;
	}

	/* Ordinarily, we'd expect the inverted tupleproto, but it's
	   been preserved inside the ICMP. 
*/	if (!invert_tuple(&innertuple, &origtuple, innerproto)) {		DEBUGP("icmp_error_track: Can't invert tuple\n");		return NULL;	}	*ctinfo = IP_CT_RELATED;	h = ip_conntrack_find_get(&innertuple, NULL);	if (!h) {		/* Locally generated ICMPs will match inverted if they		   haven't been SNAT'ed yet */		/* FIXME: NAT code has to handle half-done double NAT --RR */		if (hooknum == NF_IP_LOCAL_OUT)			h = ip_conntrack_find_get(&origtuple, NULL);		if (!h) {			DEBUGP("icmp_error_track: no match\n");			return NULL;		}		/* Reverse direction from that found */		if (DIRECTION(h) != IP_CT_DIR_REPLY)			*ctinfo += IP_CT_IS_REPLY;	} else {		if (DIRECTION(h) == IP_CT_DIR_REPLY)			*ctinfo += IP_CT_IS_REPLY;	}	/* Update skb to refer to this connection */	skb->nfct = &h->ctrack->infos[*ctinfo];	return h->ctrack;}/* There's a small race here where we may free a just-assured   connection.  Too bad: we're in trouble anyway. */static inline int unreplied(const struct ip_conntrack_tuple_hash *i){	return !(i->ctrack->status & IPS_ASSURED);}static int early_drop(struct list_head *chain){	/* Traverse backwards: gives us oldest, which is roughly LRU */	struct ip_conntrack_tuple_hash *h;	int dropped = 0;	READ_LOCK(&ip_conntrack_lock);	h = LIST_FIND(chain, unreplied, struct ip_conntrack_tuple_hash *);	if (h)		atomic_inc(&h->ctrack->ct_general.use);	READ_UNLOCK(&ip_conntrack_lock);	if (!h)		return dropped;	if (del_timer(&h->ctrack->timeout)) {		death_by_timeout((unsigned long)h->ctrack);		dropped = 1;	}	ip_conntrack_put(h->ctrack);	return dropped;}static inline int helper_cmp(const struct ip_conntrack_helper *i,			     const struct ip_conntrack_tuple *rtuple){	return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);}struct ip_conntrack_helper *ip_ct_find_helper(const struct ip_conntrack_tuple *tuple){	return LIST_FIND(&helpers, helper_cmp,			 struct ip_conntrack_helper *,			 tuple);}/* Allocate a new conntrack: we return -ENOMEM if classification   failed due to stress.  
Otherwise it really is unclassifiable. */static struct ip_conntrack_tuple_hash *init_conntrack(const struct ip_conntrack_tuple *tuple,	       struct ip_conntrack_protocol *protocol,	       struct sk_buff *skb){	struct ip_conntrack *conntrack;	struct ip_conntrack_tuple repl_tuple;	size_t hash, repl_hash;	struct ip_conntrack_expect *expected;	int i;	static unsigned int drop_next = 0;	hash = hash_conntrack(tuple);	if (ip_conntrack_max &&	    atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {		/* Try dropping from random chain, or else from the                   chain about to put into (in case they're trying to                   bomb one hash chain). */		unsigned int next = (drop_next++)%ip_conntrack_htable_size;		if (!early_drop(&ip_conntrack_hash[next])		    && !early_drop(&ip_conntrack_hash[hash])) {			if (net_ratelimit())				printk(KERN_WARNING				       "ip_conntrack: table full, dropping"				       " packet.\n");			return ERR_PTR(-ENOMEM);		}	}	if (!invert_tuple(&repl_tuple, tuple, protocol)) {		DEBUGP("Can't invert tuple.\n");		return NULL;	}	repl_hash = hash_conntrack(&repl_tuple);	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);	if (!conntrack) {		DEBUGP("Can't allocate conntrack.\n");		return ERR_PTR(-ENOMEM);	}	memset(conntrack, 0, sizeof(*conntrack));	atomic_set(&conntrack->ct_general.use, 1);	conntrack->ct_general.destroy = destroy_conntrack;	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;	conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;	for (i=0; i < IP_CT_NUMBER; i++)		conntrack->infos[i].master = &conntrack->ct_general;	if (!protocol->new(conntrack, skb->nh.iph, skb->len)) {		kmem_cache_free(ip_conntrack_cachep, conntrack);		return NULL;	}	/* Don't set timer yet: wait for confirmation */	init_timer(&conntrack->timeout);	conntrack->timeout.data = (unsigned long)conntrack;	conntrack->timeout.function 
= death_by_timeout;	INIT_LIST_HEAD(&conntrack->sibling_list);	/* Mark clearly that it's not in the hash table. */	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list.next = NULL;	WRITE_LOCK(&ip_conntrack_lock);	/* Need finding and deleting of expected ONLY if we win race */	READ_LOCK(&ip_conntrack_expect_tuple_lock);	expected = LIST_FIND(&ip_conntrack_expect_list, expect_cmp,			     struct ip_conntrack_expect *, tuple);	READ_UNLOCK(&ip_conntrack_expect_tuple_lock);	/* Look up the conntrack helper for master connections only */	if (!expected)		conntrack->helper = ip_ct_find_helper(&repl_tuple);	/* If the expectation is dying, then this is a looser. */	if (expected	    && expected->expectant->helper->timeout	    && ! del_timer(&expected->timeout))		expected = NULL;	/* If master is not in hash table yet (ie. packet hasn't left	   this machine yet), how can other end know about expected?	   Hence these are not the droids you are looking for (if	   master ct never got confirmed, we'd hold a reference to it	   and weird things would happen to future packets). */	if (expected && is_confirmed(expected->expectant)) {		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",			conntrack, expected);		/* Welcome, Mr. Bond.  We've been expecting you... 
*/		IP_NF_ASSERT(master_ct(conntrack));		conntrack->status = IPS_EXPECTED;		conntrack->master = expected;		expected->sibling = conntrack;		LIST_DELETE(&ip_conntrack_expect_list, expected);		expected->expectant->expecting--;		nf_conntrack_get(&master_ct(conntrack)->infos[0]);	}	atomic_inc(&ip_conntrack_count);	WRITE_UNLOCK(&ip_conntrack_lock);	if (expected && expected->expectfn)		expected->expectfn(conntrack);	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];}/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */static inline struct ip_conntrack *resolve_normal_ct(struct sk_buff *skb,		  struct ip_conntrack_protocol *proto,		  int *set_reply,		  unsigned int hooknum,		  enum ip_conntrack_info *ctinfo){	struct ip_conntrack_tuple tuple;	struct ip_conntrack_tuple_hash *h;	IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);	if (!get_tuple(skb->nh.iph, skb->len, &tuple, proto))		return NULL;	/* look for tuple match */	h = ip_conntrack_find_get(&tuple, NULL);	if (!h) {		h = init_conntrack(&tuple, proto, skb);		if (!h)			return NULL;		if (IS_ERR(h))			return (void *)h;	}	/* It exists; we have (non-exclusive) reference. */	if (DIRECTION(h) == IP_CT_DIR_REPLY) {		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;		/* Please set reply bit if this packet OK */		*set_reply = 1;	} else {		/* Once we've had two way comms, always ESTABLISHED. */		if (h->ctrack->status & IPS_SEEN_REPLY) {			DEBUGP("ip_conntrack_in: normal packet for %p\n",			       h->ctrack);		        *ctinfo = IP_CT_ESTABLISHED;		} else if (h->ctrack->status & IPS_EXPECTED) {			DEBUGP("ip_conntrack_in: related packet for %p\n",			       h->ctrack);			*ctinfo = IP_CT_RELATED;		} else {			DEBUGP("ip_conntrack_in: new packet for %p\n",			       h->ctrack);			*ctinfo = IP_CT_NEW;		}		*set_reply = 0;	}	skb->nfct = &h->ctrack->infos[*ctinfo];	return h->ctrack;}/* Netfilter hook itself. 
*/unsigned int ip_conntrack_in(unsigned int hooknum,			     struct sk_buff **pskb,			     const struct net_device *in,			     const struct net_device *out,			     int (*okfn)(struct sk_buff *)){	struct ip_conntrack *ct;	enum ip_conntrack_info ctinfo;	struct ip_conntrack_protocol *proto;	int set_reply;	int ret;	/* FIXME: Do this right please. --RR */	(*pskb)->nfcache |= NFC_UNKNOWN;/* Doesn't cover locally-generated broadcast, so not worth it. */#if 0	/* Ignore broadcast: no `connection'. */	if ((*pskb)->pkt_type == PACKET_BROADCAST) {		printk("Broadcast packet!\n");		return NF_ACCEPT;	} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) 		   == htonl(0x000000FF)) {		printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",		       NIPQUAD((*pskb)->nh.iph->saddr),		       NIPQUAD((*pskb)->nh.iph->daddr),		       (*pskb)->sk, (*pskb)->pkt_type);	}#endif	/* Previously seen (loopback)?  Ignore.  Do this before           fragment check. */	if ((*pskb)->nfct)		return NF_ACCEPT;	/* Gather fragments. */	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {		*pskb = ip_ct_gather_frags(*pskb);		if (!*pskb)			return NF_STOLEN;	}	proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);	/* It may be an icmp error... */	if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP 	    && icmp_error_track(*pskb, &ctinfo, hooknum))		return NF_ACCEPT;	if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo)))		/* Not valid part of a connection */		return NF_ACCEPT;	if (IS_ERR(ct))		/* Too stressed to deal. 
*/		return NF_DROP;	IP_NF_ASSERT((*pskb)->nfct);	ret = proto->packet(ct, (*pskb)->nh.iph, (*pskb)->len, ctinfo);	if (ret == -1) {		/* Invalid */		nf_conntrack_put((*pskb)->nfct);		(*pskb)->nfct = NULL;		return NF_ACCEPT;	}	if (ret != NF_DROP && ct->helper) {		ret = ct->helper->help((*pskb)->nh.iph, (*pskb)->len,				       ct, ctinfo);		if (ret == -1) {			/* Invalid */			nf_conntrack_put((*pskb)->nfct);			(*pskb)->nfct = NULL;			return NF_ACCEPT;		}	}	if (set_reply)		set_bit(IPS_SEEN_REPLY_BIT, &ct->status);	return ret;}int invert_tuplepr(struct ip_conntrack_tuple *inverse,		   const struct ip_conntrack_tuple *orig){	return invert_tuple(inverse, orig, ip_ct_find_proto(orig->dst.protonum));}static inline int resent_expect(const struct ip_conntrack_expect *i,			        const struct ip_conntrack_tuple *tuple,			        const struct ip_conntrack_tuple *mask){	DEBUGP("resent_expect\n");	DEBUGP("   tuple:   "); DUMP_TUPLE(&i->tuple);	DEBUGP("ct_tuple:   "); DUMP_TUPLE(&i->ct_tuple);	DEBUGP("test tuple: "); DUMP_TUPLE(tuple);	return (((i->ct_tuple.dst.protonum == 0 && ip_ct_tuple_equal(&i->tuple, tuple))	         || (i->ct_tuple.dst.protonum && ip_ct_tuple_equal(&i->ct_tuple, tuple)))		&& ip_ct_tuple_equal(&i->mask, mask));}/* Would two expected things clash? 
*/static inline int expect_clash(const struct ip_conntrack_expect *i,			       const struct ip_conntrack_tuple *tuple,			       const struct ip_conntrack_tuple *mask){	/* Part covered by intersection of masks must be unequal,           otherwise they clash */	struct ip_conntrack_tuple intersect_mask		= { { i->mask.src.ip & mask->src.ip,		      { i->mask.src.u.all & mask->src.u.all } },		    { i->mask.dst.ip & mask->dst.ip,		      { i->mask.dst.u.all & mask->dst.u.all },		      i->mask.dst.protonum & mask->dst.protonum } };	return ip_ct_tuple_mask_cmp(&i->tuple, tuple, &intersect_mask);}inline void ip_conntrack_unexpect_related(struct ip_conntrack_expect *expect){	WRITE_LOCK(&ip_conntrack_lock);	unexpect_related(expect);	WRITE_UNLOCK(&ip_conntrack_lock);}	static void expectation_timed_out(unsigned long ul_expect){	struct ip_conntrack_expect *expect = (void *) ul_expect;	DEBUGP("expectation %p timed out\n", expect);		WRITE_LOCK(&ip_conntrack_lock);	__unexpect_related(expect);	WRITE_UNLOCK(&ip_conntrack_lock);}/* Add a related connection. */int ip_conntrack_expect_related(struct ip_conntrack *related_to,				struct ip_conntrack_expect *expect){	struct ip_conntrack_expect *old, *new;	int ret = 0;	WRITE_LOCK(&ip_conntrack_lock);	/* Because of the write lock, no reader can walk the lists,	 * so there is no need to use the tuple lock too */	DEBUGP("ip_conntrack_expect_related %p\n", related_to);	DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);	DEBUGP("mask:  "); DUMP_TUPLE(&expect->mask);	old = LIST_FIND(&ip_conntrack_expect_list, resent_expect,		        struct ip_conntrack_expect *, &expect->tuple, 			&expect->mask);	if (old) {		/* Helper private data may contain offsets but no pointers		   pointing into the payload - otherwise we should have to copy 		   the data filled out by the helper over the old one */		DEBUGP("expect_related: resent packet\n");		if (related_to->helper->timeout) {			if (!del_timer(&old->timeout)) {				/* expectation is dying. 
Fall through */				old = NULL;			} else {				old->timeout.expires = jiffies + 					related_to->helper->timeout * HZ;				add_timer(&old->timeout);			}		}		if (old) {			WRITE_UNLOCK(&ip_conntrack_lock);			return -EEXIST;		}	} else if (related_to->helper->max_expected && 		   related_to->expecting >= related_to->helper->max_expected) {		struct list_head *cur_item;		/* old == NULL */	    	if (net_ratelimit())		    	printk(KERN_WARNING 		    	       "ip_conntrack: max number of expected "			       "connections %i of %s reached for "			       "%u.%u.%u.%u->%u.%u.%u.%u%s\n",		    	       related_to->helper->max_expected, 		    	       related_to->helper->name,		    	       NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),		    	       NIPQUAD(related_to->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip),		    	       related_to->helper->flags & IP_CT_HELPER_F_REUSE_EXPECT ?		    	       ", reusing" : "");		if (!(related_to->helper->flags & 		      IP_CT_HELPER_F_REUSE_EXPECT)) {			WRITE_UNLOCK(&ip_conntrack_lock);			return -EPERM;		}		/* choose the the oldest expectation to evict */		list_for_each(cur_item, &related_to->sibling_list) { 			struct ip_conntrack_expect *cur;			cur = list_entry(cur_item, 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -