
📄 ip_conntrack_core.c

📁 Part of the linux-jx2410 source code upload
💻 C
📖 Page 1 of 3
	innerproto = find_proto(inner->protocol);
	/* Are they talking about one of our connections? */
	if (inner->ihl * 4 + 8 > datalen
	    || !get_tuple(inner, datalen, &origtuple, innerproto)) {
		DEBUGP("icmp_error: ! get_tuple p=%u (%u*4+%u dlen=%u)\n",
		       inner->protocol, inner->ihl, 8,
		       datalen);
		return NULL;
	}

	/* Ordinarily, we'd expect the inverted tupleproto, but it's
	   been preserved inside the ICMP. */
	if (!invert_tuple(&innertuple, &origtuple, innerproto)) {
		DEBUGP("icmp_error_track: Can't invert tuple\n");
		return NULL;
	}

	*ctinfo = IP_CT_RELATED;

	h = ip_conntrack_find_get(&innertuple, NULL);
	if (!h) {
		/* Locally generated ICMPs will match inverted if they
		   haven't been SNAT'ed yet */
		/* FIXME: NAT code has to handle half-done double NAT --RR */
		if (hooknum == NF_IP_LOCAL_OUT)
			h = ip_conntrack_find_get(&origtuple, NULL);

		if (!h) {
			DEBUGP("icmp_error_track: no match\n");
			return NULL;
		}
		/* Reverse direction from that found */
		if (DIRECTION(h) != IP_CT_DIR_REPLY)
			*ctinfo += IP_CT_IS_REPLY;
	} else {
		if (DIRECTION(h) == IP_CT_DIR_REPLY)
			*ctinfo += IP_CT_IS_REPLY;
	}

	/* Update skb to refer to this connection */
	skb->nfct = &h->ctrack->infos[*ctinfo];
	return h->ctrack;
}

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
{
	return !(i->ctrack->status & IPS_ASSURED);
}

static int early_drop(struct list_head *chain)
{
	/* Traverse backwards: gives us oldest, which is roughly LRU */
	struct ip_conntrack_tuple_hash *h;
	int dropped = 0;

	READ_LOCK(&ip_conntrack_lock);
	h = LIST_FIND(chain, unreplied, struct ip_conntrack_tuple_hash *);
	if (h)
		atomic_inc(&h->ctrack->ct_general.use);
	READ_UNLOCK(&ip_conntrack_lock);

	if (!h)
		return dropped;

	if (del_timer(&h->ctrack->timeout)) {
		death_by_timeout((unsigned long)h->ctrack);
		dropped = 1;
	}
	ip_conntrack_put(h->ctrack);
	return dropped;
}

static inline int helper_cmp(const struct ip_conntrack_helper *i,
			     const struct ip_conntrack_tuple *rtuple)
{
	return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
}

/* Compare parts depending on mask. */
static inline int expect_cmp(const struct ip_conntrack_expect *i,
			     const struct ip_conntrack_tuple *tuple)
{
	return ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask);
}

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct ip_conntrack_tuple_hash *
init_conntrack(const struct ip_conntrack_tuple *tuple,
	       struct ip_conntrack_protocol *protocol,
	       struct sk_buff *skb)
{
	struct ip_conntrack *conntrack;
	struct ip_conntrack_tuple repl_tuple;
	size_t hash, repl_hash;
	struct ip_conntrack_expect *expected;
	int i;
	static unsigned int drop_next = 0;

	hash = hash_conntrack(tuple);

	if (ip_conntrack_max &&
	    atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
		/* Try dropping from random chain, or else from the
		   chain about to put into (in case they're trying to
		   bomb one hash chain). */
		if (drop_next >= ip_conntrack_htable_size)
			drop_next = 0;
		if (!early_drop(&ip_conntrack_hash[drop_next++])
		    && !early_drop(&ip_conntrack_hash[hash])) {
			if (net_ratelimit())
				printk(KERN_WARNING
				       "ip_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	if (!invert_tuple(&repl_tuple, tuple, protocol)) {
		DEBUGP("Can't invert tuple.\n");
		return NULL;
	}
	repl_hash = hash_conntrack(&repl_tuple);

	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
	if (!conntrack) {
		DEBUGP("Can't allocate conntrack.\n");
		return ERR_PTR(-ENOMEM);
	}

	memset(conntrack, 0, sizeof(struct ip_conntrack));
	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->ct_general.destroy = destroy_conntrack;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *tuple;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].ctrack = conntrack;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = repl_tuple;
	conntrack->tuplehash[IP_CT_DIR_REPLY].ctrack = conntrack;
	for (i=0; i < IP_CT_NUMBER; i++)
		conntrack->infos[i].master = &conntrack->ct_general;

	if (!protocol->new(conntrack, skb->nh.iph, skb->len)) {
		kmem_cache_free(ip_conntrack_cachep, conntrack);
		return NULL;
	}
	/* Don't set timer yet: wait for confirmation */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;

	/* Mark clearly that it's not in the hash table. */
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list.next = NULL;

	/* Write lock required for deletion of expected.  Without
	   this, a read-lock would do. */
	WRITE_LOCK(&ip_conntrack_lock);
	conntrack->helper = LIST_FIND(&helpers, helper_cmp,
				      struct ip_conntrack_helper *,
				      &repl_tuple);
	/* Need finding and deleting of expected ONLY if we win race */
	expected = LIST_FIND(&expect_list, expect_cmp,
			     struct ip_conntrack_expect *, tuple);

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (expected && is_confirmed(expected->expectant)) {
		/* Welcome, Mr. Bond.  We've been expecting you... */
		conntrack->status = IPS_EXPECTED;
		conntrack->master.master = &expected->expectant->ct_general;
		IP_NF_ASSERT(conntrack->master.master);
		LIST_DELETE(&expect_list, expected);
		expected->expectant = NULL;
		nf_conntrack_get(&conntrack->master);
	}
	atomic_inc(&ip_conntrack_count);
	WRITE_UNLOCK(&ip_conntrack_lock);

	if (expected && expected->expectfn)
		expected->expectfn(conntrack);
	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct ip_conntrack *
resolve_normal_ct(struct sk_buff *skb,
		  struct ip_conntrack_protocol *proto,
		  int *set_reply,
		  unsigned int hooknum,
		  enum ip_conntrack_info *ctinfo)
{
	struct ip_conntrack_tuple tuple;
	struct ip_conntrack_tuple_hash *h;

	IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);

	if (!get_tuple(skb->nh.iph, skb->len, &tuple, proto))
		return NULL;

	/* look for tuple match */
	h = ip_conntrack_find_get(&tuple, NULL);
	if (!h) {
		h = init_conntrack(&tuple, proto, skb);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}

	/* It exists; we have (non-exclusive) reference. */
	if (DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (h->ctrack->status & IPS_SEEN_REPLY) {
			DEBUGP("ip_conntrack_in: normal packet for %p\n",
			       h->ctrack);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (h->ctrack->status & IPS_EXPECTED) {
			DEBUGP("ip_conntrack_in: related packet for %p\n",
			       h->ctrack);
			*ctinfo = IP_CT_RELATED;
		} else {
			DEBUGP("ip_conntrack_in: new packet for %p\n",
			       h->ctrack);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &h->ctrack->infos[*ctinfo];
	return h->ctrack;
}

/* Netfilter hook itself. */
unsigned int ip_conntrack_in(unsigned int hooknum,
			     struct sk_buff **pskb,
			     const struct net_device *in,
			     const struct net_device *out,
			     int (*okfn)(struct sk_buff *))
{
	struct ip_conntrack *ct;
	enum ip_conntrack_info ctinfo;
	struct ip_conntrack_protocol *proto;
	int set_reply;
	int ret;

	/* FIXME: Do this right please. --RR */
	(*pskb)->nfcache |= NFC_UNKNOWN;

/* Doesn't cover locally-generated broadcast, so not worth it. */
#if 0
	/* Ignore broadcast: no `connection'. */
	if ((*pskb)->pkt_type == PACKET_BROADCAST) {
		printk("Broadcast packet!\n");
		return NF_ACCEPT;
	} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
		   == htonl(0x000000FF)) {
		printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
		       NIPQUAD((*pskb)->nh.iph->saddr),
		       NIPQUAD((*pskb)->nh.iph->daddr),
		       (*pskb)->sk, (*pskb)->pkt_type);
	}
#endif

	/* Previously seen (loopback)?  Ignore.  Do this before
	   fragment check. */
	if ((*pskb)->nfct)
		return NF_ACCEPT;

	/* Gather fragments. */
	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
		*pskb = ip_ct_gather_frags(*pskb);
		if (!*pskb)
			return NF_STOLEN;
	}

	proto = find_proto((*pskb)->nh.iph->protocol);

	/* It may be an icmp error... */
	if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
	    && icmp_error_track(*pskb, &ctinfo, hooknum))
		return NF_ACCEPT;

	if (!(ct = resolve_normal_ct(*pskb, proto, &set_reply, hooknum, &ctinfo)))
		/* Not valid part of a connection */
		return NF_ACCEPT;

	if (IS_ERR(ct))
		/* Too stressed to deal. */
		return NF_DROP;

	IP_NF_ASSERT((*pskb)->nfct);

	ret = proto->packet(ct, (*pskb)->nh.iph, (*pskb)->len, ctinfo);
	if (ret == -1) {
		/* Invalid */
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		return NF_ACCEPT;
	}

	if (ret != NF_DROP && ct->helper) {
		ret = ct->helper->help((*pskb)->nh.iph, (*pskb)->len,
				       ct, ctinfo);
		if (ret == -1) {
			/* Invalid */
			nf_conntrack_put((*pskb)->nfct);
			(*pskb)->nfct = NULL;
			return NF_ACCEPT;
		}
	}
	if (set_reply)
		set_bit(IPS_SEEN_REPLY_BIT, &ct->status);

	return ret;
}

int invert_tuplepr(struct ip_conntrack_tuple *inverse,
		   const struct ip_conntrack_tuple *orig)
{
	return invert_tuple(inverse, orig, find_proto(orig->dst.protonum));
}

static void unexpect_related(struct ip_conntrack *related_to)
{
	MUST_BE_WRITE_LOCKED(&ip_conntrack_lock);
	list_del(&related_to->expected.list);
	related_to->expected.expectant = NULL;
}

/* Would two expected things clash? */
static inline int expect_clash(const struct ip_conntrack_expect *i,
			       const struct ip_conntrack_expect *new)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct ip_conntrack_tuple intersect_mask
		= { { i->mask.src.ip & new->mask.src.ip,
		      { i->mask.src.u.all & new->mask.src.u.all } },
		    { i->mask.dst.ip & new->mask.dst.ip,
		      { i->mask.dst.u.all & new->mask.dst.u.all },
		      i->mask.dst.protonum & new->mask.dst.protonum } };

	return ip_ct_tuple_mask_cmp(&i->tuple, &new->tuple, &intersect_mask);
}

/* Add a related connection. */
int ip_conntrack_expect_related(struct ip_conntrack *related_to,
				const struct ip_conntrack_tuple *tuple,
				const struct ip_conntrack_tuple *mask,
				int (*expectfn)(struct ip_conntrack *))
{
	WRITE_LOCK(&ip_conntrack_lock);
	if (related_to->expected.expectant)
		unexpect_related(related_to);

	related_to->expected.tuple = *tuple;
	related_to->expected.mask = *mask;
	related_to->expected.expectfn = expectfn;

	if (LIST_FIND(&expect_list, expect_clash,
		      struct ip_conntrack_expect *, &related_to->expected)) {
		WRITE_UNLOCK(&ip_conntrack_lock);
		return -EBUSY;
	}

	list_prepend(&expect_list, &related_to->expected);
	related_to->expected.expectant = related_to;
	WRITE_UNLOCK(&ip_conntrack_lock);

	return 0;
}

void ip_conntrack_unexpect_related(struct ip_conntrack *related_to)
{
	WRITE_LOCK(&ip_conntrack_lock);
	unexpect_related(related_to);
	WRITE_UNLOCK(&ip_conntrack_lock);
}
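
Usage note: ip_conntrack_expect_related() above is the entry point that protocol helpers (such as the FTP helper elsewhere in this tree) use to pre-register a RELATED connection before its first packet arrives. The fragment below is a minimal sketch of such a call, not code from this file; the names sketch_expect_data_conn and exp_set_up, and the address/port parameters, are hypothetical placeholders.

/* Illustrative sketch only (not part of ip_conntrack_core.c): how a
 * conntrack helper might register an expectation for a related TCP
 * data connection.  Helper name, callback and parameters are made up. */
static int exp_set_up(struct ip_conntrack *ct)
{
	/* A real expectfn could adjust state on the newly created
	   RELATED connection here; this sketch does nothing. */
	return 0;
}

static int sketch_expect_data_conn(struct ip_conntrack *master,
				   u_int32_t server_ip, u_int16_t port)
{
	struct ip_conntrack_tuple t, mask;

	memset(&t, 0, sizeof(t));
	memset(&mask, 0, sizeof(mask));

	/* Tuple: any source, fixed destination address and port, TCP. */
	t.dst.ip = server_ip;
	t.dst.u.tcp.port = htons(port);
	t.dst.protonum = IPPROTO_TCP;

	/* Mask: wildcard the source, require an exact destination match. */
	mask.dst.ip = 0xFFFFFFFF;
	mask.dst.u.tcp.port = 0xFFFF;
	mask.dst.protonum = 0xFFFF;

	/* expect_clash() above makes this return -EBUSY if the new
	   expectation would overlap an already registered one. */
	return ip_conntrack_expect_related(master, &t, &mask, exp_set_up);
}

When init_conntrack() later sees the first packet of the expected connection, expect_cmp() matches it against this tuple/mask pair, the new conntrack is flagged IPS_EXPECTED, and the expectfn (if any) is run.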
