
📄 route.c

📁 GNU Hurd source code
💻 C
📖 Page 1 of 4
work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire, atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
	return 0;
}

static int rt_intern_hash(unsigned hash, struct rtable * rt, struct rtable ** rp)
{
	struct rtable	*rth, **rthp;
	unsigned long	now = jiffies;
	int attempts = !in_interrupt();

restart:
	start_bh_atomic();
	rthp = &rt_hash_table[hash];

	while ((rth = *rthp) != NULL) {
		if (memcmp(&rth->key, &rt->key, sizeof(rt->key)) == 0) {
			/* Put it first */
			*rthp = rth->u.rt_next;
			rth->u.rt_next = rt_hash_table[hash];
			rt_hash_table[hash] = rth;

			atomic_inc(&rth->u.dst.refcnt);
			atomic_inc(&rth->u.dst.use);
			rth->u.dst.lastuse = now;
			end_bh_atomic();

			rt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->u.rt_next;
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
		if (!arp_bind_neighbour(&rt->u.dst)) {
			end_bh_atomic();

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity = 1;
				ip_rt_gc_min_interval = 0;
				rt_garbage_collect();
				ip_rt_gc_min_interval = saved_int;
				ip_rt_gc_elasticity = saved_elasticity;
				goto restart;
			}

			rt_drop(rt);
			if (net_ratelimit())
				printk("neighbour table overflow\n");
			return -ENOBUFS;
		}
	}

	rt->u.rt_next = rt_hash_table[hash];
#if RT_CACHE_DEBUG >= 2
	if (rt->u.rt_next) {
		struct rtable * trt;
		printk("rt_cache @%02x: %08x", hash, rt->rt_dst);
		for (trt=rt->u.rt_next; trt; trt=trt->u.rt_next)
			printk(" . %08x", trt->rt_dst);
		printk("\n");
	}
#endif
	rt_hash_table[hash] = rt;
	end_bh_atomic();
	*rp = rt;
	return 0;
}
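rt_intern_hash keeps each hash chain in rough LRU order: on a lookup hit, the matching entry is unlinked via a pointer-to-pointer cursor and relinked at the chain head, so hot routes cluster near the front. A minimal userspace sketch of the same move-to-front idea, with a simplified node type; the names here (struct node, lookup) are illustrative, not kernel code:

#include <stdio.h>

/* Simplified stand-in for struct rtable: a singly linked chain node. */
struct node {
	int key;
	struct node *next;
};

/* Move-to-front lookup: on a hit, unlink the node through the
 * pointer-to-pointer cursor and relink it at the chain head. */
static struct node *lookup(struct node **head, int key)
{
	struct node **np;

	for (np = head; *np != NULL; np = &(*np)->next) {
		if ((*np)->key == key) {
			struct node *hit = *np;

			*np = hit->next;	/* unlink */
			hit->next = *head;	/* relink at head */
			*head = hit;
			return hit;
		}
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a, *n;

	lookup(&head, 3);		/* 3 becomes the new head */
	for (n = head; n; n = n->next)
		printf("%d ", n->key);	/* prints: 3 1 2 */
	printf("\n");
	return 0;
}

The same cursor style lets rt_del below remove an entry from the middle of a chain without tracking a separate "previous" pointer.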
%08x", trt->rt_dst);		printk("\n");	}#endif	rt_hash_table[hash] = rt;	end_bh_atomic();	*rp = rt;	return 0;}static void rt_del(unsigned hash, struct rtable *rt){	struct rtable **rthp;	start_bh_atomic();	ip_rt_put(rt);	for (rthp = &rt_hash_table[hash]; *rthp; rthp = &(*rthp)->u.rt_next) {		if (*rthp == rt) {			*rthp = rt->u.rt_next;			rt_free(rt);			break;		}	}	end_bh_atomic();}void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,		    u32 saddr, u8 tos, struct device *dev){	int i, k;	struct in_device *in_dev = dev->ip_ptr;	struct rtable *rth, **rthp;	u32  skeys[2] = { saddr, 0 };	int  ikeys[2] = { dev->ifindex, 0 };	tos &= IPTOS_TOS_MASK;	if (!in_dev)		return;	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)	    || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))		goto reject_redirect;	if (!IN_DEV_SHARED_MEDIA(in_dev)) {		if (!inet_addr_onlink(in_dev, new_gw, old_gw))			goto reject_redirect;		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))			goto reject_redirect;	} else {		if (inet_addr_type(new_gw) != RTN_UNICAST)			goto reject_redirect;	}	for (i=0; i<2; i++) {		for (k=0; k<2; k++) {			unsigned hash = rt_hash_code(daddr, skeys[i]^(ikeys[k]<<5), tos);			rthp=&rt_hash_table[hash];			while ( (rth = *rthp) != NULL) {				struct rtable *rt;				if (rth->key.dst != daddr ||				    rth->key.src != skeys[i] ||				    rth->key.tos != tos ||				    rth->key.oif != ikeys[k] ||				    rth->key.iif != 0) {					rthp = &rth->u.rt_next;					continue;				}				if (rth->rt_dst != daddr ||				    rth->rt_src != saddr ||				    rth->u.dst.error ||				    rth->rt_gateway != old_gw ||				    rth->u.dst.dev != dev)					break;				dst_clone(&rth->u.dst);				rt = dst_alloc(sizeof(struct rtable), &ipv4_dst_ops);				if (rt == NULL) {					ip_rt_put(rth);					return;				}				/*				 * Copy all the information.				 */				*rt = *rth;				atomic_set(&rt->u.dst.refcnt, 1);				atomic_set(&rt->u.dst.use, 1);				rt->u.dst.lastuse = jiffies;				rt->u.dst.neighbour = NULL;				rt->u.dst.hh = NULL;				rt->u.dst.obsolete = 0;				rt->rt_flags |= RTCF_REDIRECTED;				/* Gateway is different ... */				rt->rt_gateway = new_gw;				/* Redirect received -> path was valid */				dst_confirm(&rth->u.dst);				if (!arp_bind_neighbour(&rt->u.dst) ||				    !(rt->u.dst.neighbour->nud_state&NUD_VALID)) {					if (rt->u.dst.neighbour)						neigh_event_send(rt->u.dst.neighbour, NULL);					ip_rt_put(rth);					rt_drop(rt);					break;				}				rt_del(hash, rth);				if (!rt_intern_hash(hash, rt, &rt))					ip_rt_put(rt);				break;			}		}	}	return;reject_redirect:#ifdef CONFIG_IP_ROUTE_VERBOSE	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())		printk(KERN_INFO "Redirect from %X/%s to %X ignored."		       "Path = %X -> %X, tos %02x\n",		       ntohl(old_gw), dev->name, ntohl(new_gw),		       ntohl(saddr), ntohl(daddr), tos);#else	; /* Do nothing.  */#endif}static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst){	struct rtable *rt = (struct rtable*)dst;	if (rt != NULL) {		if (dst->obsolete) {			ip_rt_put(rt);			return NULL;		}		if ((rt->rt_flags&RTCF_REDIRECTED) || rt->u.dst.expires) {			unsigned hash = rt_hash_code(rt->key.dst, rt->key.src^(rt->key.oif<<5), rt->key.tos);#if RT_CACHE_DEBUG >= 1			printk(KERN_DEBUG "ip_rt_advice: redirect to %d.%d.%d.%d/%02x dropped\n", NIPQUAD(rt->rt_dst), rt->key.tos);#endif			rt_del(hash, rt);			return NULL;		}	}	return dst;}/* * Algorithm: *	1. 
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	struct in_device *in_dev = (struct in_device*)rt->u.dst.dev->ip_ptr;

	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev))
		return;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (jiffies - rt->u.dst.rate_last > ip_rt_redirect_silence)
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (jiffies - rt->u.dst.rate_last > (ip_rt_redirect_load<<rt->u.dst.rate_tokens)) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number && net_ratelimit())
			printk(KERN_WARNING "host %08x/if%d ignores redirects for %08x to %08x.\n",
			       rt->rt_src, rt->rt_iif, rt->rt_dst, rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
	case EINVAL:
	default:
		kfree_skb(skb);
		return 0;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	now = jiffies;
	if ((rt->u.dst.rate_tokens += (now - rt->u.dst.rate_last)) > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

	kfree_skb(skb);
	return 0;
}
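The backoff in ip_rt_send_redirect doubles the allowed quiet period after every redirect (ip_rt_redirect_load << rate_tokens) and goes silent after ip_rt_redirect_number attempts; a silence longer than ip_rt_redirect_silence resets the counter. A compressed userspace model of that schedule; tick counts stand in for jiffies, and the constants are illustrative rather than the kernel's sysctl defaults:

#include <stdio.h>

#define REDIRECT_NUMBER   9	/* give up after this many redirects */
#define REDIRECT_LOAD     2	/* base interval, in ticks */
#define REDIRECT_SILENCE  1024	/* quiet period that resets the state */

struct rate_state {
	unsigned long last;	/* tick of the last event */
	unsigned int tokens;	/* redirects sent so far */
};

/* Returns 1 if a redirect may be sent at tick "now".  The allowed
 * interval doubles with every redirect already sent; once
 * REDIRECT_NUMBER redirects have been ignored we stay silent until
 * REDIRECT_SILENCE ticks pass without traffic, which resets the state. */
static int may_send_redirect(struct rate_state *rs, unsigned long now)
{
	if (now - rs->last > REDIRECT_SILENCE)
		rs->tokens = 0;		/* host probably forgot the route */

	if (rs->tokens >= REDIRECT_NUMBER) {
		rs->last = now;		/* still being ignored */
		return 0;
	}

	if (now - rs->last > ((unsigned long)REDIRECT_LOAD << rs->tokens)) {
		rs->last = now;
		rs->tokens++;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct rate_state rs = { 0, 0 };
	unsigned long t;

	/* A packet arrives every tick; redirects actually go out at
	 * ticks 3, 8, 17, 34, 67, ... -- exponentially farther apart. */
	for (t = 1; t <= 600; t++)
		if (may_send_redirect(&rs, t))
			printf("redirect at tick %lu\n", t);
	return 0;
}

ip_error above uses the complementary scheme, a token bucket: tokens accumulate with elapsed time up to ip_rt_error_burst, and each ICMP error sent spends ip_rt_error_cost of them.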
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < sizeof(mtu_plateau)/sizeof(mtu_plateau[0]); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
{
	int i;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	u32  skeys[2] = { iph->saddr, 0, };
	u32  daddr = iph->daddr;
	u8   tos = iph->tos & IPTOS_TOS_MASK;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (i=0; i<2; i++) {
		unsigned hash = rt_hash_code(daddr, skeys[i], tos);

		for (rth = rt_hash_table[hash]; rth; rth = rth->u.rt_next) {
			if (rth->key.dst == daddr &&
			    rth->key.src == skeys[i] &&
			    rth->rt_dst == daddr &&
			    rth->rt_src == iph->saddr &&
			    rth->key.tos == tos &&
			    rth->key.iif == 0 &&
			    !(rth->u.dst.mxlock&(1<<RTAX_MTU))) {
				unsigned short mtu = new_mtu;

				if (new_mtu < 68 || new_mtu >= old_mtu) {
					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 && old_mtu >= rth->u.dst.pmtu &&
					    old_mtu >= 68 + (iph->ihl<<2))
						old_mtu -= iph->ihl<<2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= rth->u.dst.pmtu) {
					if (mtu < rth->u.dst.pmtu) {
						dst_confirm(&rth->u.dst);
						rth->u.dst.pmtu = mtu;
						dst_set_expires(&rth->u.dst, ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
		}
	}
	return est_mtu ? : new_mtu;
}

void ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu)
{
	if (dst->pmtu > mtu && mtu >= 68 &&
	    !(dst->mxlock&(1<<RTAX_MTU))) {
		dst->pmtu = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
	}
}

static struct dst_entry * ipv4_dst_check(struct dst_entry * dst, u32 cookie)
{
	dst_release(dst);
	return NULL;
}

static struct dst_entry * ipv4_dst_reroute(struct dst_entry * dst,
					   struct sk_buff *skb)
{
	return NULL;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = (struct rtable *) skb->dst;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %08x -> %08x, %s\n", skb->nh.iph->saddr,
	       skb->nh.iph->daddr, skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}
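The plateau fallback used by ip_rt_frag_needed above handles routers whose Fragmentation Needed messages carry no next-hop MTU: guess_mtu steps down to the largest plateau strictly below the failed packet size, bottoming out at 68, the minimum IPv4 MTU. A standalone walk of the same table; the demo starting size is illustrative:

#include <stdio.h>

/* RFC 1191 plateau table; per the comment above, the last two values
 * are the kernel's additions for AMPRnet AX.25 paths. */
static unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

/* Largest plateau strictly below old_mtu, or 68 (the minimum
 * IPv4 MTU) if even 128 is not small enough. */
static unsigned short guess_mtu(unsigned short old_mtu)
{
	unsigned int i;

	for (i = 0; i < sizeof(mtu_plateau)/sizeof(mtu_plateau[0]); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

int main(void)
{
	/* A 1500-byte packet that keeps failing steps down through
	 * 1492 -> 576 -> 296 -> 216 -> 128 -> 68. */
	unsigned short mtu = 1500;

	while (mtu > 68) {
		mtu = guess_mtu(mtu);
		printf("trying MTU %u\n", mtu);
	}
	return 0;
}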
/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it out of fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	u32 src;
	struct fib_result res;

	if (rt->key.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(&rt->key, &res) == 0 && res.type != RTN_NAT)
		src = FIB_RES_PREFSRC(res);
	else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway, RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid&0xFFFF))
		rt->u.dst.tclassid |= tag&0xFFFF;
	if (!(rt->u.dst.tclassid&0xFFFF0000))
		rt->u.dst.tclassid |= tag&0xFFFF0000;
}
#endif

static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		rt->u.dst.mxlock = fi->fib_metrics[RTAX_LOCK-1];
		rt->u.dst.pmtu = fi->fib_mtu;
		if (fi->fib_mtu == 0) {
			rt->u.dst.pmtu = rt->u.dst.dev->mtu;
			if (rt->u.dst.pmtu > IP_MAX_MTU)
				rt->u.dst.pmtu = IP_MAX_MTU;
			if (rt->u.dst.pmtu < 68)
				rt->u.dst.pmtu = 68;
			if (rt->u.dst.mxlock&(1<<RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.pmtu > 576)
				rt->u.dst.pmtu = 576;
		}
		rt->u.dst.window= fi->fib_window ? : 0;
		rt->u.dst.rtt	= fi->fib_rtt ? : TCP_TIMEOUT_INIT;
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else {
		rt->u.dst.pmtu	= rt->u.dst.dev->mtu;
		if (rt->u.dst.pmtu > IP_MAX_MTU)
			rt->u.dst.pmtu = IP_MAX_MTU;
		if (rt->u.dst.pmtu < 68)
			rt->u.dst.pmtu = 68;
		rt->u.dst.window= 0;
		rt->u.dst.rtt	= TCP_TIMEOUT_INIT;
	}
#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}

static int
ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
		  u8 tos, struct device *dev, int our)
{
	unsigned hash;
