
route.c

Linux kernel source code, distributed as a compressed archive; this is the source code accompanying the book <<Linux内核>> (The Linux Kernel).
Language: C
Page 1 of 4
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			write_unlock_bh(&rt_hash_table[hash].lock);

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity = 1;
				ip_rt_gc_min_interval = 0;
				rt_garbage_collect();
				ip_rt_gc_min_interval = saved_int;
				ip_rt_gc_elasticity = saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk("Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
	if (rt->u.rt_next) {
		struct rtable * trt;
		printk("rt_cache @%02x: %u.%u.%u.%u", hash, NIPQUAD(rt->rt_dst));
		for (trt=rt->u.rt_next; trt; trt=trt->u.rt_next)
			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
		printk("\n");
	}
#endif
	rt_hash_table[hash].chain = rt;
	write_unlock_bh(&rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	static spinlock_t rt_peer_lock = SPIN_LOCK_UNLOCKED;
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static spinlock_t ip_fb_id_lock = SPIN_LOCK_UNLOCKED;
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
	iph->id = salt & 0xFFFF;
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so that we need not to grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = inet_getid(rt->peer);
			return;
		}
	} else {
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", NET_CALLER(iph));
	}

	ip_select_fb_ident(iph);
}

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp;

	write_lock_bh(&rt_hash_table[hash].lock);
	ip_rt_put(rt);
	for (rthp = &rt_hash_table[hash].chain; *rthp;
	     rthp = &(*rthp)->u.rt_next) {
		if (*rthp == rt) {
			*rthp = rt->u.rt_next;
			rt_free(rt);
			break;
		}
	}
	write_unlock_bh(&rt_hash_table[hash].lock);
}

void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
		    u32 saddr, u8 tos, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	u32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };

	tos &= IPTOS_RT_MASK;

	if (!in_dev)
		return;

	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i=0; i<2; i++) {
		for (k=0; k<2; k++) {
			unsigned hash = rt_hash_code(daddr, skeys[i]^(ikeys[k]<<5), tos);

			rthp=&rt_hash_table[hash].chain;

			read_lock(&rt_hash_table[hash].lock);
			while ( (rth = *rthp) != NULL) {
				struct rtable *rt;

				if (rth->key.dst != daddr ||
				    rth->key.src != skeys[i] ||
				    rth->key.tos != tos ||
				    rth->key.oif != ikeys[k] ||
				    rth->key.iif != 0) {
					rthp = &rth->u.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->u.dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_clone(&rth->u.dst);
				read_unlock(&rt_hash_table[hash].lock);

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/*
				 * Copy all the information.
				 */
				*rt = *rth;
				rt->u.dst.__use = 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				rt->u.dst.lastuse = jiffies;
				rt->u.dst.neighbour = NULL;
				rt->u.dst.hh = NULL;
				rt->u.dst.obsolete = 0;

				rt->rt_flags |= RTCF_REDIRECTED;

				/* Gateway is different ...
				 */
				rt->rt_gateway = new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state&NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				rt_del(hash, rth);

				if (!rt_intern_hash(hash, rt, &rt))
					ip_rt_put(rt);
				goto do_next;
			}
			read_unlock(&rt_hash_table[hash].lock);
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about %u.%u.%u.%u ignored.\n"
		       "  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, tos %02x\n",
		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
		       NIPQUAD(saddr), NIPQUAD(daddr), tos);
#endif
	in_dev_put(in_dev);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable*)dst;

	if (rt != NULL) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			return NULL;
		}
		if ((rt->rt_flags&RTCF_REDIRECTED) || rt->u.dst.expires) {
			unsigned hash = rt_hash_code(rt->key.dst, rt->key.src^(rt->key.oif<<5),
						     rt->key.tos);
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ip_rt_advice: redirect to %u.%u.%u.%u/%02x dropped\n",
				NIPQUAD(rt->rt_dst), rt->key.tos);
#endif
			rt_del(hash, rt);
			return NULL;
		}
	}
	return dst;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (jiffies - rt->u.dst.rate_last > ip_rt_redirect_silence)
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (jiffies - rt->u.dst.rate_last > (ip_rt_redirect_load<<rt->u.dst.rate_tokens)) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number && net_ratelimit())
			printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores redirects for "
				"%u.%u.%u.%u to %u.%u.%u.%u.\n",
				NIPQUAD(rt->rt_src), rt->rt_iif,
				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
#endif
	}
out:
	in_dev_put(in_dev);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable*)skb->dst;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
	case EINVAL:
	default:
		kfree_skb(skb);
		return 0;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	now = jiffies;
	if ((rt->u.dst.rate_tokens += (now - rt->u.dst.rate_last)) > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < sizeof(mtu_plateau)/sizeof(mtu_plateau[0]); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
{
	int i;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	u32  skeys[2] = { iph->saddr, 0, };
	u32  daddr = iph->daddr;
	u8   tos = iph->tos & IPTOS_RT_MASK;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (i=0; i<2; i++) {
		unsigned hash = rt_hash_code(daddr, skeys[i], tos);

		read_lock(&rt_hash_table[hash].lock);
		for (rth = rt_hash_table[hash].chain; rth; rth = rth->u.rt_next) {
			if (rth->key.dst == daddr &&
			    rth->key.src == skeys[i] &&
			    rth->rt_dst == daddr &&
			    rth->rt_src == iph->saddr &&
			    rth->key.tos == tos &&
			    rth->key.iif == 0 &&
			    !(rth->u.dst.mxlock&(1<<RTAX_MTU))) {
				unsigned short mtu = new_mtu;

				if (new_mtu < 68 || new_mtu >= old_mtu) {
					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 && old_mtu >= rth->u.dst.pmtu &&
					    old_mtu >= 68 + (iph->ihl<<2))
						old_mtu -= iph->ihl<<2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= rth->u.dst.pmtu) {
					if (mtu < rth->u.dst.pmtu) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.mxlock |= (1<<RTAX_MTU);
						}
						rth->u.dst.pmtu = mtu;
						dst_set_expires(&rth->u.dst, ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
		}
		read_unlock(&rt_hash_table[hash].lock);
	}
	return est_mtu ? : new_mtu;
}

void ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu)
{
	if (dst->pmtu > mtu && mtu >= 68 &&
	    !(dst->mxlock&(1<<RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->mxlock |= (1<<RTAX_MTU);
		}
		dst->pmtu = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
	}
}

static struct dst_entry * ipv4_dst_check(struct dst_entry * dst, u32 cookie)
{
	dst_release(dst);
	return NULL;
}

static struct dst_entry * ipv4_dst_reroute(struct dst_entry * dst,
					   struct sk_buff *skb)
{
	return NULL;
}

static void ipv4_dst_destroy(struct dst_entry * dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = (struct rtable *) skb->dst;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
		NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it out of fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	u32 src;
	struct fib_result res;

	if (rt->key.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(&rt->key, &res) == 0) {
#ifdef CONFIG_IP_ROUTE_NAT
		if (res.type == RTN_NAT)
			src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
					       RT_SCOPE_UNIVERSE);
		else
#endif
			src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
				       RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid&0xFFFF))
		rt->u.dst.tclassid |= tag&0xFFFF;
	if (!(rt->u.dst.tclassid&0xFFFF0000))
		rt->u.dst.tclassid |= tag&0xFFFF0000;
}
#endif

static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(&rt->u.dst.mxlock, fi->fib_metrics, sizeof(fi->fib_metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.pmtu = rt->u.dst.dev->mtu;
			if (rt->u.dst.mxlock&(1<<RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.pmtu > 576)
				rt->u.dst.pmtu = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else {
		rt->u.dst.pmtu	= rt->u.dst.dev->mtu;
	}

	if (rt->u.dst.pmtu > IP_MAX_MTU)
		rt->u.dst.pmtu = IP_MAX_MTU;
	if (rt->u.dst.advmss == 0)
		rt->u.dst.advmss = max(rt->u.dst.dev->mtu-40, ip_rt_min_advmss);
	if (rt->u.dst.advmss > 65535-40)
		rt->u.dst.advmss = 65535-40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}

static int
ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
		  u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	u32 spec_dst;
	struct in_device *in_dev = in_dev_get(dev);
	u32 itag = 0;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
	    skb->protocol != __constant_htons(ETH_P_IP))
		goto e_inval;
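
The PMTU-discovery path above falls back to guess_mtu() whenever an ICMP "fragmentation needed" message arrives without a usable next-hop MTU: it drops to the next RFC 1191 plateau below the size of the packet that failed. The following stand-alone user-space sketch (not part of route.c) runs that same plateau lookup over a few packet sizes; the table and the 68-byte floor are copied from the listing.

/* Stand-alone sketch of the plateau lookup performed by guess_mtu() above. */
#include <stdio.h>

static unsigned short mtu_plateau[] =
{ 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static unsigned short guess_mtu(unsigned short old_mtu)
{
	unsigned int i;

	/* Pick the largest plateau strictly below the failed packet size. */
	for (i = 0; i < sizeof(mtu_plateau) / sizeof(mtu_plateau[0]); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;	/* minimum IPv4 MTU */
}

int main(void)
{
	unsigned short sizes[] = { 1500, 1006, 576, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("old packet %u -> next PMTU guess %u\n",
		       sizes[i], guess_mtu(sizes[i]));
	return 0;
}

Running it shows, for example, that a failed 1500-byte packet is retried at 1492, and anything of 128 bytes or less falls to the 68-byte minimum.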
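The "Algorithm" comment before ip_rt_send_redirect() describes the per-destination redirect policy: up to ip_rt_redirect_number redirects are sent with exponentially growing gaps, after which the kernel stays silent until the destination stops triggering redirects for ip_rt_redirect_silence jiffies. The stand-alone sketch below (again not part of route.c; the tunables are assumed to match the usual 2.4 defaults with HZ = 100) simulates that token logic for a steady stream of offending packets.

/* Stand-alone simulation of the redirect backoff policy described above.
 * Tunable values are assumptions modelled on the 2.4 defaults. */
#include <stdio.h>

#define HZ			100
#define ip_rt_redirect_number	9
#define ip_rt_redirect_load	(HZ / 50)	/* base gap between redirects */
#define ip_rt_redirect_silence	((HZ / 50) << (ip_rt_redirect_number + 1))

int main(void)
{
	unsigned long jiffies, rate_last = 0;
	unsigned int rate_tokens = 0;

	/* One offending packet per jiffy for 20 simulated seconds. */
	for (jiffies = 1; jiffies <= 20UL * HZ; jiffies++) {
		if (jiffies - rate_last > ip_rt_redirect_silence)
			rate_tokens = 0;	/* peer went quiet: reset */
		if (rate_tokens >= ip_rt_redirect_number) {
			rate_last = jiffies;	/* give up, host ignores us */
			continue;
		}
		if (jiffies - rate_last >
		    ((unsigned long)ip_rt_redirect_load << rate_tokens)) {
			printf("t=%5lu jiffies: send redirect #%u\n",
			       jiffies, rate_tokens + 1);
			rate_last = jiffies;
			++rate_tokens;
		}
	}
	return 0;
}

With these values the simulated host receives nine redirects within roughly the first ten seconds, after which the loop only keeps refreshing rate_last and sends nothing more.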
