⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 fib_semantics.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 第 1 页 / 共 2 页
字号:
	/*
	 * NOTE(review): this span is the middle of a larger function whose
	 * head lies outside this chunk.  It allocates a fib_info (with one
	 * trailing struct fib_nh per nexthop) and fills it from the rtmsg
	 * 'r' and the parsed attributes 'rta'; errors unwind through the
	 * err_inval/failure labels defined later in the function.
	 */
	fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	fib_info_cnt++;
	memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));

	fi->fib_protocol = r->rtm_protocol;
	fi->fib_nhs = nhs;
	/* Link every nexthop back to its owning fib_info. */
	change_nexthops(fi) {
		nh->nh_parent = fi;
	} endfor_nexthops(fi)

	fi->fib_flags = r->rtm_flags;
	if (rta->rta_priority)
		fi->fib_priority = *rta->rta_priority;

	/* Copy route metrics (RTA_METRICS payload) into fib_metrics[];
	 * metric type 0 is skipped, anything above RTAX_MAX is invalid. */
	if (rta->rta_mx) {
		int attrlen = RTA_PAYLOAD(rta->rta_mx);
		struct rtattr *attr = RTA_DATA(rta->rta_mx);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[flavor-1] = *(unsigned*)RTA_DATA(attr);
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}

	if (rta->rta_prefsrc)
		memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 4);

	if (rta->rta_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		/* Multipath: parse the nexthop list, then cross-check that
		 * any top-level oif/gw/flow attributes agree with the first
		 * nexthop. */
		if ((err = fib_get_nhs(fi, rta->rta_mp, r)) != 0)
			goto failure;
		if (rta->rta_oif && fi->fib_nh->nh_oif != *rta->rta_oif)
			goto err_inval;
		if (rta->rta_gw && memcmp(&fi->fib_nh->nh_gw, rta->rta_gw, 4))
			goto err_inval;
#ifdef CONFIG_NET_CLS_ROUTE
		if (rta->rta_flow && memcmp(&fi->fib_nh->nh_tclassid, rta->rta_flow, 4))
			goto err_inval;
#endif
#else
		/* Multipath attribute supplied but multipath not built in. */
		goto err_inval;
#endif
	} else {
		/* Single nexthop taken straight from the top-level attrs. */
		struct fib_nh *nh = fi->fib_nh;

		if (rta->rta_oif)
			nh->nh_oif = *rta->rta_oif;
		if (rta->rta_gw)
			memcpy(&nh->nh_gw, rta->rta_gw, 4);
#ifdef CONFIG_NET_CLS_ROUTE
		if (rta->rta_flow)
			memcpy(&nh->nh_tclassid, rta->rta_flow, 4);
#endif
		nh->nh_flags = r->rtm_flags;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	/* Route types whose fib_props entry carries an error code (e.g.
	 * rejecting routes) must not carry nexthop attributes at all. */
	if (fib_props[r->rtm_type].error) {
		if (rta->rta_gw || rta->rta_oif || rta->rta_mp)
			goto err_inval;
		goto link_it;
	}

	if (r->rtm_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (r->rtm_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added.
*/
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		/* Validate every nexthop (device, scope, gateway). */
		change_nexthops(fi) {
			if ((err = fib_check_nh(r, fi, nh)) != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	/* A preferred source must be a local address, unless it is the
	 * destination of this very RTN_LOCAL route. */
	if (fi->fib_prefsrc) {
		if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL ||
		    memcmp(&fi->fib_prefsrc, rta->rta_dst, 4))
			if (inet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

link_it:
	/* Reuse an identical fib_info if one is already hashed. */
	if ((ofi = fib_find_info(fi)) != NULL) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	/* Publish the new fib_info on the global hash tables. */
	write_lock(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nh->nh_hash, head);
	} endfor_nexthops(fi)
	write_unlock(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	*errp = err;
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}
	return NULL;
}

/*
 * fib_semantic_match - scan a list of fib_aliases for one matching 'flp'.
 *
 * Returns 0 and fills '*res' (taking a reference on res->fi) on a match,
 * 1 if nothing matched, or the error code attached to the matched route
 * type.  Nexthops flagged RTNH_F_DEAD are skipped; if flp->oif is set,
 * only nexthops on that interface qualify.
 */
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
		       struct fib_result *res, int prefixlen)
{
	struct fib_alias *fa;
	int nh_sel = 0;

	list_for_each_entry(fa, head, fa_list) {
		int err;

		/* TOS must match exactly (fa_tos == 0 means "any"). */
		if (fa->fa_tos &&
		    fa->fa_tos != flp->fl4_tos)
			continue;

		/* The route's scope must be at least as wide as requested. */
		if (fa->fa_scope < flp->fl4_scope)
			continue;

		fa->fa_state |= FA_S_ACCESSED;

		err = fib_props[fa->fa_type].error;
		if (err == 0) {
			struct fib_info *fi = fa->fa_info;

			if (fi->fib_flags & RTNH_F_DEAD)
				continue;

			switch (fa->fa_type) {
			case RTN_UNICAST:
			case RTN_LOCAL:
			case RTN_BROADCAST:
			case RTN_ANYCAST:
			case RTN_MULTICAST:
				/* Pick the first live nexthop, honouring a
				 * requested output interface if given. */
				for_nexthops(fi) {
					if (nh->nh_flags&RTNH_F_DEAD)
						continue;
					if (!flp->oif || flp->oif == nh->nh_oif)
						break;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (nhsel < fi->fib_nhs) {
					nh_sel = nhsel;
					goto out_fill_res;
				}
#else
				if (nhsel < 1) {
					goto out_fill_res;
				}
#endif
				endfor_nexthops(fi);
				continue;

			default:
				printk(KERN_DEBUG "impossible 102\n");
				return -EINVAL;
			};
		}
		return err;
	}
	return 1;

out_fill_res:
	res->prefixlen = prefixlen;
	res->nh_sel = nh_sel;
	res->type = fa->fa_type;
	res->scope = fa->fa_scope;
	res->fi = fa->fa_info;
	/* Caller now holds a reference on res->fi. */
	atomic_inc(&res->fi->fib_clntref);
	return 0;
}

/* Find appropriate source address to this destination */

u32 __fib_res_prefsrc(struct fib_result *res)
{
	return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
}

/*
 * fib_dump_info - append one route as an rtnetlink message to 'skb'.
 *
 * Returns skb->len on success, or -1 if the skb ran out of room (any
 * partially written message is trimmed off again).
 */
int
fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
	      u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
	      struct fib_info *fi)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*rtm));
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	rtm->rtm_table = tb_id;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	if (rtm->rtm_dst_len)
		RTA_PUT(skb, RTA_DST, 4, dst);
	rtm->rtm_protocol = fi->fib_protocol;
	if (fi->fib_priority)
		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
#ifdef CONFIG_NET_CLS_ROUTE
	if (fi->fib_nh[0].nh_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
#endif
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto rtattr_failure;
	if (fi->fib_prefsrc)
		RTA_PUT(skb, RTA_PREFSRC, 4, &fi->fib_prefsrc);
	if (fi->fib_nhs == 1) {
		/* Single nexthop: plain gateway/oif attributes. */
		if (fi->fib_nh->nh_gw)
			RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
		if (fi->fib_nh->nh_oif)
			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		/* Multiple nexthops: nested RTA_MULTIPATH attribute, one
		 * rtnexthop record (plus optional gateway) per nexthop. */
		struct rtnexthop *nhp;
		struct rtattr *mp_head;

		if (skb_tailroom(skb) <= RTA_SPACE(0))
			goto rtattr_failure;
		mp_head = (struct rtattr*)skb_put(skb, RTA_SPACE(0));

		for_nexthops(fi) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight-1;
			nhp->rtnh_ifindex = nh->nh_oif;
			if (nh->nh_gw)
				RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
			/* Record length includes the optional gateway. */
			nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
		} endfor_nexthops(fi);
		mp_head->rta_type = RTA_MULTIPATH;
		mp_head->rta_len = skb->tail - (u8*)mp_head;
	}
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Undo the partially built message. */
	skb_trim(skb, b - skb->data);
	return -1;
}

#ifndef CONFIG_IP_NOSIOCRT

/*
 * fib_convert_rtentry - translate a legacy SIOCADDRT/SIOCDELRT rtentry
 * into the netlink rtmsg + kern_rta representation used internally.
 */
int
fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
		    struct kern_rta *rta, struct rtentry *r)
{
	int    plen;
	u32    *ptr;

	memset(rtm, 0, sizeof(*rtm));
	memset(rta, 0, sizeof(*rta));

	if (r->rt_dst.sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/* Check mask for validity:
	   a) it must be contiguous.
	   b) destination must have all host bits clear.
	   c) if application forgot to set correct family (AF_INET),
	      reject request unless it is absolutely clear i.e.
	      both family and mask are zero.
*/
	plen = 32;
	ptr = &((struct sockaddr_in*)&r->rt_dst)->sin_addr.s_addr;
	if (!(r->rt_flags&RTF_HOST)) {
		u32 mask = ((struct sockaddr_in*)&r->rt_genmask)->sin_addr.s_addr;

		if (r->rt_genmask.sa_family != AF_INET) {
			if (mask || r->rt_genmask.sa_family)
				return -EAFNOSUPPORT;
		}
		if (bad_mask(mask, *ptr))
			return -EINVAL;
		plen = inet_mask_len(mask);
	}

	nl->nlmsg_flags = NLM_F_REQUEST;
	nl->nlmsg_pid = 0;
	nl->nlmsg_seq = 0;
	nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
	if (cmd == SIOCDELRT) {
		nl->nlmsg_type = RTM_DELROUTE;
		nl->nlmsg_flags = 0;
	} else {
		nl->nlmsg_type = RTM_NEWROUTE;
		nl->nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE;
		rtm->rtm_protocol = RTPROT_BOOT;
	}

	rtm->rtm_dst_len = plen;
	rta->rta_dst = ptr;

	/* Stash rt_metric-1 in the rtentry's rt_pad3 field and point the
	 * priority attribute at it (in-kernel scratch storage). */
	if (r->rt_metric) {
		*(u32*)&r->rt_pad3 = r->rt_metric - 1;
		rta->rta_priority = (u32*)&r->rt_pad3;
	}

	if (r->rt_flags&RTF_REJECT) {
		rtm->rtm_scope = RT_SCOPE_HOST;
		rtm->rtm_type = RTN_UNREACHABLE;
		return 0;
	}

	rtm->rtm_scope = RT_SCOPE_NOWHERE;
	rtm->rtm_type = RTN_UNICAST;

	if (r->rt_dev) {
		char *colon;
		struct net_device *dev;
		char   devname[IFNAMSIZ];

		if (copy_from_user(devname, r->rt_dev, IFNAMSIZ-1))
			return -EFAULT;
		devname[IFNAMSIZ-1] = 0;
		/* "ethX:N" names an alias: split off the label and look up
		 * the base device. */
		colon = strchr(devname, ':');
		if (colon)
			*colon = 0;
		dev = __dev_get_by_name(devname);
		if (!dev)
			return -ENODEV;
		rta->rta_oif = &dev->ifindex;
		if (colon) {
			struct in_ifaddr *ifa;
			struct in_device *in_dev = __in_dev_get(dev);

			if (!in_dev)
				return -ENODEV;
			*colon = ':';
			/* Use the alias's address as preferred source. */
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
			if (ifa == NULL)
				return -ENODEV;
			rta->rta_prefsrc = &ifa->ifa_local;
		}
	}

	ptr = &((struct sockaddr_in*)&r->rt_gateway)->sin_addr.s_addr;
	if (r->rt_gateway.sa_family == AF_INET && *ptr) {
		rta->rta_gw = ptr;
		if (r->rt_flags&RTF_GATEWAY && inet_addr_type(*ptr) == RTN_UNICAST)
			rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	}

	if (cmd == SIOCDELRT)
		return 0;

	if (r->rt_flags&RTF_GATEWAY && rta->rta_gw == NULL)
		return -EINVAL;

	if (rtm->rtm_scope == RT_SCOPE_NOWHERE)
		rtm->rtm_scope = RT_SCOPE_LINK;

	if (r->rt_flags&(RTF_MTU|RTF_WINDOW|RTF_IRTT)) {
		/* Build an RTA_METRICS blob from the legacy rtentry fields. */
		struct rtattr *rec;
		struct rtattr *mx = kmalloc(RTA_LENGTH(3*RTA_LENGTH(4)), GFP_KERNEL);

		if (mx == NULL)
			return -ENOMEM;
		rta->rta_mx = mx;
		mx->rta_type = RTA_METRICS;
		mx->rta_len  = RTA_LENGTH(0);
		if (r->rt_flags&RTF_MTU) {
			rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
			rec->rta_type = RTAX_ADVMSS;
			rec->rta_len = RTA_LENGTH(4);
			mx->rta_len += RTA_LENGTH(4);
			/* MTU is exported as advertised MSS (MTU - 40). */
			*(u32*)RTA_DATA(rec) = r->rt_mtu - 40;
		}
		if (r->rt_flags&RTF_WINDOW) {
			rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
			rec->rta_type = RTAX_WINDOW;
			rec->rta_len = RTA_LENGTH(4);
			mx->rta_len += RTA_LENGTH(4);
			*(u32*)RTA_DATA(rec) = r->rt_window;
		}
		if (r->rt_flags&RTF_IRTT) {
			rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
			rec->rta_type = RTAX_RTT;
			rec->rta_len = RTA_LENGTH(4);
			mx->rta_len += RTA_LENGTH(4);
			*(u32*)RTA_DATA(rec) = r->rt_irtt<<3;
		}
	}
	return 0;
}

#endif

/*
   Update FIB if:
   - local address disappeared -> we must delete all the entries
     referring to it.
   - device went down -> we must shutdown all nexthops going via it.
*/
/* Returns the number of fib_infos newly marked RTNH_F_DEAD. */
int fib_sync_down(u32 local, struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;

	/* force: kill nexthops regardless of their scope. */
	if (force)
		scope = -1;

	/* Disappeared local address: every fib_info using it as its
	 * preferred source goes dead. */
	if (local && fib_info_laddrhash) {
		unsigned int hash = fib_laddr_hashfn(local);
		struct hlist_head *head = &fib_info_laddrhash[hash];
		struct hlist_node *node;
		struct fib_info *fi;

		hlist_for_each_entry(fi, node, head, fib_lhash) {
			if (fi->fib_prefsrc == local) {
				fi->fib_flags |= RTNH_F_DEAD;
				ret++;
			}
		}
	}

	/* Device down: mark its nexthops dead, and the whole route dead
	 * once all of its nexthops are. */
	if (dev) {
		struct fib_info *prev_fi = NULL;
		unsigned int hash = fib_devindex_hashfn(dev->ifindex);
		struct hlist_head *head = &fib_info_devhash[hash];
		struct hlist_node *node;
		struct fib_nh *nh;

		hlist_for_each_entry(nh, node, head, nh_hash) {
			struct fib_info *fi = nh->nh_parent;
			int dead;

			BUG_ON(!fi->fib_nhs);
			/* Several nexthops of one fib_info may sit on the
			 * same chain; process each fib_info only once. */
			if (nh->nh_dev != dev || fi == prev_fi)
				continue;
			prev_fi = fi;
			dead = 0;
			change_nexthops(fi) {
				if (nh->nh_flags&RTNH_F_DEAD)
					dead++;
				else if (nh->nh_dev == dev &&
					 nh->nh_scope != scope) {
					nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
					/* Remove this nexthop's share from
					 * the multipath power budget. */
					spin_lock_bh(&fib_multipath_lock);
					fi->fib_power -= nh->nh_power;
					nh->nh_power = 0;
					spin_unlock_bh(&fib_multipath_lock);
#endif
					dead++;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (force > 1 && nh->nh_dev == dev) {
					/* force > 1: kill the whole route
					 * unconditionally. */
					dead = fi->fib_nhs;
					break;
				}
#endif
			} endfor_nexthops(fi)
			if (dead == fi->fib_nhs) {
				fi->fib_flags |= RTNH_F_DEAD;
				ret++;
			}
		}
	}

	return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
   Dead device goes up. We wake up dead nexthops.
   It takes sense only on multipath routes.
*/
/* Returns the number of fib_infos brought back to life. */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags&IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		/* Process each fib_info only once even if several of its
		 * nexthops hash to this chain. */
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nh->nh_flags&RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			/* Only revive nexthops on this device while it is
			 * up and still has an inet configuration. */
			if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
				continue;
			if (nh->nh_dev != dev || __in_dev_get(dev) == NULL)
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nh->nh_power = 0;
			nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
   The algorithm is suboptimal, but it provides really
   fair weighted route distribution.
 */

void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	/* Refill each live nexthop's power from its weight once the
	 * route's total budget is exhausted. */
	if (fi->fib_power <= 0) {
		int power = 0;

		change_nexthops(fi) {
			if (!(nh->nh_flags&RTNH_F_DEAD)) {
				power += nh->nh_weight;
				nh->nh_power = nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be random number [0..fi->fib_power-1],
	   it is pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
			if ((w -= nh->nh_power) <= 0) {
				/* Charge this nexthop and select it. */
				nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -