/*
 * fib_semantics.c
 * (web-viewer banner removed; original showed "📄 fib_semantics.c" and
 *  "字号:" -- "font size" -- which are page chrome, not source code)
 */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Version:	$Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

/* Debug printf, compiled out (expands to nothing). */
#define FSprintk(a...)

/* Protects fib_info_hash, fib_info_laddrhash and fib_info_devhash below. */
static rwlock_t fib_info_lock = RW_LOCK_UNLOCKED;
static struct hlist_head *fib_info_hash;	/* fib_info lookup table, keyed by fib_info_hashfn() */
static struct hlist_head *fib_info_laddrhash;	/* fib_info table keyed by fib_prefsrc (local address) */
static unsigned int fib_hash_size;		/* bucket count of both tables above (power of two) */
static unsigned int fib_info_cnt;		/* number of live fib_info objects */

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
/* Nexthops hashed by device ifindex; used by ip_fib_check_default(). */
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static spinlock_t fib_multipath_lock = SPIN_LOCK_UNLOCKED;

/*
 * Iterate over all nexthops of a fib_info.  Each macro opens a brace that
 * must be closed with endfor_nexthops().  `nhsel' and `nh' are visible
 * inside the loop body; change_nexthops() casts away const for writers.
 */
#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope, that gcc will optimize it to get rid of dummy loop */

#define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \
for (nhsel=0; nhsel < 1; nhsel++)

#define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \
for (nhsel=0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }

/*
 * Per route-type properties, indexed by RTN_* (see the trailing comments):
 * `error' is the errno delivered for packets hitting that route type
 * (0 = forward/deliver normally), `scope' is the widest scope the type
 * may legally have (checked in fib_create_info()).
 */
static struct { int error; u8 scope;
} fib_props[RTA_MAX + 1] = { { .error = 0, .scope = RT_SCOPE_NOWHERE, }, /* RTN_UNSPEC */
	{ .error = 0, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_UNICAST */
	{ .error = 0, .scope = RT_SCOPE_HOST, },	/* RTN_LOCAL */
	{ .error = 0, .scope = RT_SCOPE_LINK, },	/* RTN_BROADCAST */
	{ .error = 0, .scope = RT_SCOPE_LINK, },	/* RTN_ANYCAST */
	{ .error = 0, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_MULTICAST */
	{ .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_BLACKHOLE */
	{ .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_UNREACHABLE */
	{ .error = -EACCES, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_PROHIBIT */
	{ .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE, },	/* RTN_THROW */
	{ .error = -EINVAL, .scope = RT_SCOPE_NOWHERE, },	/* RTN_NAT */
	{ .error = -EINVAL, .scope = RT_SCOPE_NOWHERE, },	/* RTN_XRESOLVE */
};

/* Release a nexthop info record */

/*
 * Final destruction of a fib_info: drop the device reference held by every
 * nexthop and free the object.  Must only be called once fib_dead has been
 * set (fib_release_info() does that); otherwise we just warn and bail.
 */
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		printk("Freeing alive fib_info %p\n", fi);
		return;
	}
	change_nexthops(fi) {
		if (nh->nh_dev)
			dev_put(nh->nh_dev);
		nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	kfree(fi);
}

/*
 * Drop one tree reference on `fi'.  When the last tree reference goes,
 * unhash it from all three hash tables under fib_info_lock, mark it dead
 * and drop the refcount (fib_info_put() presumably frees via
 * free_fib_info() at zero -- defined elsewhere, TODO confirm).
 */
void fib_release_info(struct fib_info *fi)
{
	write_lock(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			hlist_del(&nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	write_unlock(&fib_info_lock);
}

/*
 * Compare the nexthop arrays of two fib_infos field by field.
 * Returns 0 when equal, -1 on the first mismatch.  RTNH_F_DEAD is masked
 * out of the flags comparison.  `onh' is advanced in lockstep without a
 * bound check: the caller (fib_find_info) has already verified that both
 * fib_infos have the same fib_nhs.
 */
static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_NET_CLS_ROUTE
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

/* Bucket index into fib_info_hash for `fi', folded from its key fields. */
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= fi->fib_protocol;
	val ^= fi->fib_prefsrc;
	val ^= fi->fib_priority;

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

/*
 * Look up an already-registered fib_info identical to `nfi' so it can be
 * shared instead of duplicated.  Equality = protocol, prefsrc, priority,
 * full metrics block, flags (minus RTNH_F_DEAD) and all nexthops.
 * Returns the existing entry or NULL.
 */
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(fi->fib_metrics)) == 0 &&
		    ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Bucket index into fib_info_devhash for a device ifindex. */
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

/* Check, that the gateway is already configured.
   Used only by redirect accept routine.
*/

/*
 * Return 0 if some alive nexthop already uses (dev, gw) as its gateway,
 * -1 otherwise.  Walks the per-ifindex nexthop hash under the read lock.
 */
int ip_fib_check_default(u32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	read_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags&RTNH_F_DEAD)) {
			read_unlock(&fib_info_lock);
			return 0;
		}
	}

	read_unlock(&fib_info_lock);

	return -1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * Scan a chain of rtattrs for attribute `type' and return its u32 payload.
 * Returns 0 when the attribute is absent -- callers cannot distinguish
 * "absent" from a genuine zero value, and treat 0 as "not specified".
 */
static u32 fib_get_attr32(struct rtattr *attr, int attrlen, int type)
{
	while (RTA_OK(attr,attrlen)) {
		if (attr->rta_type == type)
			return *(u32*)RTA_DATA(attr);
		attr = RTA_NEXT(attr, attrlen);
	}
	return 0;
}

/*
 * Count the rtnexthop records inside an RTA_MULTIPATH attribute.
 * Returns 0 when a record's rtnh_len overruns the payload (malformed).
 */
static int
fib_count_nexthops(struct rtattr *rta)
{
	int nhs = 0;
	struct rtnexthop *nhp = RTA_DATA(rta);
	int nhlen = RTA_PAYLOAD(rta);

	while (nhlen >= (int)sizeof(struct rtnexthop)) {
		if ((nhlen -= nhp->rtnh_len) < 0)
			return 0;
		nhs++;
		nhp = RTNH_NEXT(nhp);
	};	/* NOTE(review): stray ';' -- harmless empty statement */
	return nhs;
}

/*
 * Populate fi's nexthop array from an RTA_MULTIPATH attribute: flags, oif,
 * weight (rtnh_hops + 1) and the optional per-hop RTA_GATEWAY / RTA_FLOW
 * sub-attributes.  Returns 0 or -EINVAL on a malformed record.
 * Assumes fi->fib_nhs was already set from fib_count_nexthops() on the
 * same attribute, so the loop and payload stay in step.
 */
static int
fib_get_nhs(struct fib_info *fi, const struct rtattr *rta, const struct rtmsg *r)
{
	struct rtnexthop *nhp = RTA_DATA(rta);
	int nhlen = RTA_PAYLOAD(rta);

	change_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		/* low byte of rtm_flags is replaced by the per-hop flags */
		nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
		nh->nh_oif = nhp->rtnh_ifindex;
		nh->nh_weight = nhp->rtnh_hops + 1;
		if (attrlen) {
			nh->nh_gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);
#ifdef CONFIG_NET_CLS_ROUTE
			nh->nh_tclassid = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_FLOW);
#endif
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);
	return 0;
}

#endif

/*
 * Decide whether an existing fib_info matches the nexthop description in a
 * netlink request (used when deleting/replacing routes).
 * Returns 0 = match, 1 = no match, -EINVAL = malformed multipath attribute.
 * Priority, then single-hop oif/gw, then (if CONFIG_IP_ROUTE_MULTIPATH)
 * each multipath hop is compared; an absent field matches anything.
 */
int fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct kern_rta *rta,
		 struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *nhp;
	int nhlen;
#endif

	if (rta->rta_priority &&
	    *rta->rta_priority != fi->fib_priority)
		return 1;

	if (rta->rta_oif || rta->rta_gw) {
		if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) &&
		    (!rta->rta_gw || memcmp(rta->rta_gw, &fi->fib_nh->nh_gw, 4) == 0))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (rta->rta_mp == NULL)
		return 0;
	nhp = RTA_DATA(rta->rta_mp);
	nhlen = RTA_PAYLOAD(rta->rta_mp);

	for_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		u32 gw;

		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
			return 1;
		if (attrlen) {
			gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);
			if (gw && gw != nh->nh_gw)
				return 1;
#ifdef CONFIG_NET_CLS_ROUTE
			gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_FLOW);
			if (gw && gw != nh->nh_tclassid)
				return 1;
#endif
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);
#endif
	return 0;
}

/*
 * Picture
 * -------
 *
 * Semantics of nexthop is very messy by historical reasons.
 * We have to take into account, that:
 * a) gateway can be actually local interface address,
 *    so that gatewayed route is direct.
 * b) gateway must be on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) If both gateway and interface are specified, they should not
 *    contradict.
 * d) If we use tunnel routes, gateway could be not on-link.
 *
 * Attempt to reconcile all of these (alas, self-contradictory) conditions
 * results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalized it instead, so that the size
 * of code does not increase practically, but it becomes
 * much more general.
 * Every prefix is assigned a "scope" value: "host" is local address,
 * "link" is direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * Code is still hairy, but now it is apparently logically
 * consistent and very flexible. F.e. as by-product it allows
 * to co-exists in peace independent exterior and interior
 * routing processes.
 *
 * Normally it looks as following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		|
 *		|-> {link prefix} -> (gw, oif) [scope local]
 *				|
 *				|-> {local prefix} (terminal node)
 */

/*
 * Validate and resolve a single nexthop per the scheme above.
 * On success fills nh->nh_scope and nh->nh_dev (with a device reference
 * held) and may rewrite nh->nh_oif from the lookup result.
 * Returns 0 or a negative errno (-EINVAL, -ENODEV, -ENETDOWN, or the
 * fib_lookup() error).  Three cases:
 *   gateway + ONLINK  : gateway forced on-link, scope RT_SCOPE_LINK;
 *   gateway           : recursive fib_lookup() with a narrower scope;
 *   no gateway        : direct route on oif, scope RT_SCOPE_HOST.
 */
static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_nh *nh)
{
	int err;

	if (nh->nh_gw) {
		struct fib_result res;

#ifdef CONFIG_IP_ROUTE_PERVASIVE
		if (nh->nh_flags&RTNH_F_PERVASIVE)
			return 0;
#endif
		if (nh->nh_flags&RTNH_F_ONLINK) {
			struct net_device *dev;

			if (r->rtm_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL)
				return -ENODEV;
			if (!(dev->flags&IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		{
			/* Resolve the gateway with a strictly narrower scope
			 * than the route itself (see Picture above). */
			struct flowi fl = { .nl_u = { .ip4_u =
						      { .daddr = nh->nh_gw,
							.scope = r->rtm_scope + 1 } },
					    .oif = nh->nh_oif };

			/* It is not necessary, but requires a bit of thinking */
			if (fl.fl4_scope < RT_SCOPE_LINK)
				fl.fl4_scope = RT_SCOPE_LINK;
			if ((err = fib_lookup(&fl, &res)) != 0)
				return err;
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
			goto out;
		dev_hold(nh->nh_dev);
		err = -ENETDOWN;
		if (!(nh->nh_dev->flags & IFF_UP))
			goto out;
		err = 0;
out:
		fib_res_put(&res);
		return err;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
			return -EINVAL;

		in_dev = inetdev_by_index(nh->nh_oif);
		if (in_dev == NULL)
			return -ENODEV;
		if (!(in_dev->dev->flags&IFF_UP)) {
			in_dev_put(in_dev);
			return -ENETDOWN;
		}
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		in_dev_put(in_dev);
	}
	return 0;
}

/* Bucket index into fib_info_laddrhash for a preferred-source address. */
static inline unsigned int fib_laddr_hashfn(u32 val)
{
	unsigned int mask = (fib_hash_size - 1);

	return (val ^ (val >> 7) ^ (val >> 14)) & mask;
}

/*
 * Allocate a hash table: kmalloc for up to one page, whole pages above
 * that.  fib_hash_free() below mirrors the same size split.
 */
static struct hlist_head *fib_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kmalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head
*)
			__get_free_pages(GFP_KERNEL, get_order(bytes));
}

/* Free a table obtained from fib_hash_alloc(); NULL is a no-op. */
static void fib_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

/*
 * Rehash every fib_info from the current fib_info_hash/fib_info_laddrhash
 * into the freshly allocated (and already zeroed) tables, then install the
 * new tables and the new fib_hash_size.  Runs entirely under the write
 * lock so lookups never see a half-moved table.
 * NOTE(review): the old tables are not freed here -- presumably the caller
 * owns them; confirm against the rest of fib_create_info() (truncated in
 * this copy).
 */
static void fib_hash_move(struct hlist_head *new_info_hash,
			  struct hlist_head *new_laddrhash,
			  unsigned int new_size)
{
	unsigned int old_size = fib_hash_size;
	unsigned int i;

	write_lock(&fib_info_lock);
	fib_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			/* fib_info_hashfn() already masks with the new
			 * fib_hash_size set above */
			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	write_unlock(&fib_info_lock);
}

/*
 * Build (or find and share) a fib_info from a netlink route request.
 * NOTE(review): this function is TRUNCATED in this copy of the file -- it
 * ends mid-body below (the err_inval/failure labels and everything after
 * the hash-grow block are missing).  Do not edit without the full source.
 */
struct fib_info *fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
				 const struct nlmsghdr *nlh, int *errp)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	int nhs = 1;
#else
	const int nhs = 1;
#endif

	/* Fast check to catch the most weird cases */
	if (fib_props[r->rtm_type].scope > r->rtm_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (rta->rta_mp) {
		nhs = fib_count_nexthops(rta->rta_mp);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	/* Grow (double) the fib_info hash tables when they get full. */
	if (fib_info_cnt >= fib_hash_size) {
		unsigned int new_size = fib_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		/* NOTE(review): sized by sizeof(struct hlist_head *) rather
		 * than sizeof(struct hlist_head); the two coincide only
		 * because hlist_head holds a single pointer -- confirm
		 * against mainline, which uses sizeof(struct hlist_head). */
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_hash_alloc(bytes);
		new_laddrhash = fib_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			/* Partial allocation: free whichever succeeded and
			 * keep the old (possibly empty) tables. */
			fib_hash_free(new_info_hash, bytes);
			fib_hash_free(new_laddrhash, bytes);
		} else {
			memset(new_info_hash, 0, bytes);
			memset(new_laddrhash, 0, bytes);

			fib_hash_move(new_info_hash, new_laddrhash, new_size);
		}

		/* Still no table at all (first grow failed): give up. */
		if (!fib_hash_size)
			goto failure;
	}
/*
 * Trailing text from the hosting web page (keyboard-shortcut help),
 * not part of the source; preserved here, translated:
 *   ⌨️ Keyboard shortcuts (快捷键说明)
 *   Copy code     (复制代码): Ctrl + C
 *   Search code   (搜索代码): Ctrl + F
 *   Full screen   (全屏模式): F11
 *   Toggle theme  (切换主题): Ctrl + Shift + D
 *   Show shortcuts(显示快捷键): ?
 *   Increase font (增大字号): Ctrl + =
 *   Decrease font (减小字号): Ctrl + -
 *
 * NOTE(review): the file is truncated at this point -- fib_create_info()
 * above is incomplete; recover the remainder from the kernel tree.
 */