/*
 * fib_frontend.c -- IPv4 Forwarding Information Base: FIB frontend.
 * NOTE(review): header comment reconstructed; the original lines here
 * were code-viewer UI text, not source.
 */
};static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib_config *cfg){ struct nlattr *attr; int err, remaining; struct rtmsg *rtm; err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy); if (err < 0) goto errout; memset(cfg, 0, sizeof(*cfg)); rtm = nlmsg_data(nlh); cfg->fc_dst_len = rtm->rtm_dst_len; cfg->fc_tos = rtm->rtm_tos; cfg->fc_table = rtm->rtm_table; cfg->fc_protocol = rtm->rtm_protocol; cfg->fc_scope = rtm->rtm_scope; cfg->fc_type = rtm->rtm_type; cfg->fc_flags = rtm->rtm_flags; cfg->fc_nlflags = nlh->nlmsg_flags; cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid; cfg->fc_nlinfo.nlh = nlh; if (cfg->fc_type > RTN_MAX) { err = -EINVAL; goto errout; } nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) { switch (nla_type(attr)) { case RTA_DST: cfg->fc_dst = nla_get_be32(attr); break; case RTA_OIF: cfg->fc_oif = nla_get_u32(attr); break; case RTA_GATEWAY: cfg->fc_gw = nla_get_be32(attr); break; case RTA_PRIORITY: cfg->fc_priority = nla_get_u32(attr); break; case RTA_PREFSRC: cfg->fc_prefsrc = nla_get_be32(attr); break; case RTA_METRICS: cfg->fc_mx = nla_data(attr); cfg->fc_mx_len = nla_len(attr); break; case RTA_MULTIPATH: cfg->fc_mp = nla_data(attr); cfg->fc_mp_len = nla_len(attr); break; case RTA_FLOW: cfg->fc_flow = nla_get_u32(attr); break; case RTA_TABLE: cfg->fc_table = nla_get_u32(attr); break; } } return 0;errout: return err;}static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg){ struct fib_config cfg; struct fib_table *tb; int err; err = rtm_to_fib_config(skb, nlh, &cfg); if (err < 0) goto errout; tb = fib_get_table(cfg.fc_table); if (tb == NULL) { err = -ESRCH; goto errout; } err = tb->tb_delete(tb, &cfg);errout: return err;}static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg){ struct fib_config cfg; struct fib_table *tb; int err; err = rtm_to_fib_config(skb, nlh, &cfg); if (err < 0) goto errout; tb = fib_new_table(cfg.fc_table); if (tb == NULL) { 
err = -ENOBUFS; goto errout; } err = tb->tb_insert(tb, &cfg);errout: return err;}static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb){ unsigned int h, s_h; unsigned int e = 0, s_e; struct fib_table *tb; struct hlist_node *node; int dumped = 0; if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) return ip_rt_dump(skb, cb); s_h = cb->args[0]; s_e = cb->args[1]; for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { e = 0; hlist_for_each_entry(tb, node, &fib_table_hash[h], tb_hlist) { if (e < s_e) goto next; if (dumped) memset(&cb->args[2], 0, sizeof(cb->args) - 2 * sizeof(cb->args[0])); if (tb->tb_dump(tb, skb, cb) < 0) goto out; dumped = 1;next: e++; } }out: cb->args[1] = e; cb->args[0] = h; return skb->len;}/* Prepare and feed intra-kernel routing request. Really, it should be netlink message, but :-( netlink can be not configured, so that we feed it directly to fib engine. It is legal, because all events occur only when netlink is already locked. 
*/static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa){ struct fib_table *tb; struct fib_config cfg = { .fc_protocol = RTPROT_KERNEL, .fc_type = type, .fc_dst = dst, .fc_dst_len = dst_len, .fc_prefsrc = ifa->ifa_local, .fc_oif = ifa->ifa_dev->dev->ifindex, .fc_nlflags = NLM_F_CREATE | NLM_F_APPEND, }; if (type == RTN_UNICAST) tb = fib_new_table(RT_TABLE_MAIN); else tb = fib_new_table(RT_TABLE_LOCAL); if (tb == NULL) return; cfg.fc_table = tb->tb_id; if (type != RTN_LOCAL) cfg.fc_scope = RT_SCOPE_LINK; else cfg.fc_scope = RT_SCOPE_HOST; if (cmd == RTM_NEWROUTE) tb->tb_insert(tb, &cfg); else tb->tb_delete(tb, &cfg);}void fib_add_ifaddr(struct in_ifaddr *ifa){ struct in_device *in_dev = ifa->ifa_dev; struct net_device *dev = in_dev->dev; struct in_ifaddr *prim = ifa; __be32 mask = ifa->ifa_mask; __be32 addr = ifa->ifa_local; __be32 prefix = ifa->ifa_address&mask; if (ifa->ifa_flags&IFA_F_SECONDARY) { prim = inet_ifa_byprefix(in_dev, prefix, mask); if (prim == NULL) { printk(KERN_DEBUG "fib_add_ifaddr: bug: prim == NULL\n"); return; } } fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim); if (!(dev->flags&IFF_UP)) return; /* Add broadcast address, if it is explicitly assigned. */ if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); if (!ZERONET(prefix) && !(ifa->ifa_flags&IFA_F_SECONDARY) && (prefix != addr || ifa->ifa_prefixlen < 32)) { fib_magic(RTM_NEWROUTE, dev->flags&IFF_LOOPBACK ? 
RTN_LOCAL : RTN_UNICAST, prefix, ifa->ifa_prefixlen, prim); /* Add network specific broadcasts, when it takes a sense */ if (ifa->ifa_prefixlen < 31) { fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim); fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix|~mask, 32, prim); } }}static void fib_del_ifaddr(struct in_ifaddr *ifa){ struct in_device *in_dev = ifa->ifa_dev; struct net_device *dev = in_dev->dev; struct in_ifaddr *ifa1; struct in_ifaddr *prim = ifa; __be32 brd = ifa->ifa_address|~ifa->ifa_mask; __be32 any = ifa->ifa_address&ifa->ifa_mask;#define LOCAL_OK 1#define BRD_OK 2#define BRD0_OK 4#define BRD1_OK 8 unsigned ok = 0; if (!(ifa->ifa_flags&IFA_F_SECONDARY)) fib_magic(RTM_DELROUTE, dev->flags&IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, any, ifa->ifa_prefixlen, prim); else { prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); if (prim == NULL) { printk(KERN_DEBUG "fib_del_ifaddr: bug: prim == NULL\n"); return; } } /* Deletion is more complicated than add. We should take care of not to delete too much :-) Scan address list to be sure that addresses are really gone. */ for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { if (ifa->ifa_local == ifa1->ifa_local) ok |= LOCAL_OK; if (ifa->ifa_broadcast == ifa1->ifa_broadcast) ok |= BRD_OK; if (brd == ifa1->ifa_broadcast) ok |= BRD1_OK; if (any == ifa1->ifa_broadcast) ok |= BRD0_OK; } if (!(ok&BRD_OK)) fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); if (!(ok&BRD1_OK)) fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim); if (!(ok&BRD0_OK)) fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); if (!(ok&LOCAL_OK)) { fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); /* Check, that this local address finally disappeared. */ if (inet_addr_type(ifa->ifa_local) != RTN_LOCAL) { /* And the last, but not the least thing. We must flush stray FIB entries. First of all, we scan fib_info list searching for stray nexthop entries, then ignite fib_flush. 
*/ if (fib_sync_down(ifa->ifa_local, NULL, 0)) fib_flush(); } }#undef LOCAL_OK#undef BRD_OK#undef BRD0_OK#undef BRD1_OK}static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ){ struct fib_result res; struct flowi fl = { .mark = frn->fl_mark, .nl_u = { .ip4_u = { .daddr = frn->fl_addr, .tos = frn->fl_tos, .scope = frn->fl_scope } } };#ifdef CONFIG_IP_MULTIPLE_TABLES res.r = NULL;#endif frn->err = -ENOENT; if (tb) { local_bh_disable(); frn->tb_id = tb->tb_id; frn->err = tb->tb_lookup(tb, &fl, &res); if (!frn->err) { frn->prefixlen = res.prefixlen; frn->nh_sel = res.nh_sel; frn->type = res.type; frn->scope = res.scope; fib_res_put(&res); } local_bh_enable(); }}static void nl_fib_input(struct sk_buff *skb){ struct fib_result_nl *frn; struct nlmsghdr *nlh; struct fib_table *tb; u32 pid; nlh = nlmsg_hdr(skb); if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len || nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) return; skb = skb_clone(skb, GFP_KERNEL); if (skb == NULL) return; nlh = nlmsg_hdr(skb); frn = (struct fib_result_nl *) NLMSG_DATA(nlh); tb = fib_get_table(frn->tb_id_in); nl_fib_lookup(frn, tb); pid = NETLINK_CB(skb).pid; /* pid of sending process */ NETLINK_CB(skb).pid = 0; /* from kernel */ NETLINK_CB(skb).dst_group = 0; /* unicast */ netlink_unicast(fibnl, skb, pid, MSG_DONTWAIT);}static void nl_fib_lookup_init(void){ fibnl = netlink_kernel_create(&init_net, NETLINK_FIB_LOOKUP, 0, nl_fib_input, NULL, THIS_MODULE);}static void fib_disable_ip(struct net_device *dev, int force){ if (fib_sync_down(0, dev, force)) fib_flush(); rt_cache_flush(0); arp_ifdown(dev);}static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr){ struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; switch (event) { case NETDEV_UP: fib_add_ifaddr(ifa);#ifdef CONFIG_IP_ROUTE_MULTIPATH fib_sync_up(ifa->ifa_dev->dev);#endif rt_cache_flush(-1); break; case NETDEV_DOWN: fib_del_ifaddr(ifa); if (ifa->ifa_dev->ifa_list == NULL) { /* Last address was 
deleted from this interface. Disable IP. */ fib_disable_ip(ifa->ifa_dev->dev, 1); } else { rt_cache_flush(-1); } break; } return NOTIFY_DONE;}static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr){ struct net_device *dev = ptr; struct in_device *in_dev = __in_dev_get_rtnl(dev); if (dev->nd_net != &init_net) return NOTIFY_DONE; if (event == NETDEV_UNREGISTER) { fib_disable_ip(dev, 2); return NOTIFY_DONE; } if (!in_dev) return NOTIFY_DONE; switch (event) { case NETDEV_UP: for_ifa(in_dev) { fib_add_ifaddr(ifa); } endfor_ifa(in_dev);#ifdef CONFIG_IP_ROUTE_MULTIPATH fib_sync_up(dev);#endif rt_cache_flush(-1); break; case NETDEV_DOWN: fib_disable_ip(dev, 0); break; case NETDEV_CHANGEMTU: case NETDEV_CHANGE: rt_cache_flush(0); break; } return NOTIFY_DONE;}static struct notifier_block fib_inetaddr_notifier = { .notifier_call =fib_inetaddr_event,};static struct notifier_block fib_netdev_notifier = { .notifier_call =fib_netdev_event,};void __init ip_fib_init(void){ unsigned int i; for (i = 0; i < FIB_TABLE_HASHSZ; i++) INIT_HLIST_HEAD(&fib_table_hash[i]); fib4_rules_init(); register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); nl_fib_lookup_init(); rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL); rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL); rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);}EXPORT_SYMBOL(inet_addr_type);EXPORT_SYMBOL(ip_dev_find);