📄 my_route.c
        return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
    }

    return 0;
}

int myip_route_output_key(struct rtable **rp, struct flowi *flp)
{
    return myip_route_output_flow(rp, flp, NULL, 0);
}

static int myrt_garbage_collect(void)
{
    return 0;
}

static struct dst_entry *myipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
    return NULL;
}

static void myipv4_dst_destroy(struct dst_entry *dst)
{
}

static void myipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                              int how)
{
}

static struct dst_entry *myipv4_negative_advice(struct dst_entry *dst)
{
    return NULL;
}

static void myipv4_link_failure(struct sk_buff *skb)
{
}

static void myip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops myipv4_dst_ops = {
    .family          = MY_AF_INET,
    .protocol        = __constant_htons(ETH_P_IP),
    .gc              = myrt_garbage_collect,
    .check           = myipv4_dst_check,
    .destroy         = myipv4_dst_destroy,
    .ifdown          = myipv4_dst_ifdown,
    .negative_advice = myipv4_negative_advice,
    .link_failure    = myipv4_link_failure,
    .update_pmtu     = myip_rt_update_pmtu,
    .entry_size      = sizeof(struct rtable),
};

static void *myalloc_large_system_hash(const char *tablename,
                                       unsigned long bucketsize,
                                       unsigned long numentries,
                                       int scale,
                                       int flags,
                                       unsigned int *_hash_shift,
                                       unsigned int *_hash_mask,
                                       unsigned long limit,
                                       unsigned long *table_order)
{
    unsigned long long max = limit;
    unsigned long log2qty, size;
    void *table = NULL;

    /* allow the kernel cmdline to have a say */
    if (!numentries) {
        /* round applicable memory size up to nearest megabyte */
        numentries = num_physpages;
        numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
        numentries >>= 20 - PAGE_SHIFT;
        numentries <<= 20 - PAGE_SHIFT;

        /* limit to 1 bucket per 2^scale bytes of low memory */
        if (scale > PAGE_SHIFT)
            numentries >>= (scale - PAGE_SHIFT);
        else
            numentries <<= (PAGE_SHIFT - scale);
    }
    /* round up to the nearest power of 2 in size */
    numentries = 1UL << (long_log2(numentries) + 1);

    /* limit allocation size to 1/16 total memory by default */
    if (max == 0) {
        max = ((unsigned long long)num_physpages << PAGE_SHIFT) >> 4;
        do_div(max, bucketsize);
    }

    if (numentries > max)
        numentries = max;

    log2qty = long_log2(numentries);

    do {
        size = bucketsize << log2qty;
        printk(KERN_INFO "bucketsize: %lu, %lu\n", size, bucketsize);
        {
            unsigned long order;
            for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
                ;
            table = (void *)__get_free_pages(GFP_ATOMIC, order);
            *table_order = order;
        }
    } while (!table && size > PAGE_SIZE && --log2qty);

    if (!table)
        panic("Failed to allocate %s hash table\n", tablename);

    printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
           tablename, (1U << log2qty), long_log2(size) - PAGE_SHIFT, size);

    if (_hash_shift)
        *_hash_shift = log2qty;
    if (_hash_mask)
        *_hash_mask = (1 << log2qty) - 1;

    return table;
}

static void myrt_run_flush(unsigned long dummy)
{
    int i;
    struct rtable *rth, *next;

    /* Flush is currently disabled: bail out before touching the table. */
    return;

    myrt_deadline = 0;

    get_random_bytes(&myrt_hash_rnd, 4);

    for (i = myrt_hash_mask; i >= 0; i--) {
        //spin_lock_bh(myrt_hash_lock_addr(i));
        rth = myrt_hash_table[i].chain;
        if (rth)
            myrt_hash_table[i].chain = NULL;
        //spin_unlock_bh(myrt_hash_lock_addr(i));

        for (; rth; rth = next) {
            next = rth->u.rt_next;
            myrt_free(rth);
        }
    }
}

static void myrt_check_expire(unsigned long dummy)
{
    printk(KERN_INFO "check expire!\n");
    mod_timer(&myrt_periodic_timer, jiffies + myip_rt_gc_interval);
}

static void myrt_secret_rebuild(unsigned long dummy)
{
    printk(KERN_INFO "secret rebuild!\n");
    mod_timer(&myrt_secret_timer, jiffies + myip_rt_secret_interval);
}
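/*
 * The code above and below uses several names this excerpt never defines:
 * myrt_hash_table, myrt_hash_mask, myrt_hash_rnd, myrt_deadline, the two
 * timers, and the helpers myrt_hash_code() / myrt_free().  The sketch below
 * is NOT part of the original file; it is a minimal guess at those
 * definitions, modeled on the stock net/ipv4/route.c of the same kernel
 * generation (needs <linux/jhash.h> and <linux/timer.h>), so the excerpt can
 * be read on its own.
 */
struct myrt_hash_bucket {
    struct rtable *chain;
};

static struct myrt_hash_bucket *myrt_hash_table;   /* allocated at init time */
static unsigned                 myrt_hash_mask;    /* number of buckets - 1  */
static unsigned int             myrt_hash_rnd;     /* hash perturbation seed */
static unsigned long            myrt_deadline;

static struct timer_list myrt_periodic_timer;      /* drives myrt_check_expire()   */
static struct timer_list myrt_secret_timer;        /* drives myrt_secret_rebuild() */

/* Bucket selection: the same jhash_3words() scheme the stock cache uses. */
static inline unsigned myrt_hash_code(u32 daddr, u32 saddr, u8 tos)
{
    return jhash_3words(daddr, saddr, (u32)tos, myrt_hash_rnd) & myrt_hash_mask;
}

/* Entries are freed through RCU so lockless readers never see them die. */
static inline void myrt_free(struct rtable *rt)
{
    call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}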
static void myip_handle_martian_source(struct net_device *dev,
                                       struct in_device *in_dev,
                                       struct sk_buff *skb,
                                       u32 daddr, u32 saddr)
{
    MYRT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (MYIN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
        printk(KERN_WARNING "martian source %u.%u.%u.%u from "
               "%u.%u.%u.%u, on dev %s\n",
               NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
        if (dev->hard_header_len && skb->mac.raw) {
            int i;
            unsigned char *p = skb->mac.raw;
            printk(KERN_WARNING "ll header: ");
            for (i = 0; i < dev->hard_header_len; i++, p++) {
                printk("%02x", *p);
                if (i < (dev->hard_header_len - 1))
                    printk(":");
            }
            printk("\n");
        }
    }
#endif
}

static int myip_rt_bug(struct sk_buff *skb)
{
    printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
           NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
           skb->dev ? skb->dev->name : "?");
    kfree_skb(skb);
    return 0;
}

#ifdef CONFIG_NET_CLS_ROUTE
static void myset_class_tag(struct rtable *rt, u32 tag)
{
    if (!(rt->u.dst.tclassid & 0xFFFF))
        rt->u.dst.tclassid |= tag & 0xFFFF;
    if (!(rt->u.dst.tclassid & 0xFFFF0000))
        rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static void myrt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
    struct fib_info *fi = res->fi;

    if (fi) {
        if (FIB_RES_GW(*res) &&
            FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
            rt->rt_gateway = FIB_RES_GW(*res);
        memcpy(rt->u.dst.metrics, fi->fib_metrics,
               sizeof(rt->u.dst.metrics));
        if (fi->fib_mtu == 0) {
            rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
            if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
                rt->rt_gateway != rt->rt_dst &&
                rt->u.dst.dev->mtu > 576)
                rt->u.dst.metrics[RTAX_MTU-1] = 576;
        }
#ifdef CONFIG_NET_CLS_ROUTE
        rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
    } else
        rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;

    if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
        rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
    if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
        rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
    if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
        rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int,
                                                 rt->u.dst.dev->mtu - 40,
                                                 myip_rt_min_advmss);
    if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
        rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
    myset_class_tag(rt, myfib_rules_tclass(res));
#endif
    myset_class_tag(rt, itag);
#endif

    rt->rt_type = res->type;
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
    return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u,
                  sizeof(fl1->nl_u.ip4_u)) == 0 &&
           fl1->oif == fl2->oif &&
           fl1->iif == fl2->iif;
}

static __inline__ void myrt_drop(struct rtable *rt)
{
    multipath_remove(rt);
    ip_rt_put(rt);
    call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ int myrt_valuable(struct rtable *rth)
{
    return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
           rth->u.dst.expires;
}

static inline u32 myrt_score(struct rtable *rt)
{
    u32 score = jiffies - rt->u.dst.lastuse;

    score = ~score & ~(3 << 30);

    if (myrt_valuable(rt))
        score |= (1 << 31);

    if (!rt->fl.iif ||
        !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)))
        score |= (1 << 30);

    return score;
}
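/*
 * myrt_run_flush() above and myrt_intern_hash() below carry commented-out
 * calls to spin_lock_bh(myrt_hash_lock_addr(...)).  That helper is not shown
 * in this listing; a plausible definition, modeled on the hashed bucket locks
 * of the stock routing cache, would look roughly like this (an assumption,
 * not the author's code):
 */
#ifdef CONFIG_SMP
# define MYRT_HASH_LOCK_SZ 256
static spinlock_t myrt_hash_locks[MYRT_HASH_LOCK_SZ];
# define myrt_hash_lock_addr(slot) \
    (&myrt_hash_locks[(slot) & (MYRT_HASH_LOCK_SZ - 1)])

static inline void myrt_hash_lock_init(void)
{
    int i;
    for (i = 0; i < MYRT_HASH_LOCK_SZ; i++)
        spin_lock_init(&myrt_hash_locks[i]);
}
#else
/* On UP, spin_lock_bh(lock) ignores its argument and only disables BHs. */
# define myrt_hash_lock_addr(slot) ((spinlock_t *)NULL)
# define myrt_hash_lock_init()     do { } while (0)
#endif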
static int myrt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
    struct rtable *rth, **rthp;
    unsigned long now;
    struct rtable *cand, **candp;
    u32 min_score;
    int chain_length;
    int attempts = !in_softirq();

restart:
    chain_length = 0;
    min_score = ~(u32)0;
    cand = NULL;
    candp = NULL;
    now = jiffies;

    rthp = &myrt_hash_table[hash].chain;

    //spin_lock_bh(myrt_hash_lock_addr(hash));
    while ((rth = *rthp) != NULL) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
        if (!(rth->u.dst.flags & DST_BALANCED) &&
            compare_keys(&rth->fl, &rt->fl)) {
#else
        if (compare_keys(&rth->fl, &rt->fl)) {
#endif
            /* The route is already cached: move it to the chain head... */
            *rthp = rth->u.rt_next;
            rcu_assign_pointer(rth->u.rt_next, myrt_hash_table[hash].chain);
            rcu_assign_pointer(myrt_hash_table[hash].chain, rth);

            rth->u.dst.__use++;
            dst_hold(&rth->u.dst);
            rth->u.dst.lastuse = now;
            //spin_unlock_bh(myrt_hash_lock_addr(hash));

            /* ...and drop the duplicate we were asked to insert. */
            myrt_drop(rt);
            *rp = rth;
            return 0;
        }

        if (!atomic_read(&rth->u.dst.__refcnt)) {
            u32 score = myrt_score(rth);

            if (score <= min_score) {
                cand = rth;
                candp = rthp;
                min_score = score;
            }
        }

        chain_length++;

        rthp = &rth->u.rt_next;
    }

    if (cand) {
        /* Chain is too long: evict the least valuable unreferenced entry. */
        if (chain_length > myip_rt_gc_elasticity) {
            *candp = cand->u.rt_next;
            myrt_free(cand);
        }
    }

    /* Bind the route to a neighbour only for output routes and
     * unicast forwarding paths. */
    if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
        int err = myarp_bind_neighbour(&rt->u.dst);
        if (err) {
            //spin_unlock_bh(myrt_hash_lock_addr(hash));

            if (err != -ENOBUFS) {
                myrt_drop(rt);
                return err;
            }

            /* Neighbour table is full: squeeze the cache hard and retry
             * once (only when not in softirq context). */
            if (attempts-- > 0) {
                int saved_elasticity = myip_rt_gc_elasticity;
                int saved_int = myip_rt_gc_min_interval;
                myip_rt_gc_elasticity = 1;
                myip_rt_gc_min_interval = 0;
                myrt_garbage_collect();
                myip_rt_gc_min_interval = saved_int;
                myip_rt_gc_elasticity = saved_elasticity;
                goto restart;
            }

            if (net_ratelimit())
                printk(KERN_WARNING "Neighbour table overflow.\n");
            myrt_drop(rt);
            return -ENOBUFS;
        }
    }

    rt->u.rt_next = myrt_hash_table[hash].chain;
#if 1
    /* Debug: dump the bucket chain the new entry is being linked into. */
    if (rt->u.rt_next) {
        struct rtable *trt;
        printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
               NIPQUAD(rt->rt_dst));
        for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
            printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
        printk("\n");
    }
#endif
    myrt_hash_table[hash].chain = rt;
    //spin_unlock_bh(myrt_hash_lock_addr(hash));
    *rp = rt;
    return 0;
}
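/*
 * myrt_intern_hash() publishes entries with rcu_assign_pointer() so that the
 * lookup fast path can walk a bucket without taking the bucket lock.  The
 * matching read side is not part of this excerpt; the hypothetical helper
 * below is a minimal sketch of what it could look like, modeled on the
 * cache-hit path of the stock ip_route_input():
 */
static int myip_route_input_cached(struct sk_buff *skb, u32 daddr, u32 saddr,
                                   u8 tos, struct net_device *dev)
{
    unsigned hash = myrt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
    struct rtable *rth;

    rcu_read_lock();
    for (rth = rcu_dereference(myrt_hash_table[hash].chain); rth;
         rth = rcu_dereference(rth->u.rt_next)) {
        if (rth->fl.fl4_dst == daddr &&
            rth->fl.fl4_src == saddr &&
            rth->fl.iif == dev->ifindex &&
            rth->fl.oif == 0 &&
            rth->fl.fl4_tos == tos) {
            /* Cache hit: take a reference and attach the route to the skb. */
            rth->u.dst.lastuse = jiffies;
            dst_hold(&rth->u.dst);
            rth->u.dst.__use++;
            rcu_read_unlock();
            skb->dst = (struct dst_entry *)rth;
            return 0;
        }
    }
    rcu_read_unlock();

    /* Miss: the caller would fall back to the slow path
     * (FIB lookup followed by myip_mkroute_input()). */
    return -ENOENT;
}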
static inline int __mymkroute_input(struct sk_buff *skb,
                                    struct fib_result *res,
                                    struct in_device *in_dev,
                                    u32 daddr, u32 saddr, u32 tos,
                                    struct rtable **result)
{
    struct rtable *rth;
    int err;
    struct in_device *out_dev;
    unsigned flags = 0;
    u32 spec_dst, itag;

    /* get a working reference to the output device */
    out_dev = in_dev_get(FIB_RES_DEV(*res));
    if (out_dev == NULL) {
        if (net_ratelimit())
            printk(KERN_CRIT "Bug in ip_route_input"
                   "_slow(). Please, report\n");
        return -EINVAL;
    }

    err = myfib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
                                in_dev->dev, &spec_dst, &itag);
    if (err < 0) {
        myip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr);
        err = -EINVAL;
        goto cleanup;
    }

    if (err)
        flags |= RTCF_DIRECTSRC;

    if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
        (MYIN_DEV_SHARED_MEDIA(out_dev) ||
         myinet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
        flags |= RTCF_DOREDIRECT;

    if (skb->protocol != htons(ETH_P_IP)) {
        if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
            err = -EINVAL;
            goto cleanup;
        }
    }

    rth = dst_alloc(&myipv4_dst_ops);
    if (!rth) {
        err = -ENOBUFS;
        goto cleanup;
    }

    atomic_set(&rth->u.dst.__refcnt, 1);
    rth->u.dst.flags = DST_HOST;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
    if (res->fi->fib_nhs > 1)
        rth->u.dst.flags |= DST_BALANCED;
#endif
    if (in_dev->cnf.no_policy)
        rth->u.dst.flags |= DST_NOPOLICY;
    if (in_dev->cnf.no_xfrm)
        rth->u.dst.flags |= DST_NOXFRM;
    rth->fl.fl4_dst = daddr;
    rth->rt_dst     = daddr;
    rth->fl.fl4_tos = tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
    rth->fl.fl4_fwmark = skb->nfmark;
#endif
    rth->fl.fl4_src = saddr;
    rth->rt_src     = saddr;
    rth->rt_gateway = daddr;
    rth->rt_iif     = rth->fl.iif = in_dev->dev->ifindex;
    rth->u.dst.dev  = (out_dev)->dev;
    dev_hold(rth->u.dst.dev);
    rth->idev       = in_dev_get(rth->u.dst.dev);
    rth->fl.oif     = 0;
    rth->rt_spec_dst = spec_dst;

    rth->u.dst.input  = myip_forward;
    rth->u.dst.output = myip_output;

    myrt_set_nexthop(rth, res, itag);

    rth->rt_flags = flags;

    *result = rth;
    err = 0;
cleanup:
    /* release the working reference to the output device */
    in_dev_put(out_dev);
    return err;
}

static inline int myip_mkroute_input_def(struct sk_buff *skb,
                                         struct fib_result *res,
                                         const struct flowi *fl,
                                         struct in_device *in_dev,
                                         u32 daddr, u32 saddr, u32 tos)
{
    struct rtable *rth = NULL;
    int err;
    unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
    if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
        myfib_select_multipath(fl, res);
#endif

    /* create a routing cache entry */
    err = __mymkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
    if (err)
        return err;

    /* put it into the cache */
    hash = myrt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
    return myrt_intern_hash(hash, rth, (struct rtable **)&skb->dst);
}

static inline int myip_mkroute_input(struct sk_buff *skb,
                                     struct fib_result *res,
                                     const struct flowi *fl,
                                     struct in_device *in_dev,
                                     u32 daddr, u32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
    struct rtable *rth = NULL, *rtres;
    unsigned char hop, hopcount;
    int err = -EINVAL;
    unsigned int hash;

    if (res->fi)
        hopcount = res->fi->fib_nhs;
    else
        hopcount = 1;

    /* distinguish between multipath and singlepath */
    if (hopcount < 2)
        return myip_mkroute_input_def(skb, res, fl, in_dev, daddr,
                                      saddr, tos);

    /* add all alternative nexthops to the routing cache */
    for (hop = 0; hop < hopcount; hop++) {
        res->nh_sel = hop;

        /* put reference to previous result */
        if (hop)
            ip_rt_put(rtres);

        /* create a routing cache entry */
        err = __mymkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
        if (err)
            return err;

        /* put it into the cache */
        hash = myrt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
        err = myrt_intern_hash(hash, rth, &rtres);
        if (err)
            return err;

        //multipath_set_nhinfo(rth, FIB_RES_NETWORK(*res), FIB_RES_NETMASK(*res),
        //                     res->prefixlen, &FIB_RES_NH(*res));
    }
    skb->dst = &rtres->u.dst;
    return err;
#else /* !CONFIG_IP_ROUTE_MULTIPATH_CACHED */
    return myip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
#endif
}
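/*
 * Nothing in this excerpt shows how the hash table and the two timers get set
 * up.  A minimal init routine, modeled on the stock ip_rt_init() and reusing
 * the allocator defined above, might look roughly like this (a sketch under
 * that assumption, not the author's actual init code):
 */
static unsigned long myrt_hash_table_order;

int __init myip_rt_init(void)
{
    unsigned int hash_shift;

    /* Seed the hash so remote hosts cannot predict bucket placement. */
    get_random_bytes(&myrt_hash_rnd, sizeof(myrt_hash_rnd));

    /* One bucket per 2^15 bytes of memory, rounded to a power of two. */
    myrt_hash_table = myalloc_large_system_hash("MY IP route cache",
                                                sizeof(struct myrt_hash_bucket),
                                                0, 15, 0,
                                                &hash_shift,
                                                &myrt_hash_mask,
                                                0,
                                                &myrt_hash_table_order);
    memset(myrt_hash_table, 0,
           (myrt_hash_mask + 1) * sizeof(struct myrt_hash_bucket));

    /* Periodic expiry scan and periodic re-keying of the hash seed. */
    init_timer(&myrt_periodic_timer);
    myrt_periodic_timer.function = myrt_check_expire;
    mod_timer(&myrt_periodic_timer, jiffies + myip_rt_gc_interval);

    init_timer(&myrt_secret_timer);
    myrt_secret_timer.function = myrt_secret_rebuild;
    mod_timer(&myrt_secret_timer, jiffies + myip_rt_secret_interval);

    return 0;
}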