dn_route.c
Linux kernel source code (C)
}	/* closing brace of a function continued from the previous part of this listing */

static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowi fl = { .nl_u = { .dn_u =
				     { .daddr = cb->dst,
				       .saddr = cb->src,
				       .scope = RT_SCOPE_UNIVERSE,
				    } },
			    .mark = skb->mark,
			    .iif = skb->dev->ifindex };
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = in_dev->dn_ptr) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fl.fld_src == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fl, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
	} else {
		__le16 src_map = fl.fld_src;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "Bug in dn_route_input_slow() "
						 "No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = fl.fld_src; /* no NAT support for now */

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fl, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fl.fld_dst;
		}
		fl.fld_src = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here. We only check for forwarding
		 * being turned off; if you want to forward only
		 * intra-area, it's up to you to set the routing tables
		 * up correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fl.oif == 0)
			dn_fib_select_multipath(&fl, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);
		/* fall through */

	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fl.fld_src = cb->dst;
		fl.fld_dst = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;

	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	rt->rt_saddr      = fl.fld_src;
	rt->rt_daddr      = fl.fld_dst;
	rt->rt_gateway    = fl.fld_dst;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map    = fl.fld_dst;
	rt->rt_src_map    = fl.fld_src;

	rt->fl.fld_src    = cb->src;
	rt->fl.fld_dst    = cb->dst;
	rt->fl.oif        = 0;
	rt->fl.iif        = in_dev->ifindex;
	rt->fl.mark       = fl.mark;

	rt->u.dst.flags = DST_HOST;
	rt->u.dst.neighbour = neigh;
	rt->u.dst.dev = out_dev;
	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_rt_bug;
	switch(res.type) {
	case RTN_UNICAST:
		rt->u.dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->u.dst.output = dn_output;
		rt->u.dst.input = dn_nsp_rx;
		rt->u.dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->u.dst.input = dn_blackhole;
	}
	rt->rt_flags = flags;
	if (rt->u.dst.dev)
		dev_hold(rt->u.dst.dev);

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->u.dst);
	goto done;
}
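/*
 * Overview of the slow path above: after rejecting packets with a zero
 * source or a source that spoofs one of our own addresses, the function
 * consults the FIB and dispatches on the resulting route type. RTN_UNICAST
 * deliberately falls through to the RTN_BLACKHOLE/RTN_UNREACHABLE break
 * once it has set its flags and preferred source. Every exit funnels
 * through the "done" label so that the in_dev/out_dev, neighbour and FIB
 * result references taken along the way are released exactly once.
 */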
int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb->dst)
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->u.dst.dn_next)) {
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
		    (rt->fl.mark == skb->mark) &&
		    (rt->fl.iif == cb->iif)) {
			dst_use(&rt->u.dst, jiffies);
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rt;
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}
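/*
 * Note on the fast path above: a cached entry only matches if its entire
 * key tuple (fld_src, fld_dst, oif == 0, mark, iif) agrees with the
 * incoming packet, which keeps input routes distinct from cached output
 * routes. dst_use() takes a reference and updates the use accounting while
 * still inside the RCU read-side critical section, so the entry cannot be
 * freed between being found on the chain and being attached to the skb.
 */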
static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	long expires;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
			       rt->u.dst.error) < 0)
		goto rtattr_failure;
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = __constant_htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb->dst;
		if (!err && rt->u.dst.error)
			err = rt->u.dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF-1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, NETLINK_CB(in_skb).pid);

out_free:
	kfree_skb(skb);
	return err;
}
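/*
 * Illustrative userspace sketch (not part of dn_route.c): one plausible
 * way to exercise dn_cache_getroute() is to send an RTM_GETROUTE request
 * for AF_DECnet over a NETLINK_ROUTE socket. The node address 0x0401 is an
 * arbitrary example value; a real client would also recv() and parse the
 * RTM_NEWROUTE reply that dn_rt_fill_info() builds.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[64];
	} req;
	unsigned short dst = 0x0401;	/* example 16-bit DECnet address */
	struct rtattr *rta;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_seq = 1;
	req.rtm.rtm_family = AF_DECnet;	/* dispatched to the DECnet handler */
	req.rtm.rtm_dst_len = 16;

	/* Append RTA_DST: the 2-byte destination that dn_cache_getroute()
	 * copies into fl.fld_dst. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(2);
	memcpy(RTA_DATA(rta), &dst, 2);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(2);

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		perror("send");
	close(fd);
	return 0;
}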
/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rcu_dereference(rt);
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rt->u.dst.dn_next;
	while(!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rcu_dereference(rt);
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}
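/*
 * Locking note for the /proc iterator above: when it finds a live entry,
 * dn_rt_cache_get_first() returns with rcu_read_lock_bh() still held for
 * that entry's bucket; dn_rt_cache_get_next() drops and re-takes the lock
 * as it crosses from one bucket to the next; dn_rt_cache_seq_stop()
 * releases it once the walk ends on a live element. This is why seq_stop
 * only unlocks when v is non-NULL: if the walk ran off the final bucket,
 * get_next has already dropped the lock itself.
 */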
static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
			rt->u.dst.dev ? rt->u.dst.dev->name : "*",
			dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
			dn_addr2asc(dn_ntohs(rt->rt_saddr), buf2),
			atomic_read(&rt->u.dst.__refcnt),
			rt->u.dst.__use,
			(int) dst_metric(&rt->u.dst, RTAX_RTT));
	return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	init_timer(&dn_route_timer);
	dn_route_timer.function = dn_dst_check_expire;
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = num_physpages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
#else
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_cache_dump);
#endif
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove(&init_net, "decnet_cache");
}
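/*
 * Worked example of the hash sizing in dn_route_init(), assuming
 * PAGE_SIZE == 4096 and sizeof(struct dn_rt_hash_bucket) == 8 (one
 * pointer plus a spinlock on a typical non-debug 32-bit build; the real
 * size is configuration dependent). With order == 0, one page holds
 * 4096 / 8 == 512 buckets, already a power of two, so the rounding loop
 * leaves the count unchanged, and the final dn_rt_hash_mask-- turns the
 * count 512 into the mask 511 (0x1FF). Since the count is rounded down
 * to a power of two, the ">= 2048" guard effectively caps the table at
 * the 1024 entries the comment in dn_route_init() asks for.
 */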
