⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 neighbour.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
			/* NOTE(review): tail fragment of neigh_hh_init() — the
			 * function head lies before this chunk.  Visible part
			 * links the new hh entry onto the neighbour's list and
			 * selects the output method for its NUD state. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		/* Take a reference for the dst's cached copy as well. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts, where only old dev_queue_xmit
   worked, f.e. if you want to override normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	/* Returns 0 (drop silently) when the header cannot be built and
	 * the device's rebuild hook also fails; otherwise hand off to the
	 * generic transmit path. */
	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

/*
 * Output path used while the neighbour entry is not yet (or no longer)
 * resolved.  May queue the skb inside neigh_event_send() and return 0;
 * returns -EINVAL after freeing the skb on hard failure.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	/* neigh_event_send() returns 0 when the entry is usable now;
	 * non-zero means the skb was queued (or dropped) for later. */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			/* Writer path: we may install the hh cache, so take
			 * the neighbour lock exclusively and re-check dst->hh
			 * under it. */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock is enough just to snapshot neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
	/* fall through: discard also frees the skb and returns -EINVAL */
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

/*
 * Output path for NUD_CONNECTED neighbours: build the hardware header
 * from neigh->ha under the read lock and transmit.  Assumes dst and
 * dst->neighbour are valid (no NULL checks, unlike the resolve path).
 */
int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

/*
 * Proxy-queue timer handler: walk tbl->proxy_queue, redo every entry
 * whose scheduled time has passed, and re-arm the timer for the
 * earliest remaining entry.  Runs with the queue's spinlock held.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;	/* 0 = nothing left to schedule */
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		/* Advance before possibly unlinking 'back'. */
		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

/*
 * Queue an skb for delayed proxy processing, spreading replies over a
 * random delay in [0, proxy_delay).  Drops the skb when the queue is
 * over p->proxy_qlen.  Takes a device reference released by
 * neigh_proxy_process().
 *
 * NOTE(review): net_random() % p->proxy_delay divides by proxy_delay —
 * presumably callers only enqueue when proxy_delay != 0; verify.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* If the timer was already pending for an earlier deadline, keep
	 * that earlier expiry instead of pushing it back. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

/*
 * Allocate a per-device neigh_parms by cloning the table defaults and
 * link it onto tbl->parms.next under the table lock.  Returns NULL on
 * allocation failure or if the device's neigh_setup hook rejects it.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);

	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			/* Give the driver a chance to veto/adjust. */
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

/* RCU callback: drop the final reference after a grace period. */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

/*
 * Unlink parms from the table's list and schedule its release via RCU.
 * The table's default parms (&tbl->parms) are never released here.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

/* Final free, called once the refcount drops to zero. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

/*
 * Initialize a neighbour table: slab cache, per-cpu stats, /proc entry,
 * hash tables, locks and the gc/proxy timers.  Panics on allocation
 * failure (boot-time only).  Does not register the table on the global
 * list — see neigh_table_init().
 */
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* Start with a 2-bucket hash; it grows on demand elsewhere. */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}

/*
 * Full table init: initialize and then register on the global
 * neigh_tables list, warning if a table for this family exists already.
 */
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

/*
 * Tear down a neighbour table: stop timers, flush queues and entries,
 * unregister from the global list, and free hashes/stats/slab.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}

/*
 * RTM_DELNEIGH handler: delete a neighbour (or proxy entry with
 * NTF_PROXY) identified by the NDA_DST attribute.  Note the list walk
 * drops neigh_tbl_lock as soon as the matching family is found.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		/* "Delete" == force the entry to NUD_FAILED. */
		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

/*
 * RTM_NEWNEIGH handler: create or update a neighbour (or proxy entry
 * with NTF_PROXY).  Honors NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE
 * semantics; without REPLACE an existing entry is not overridden.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;

		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

/*
 * Dump one neigh_parms as a nested NDTA_PARMS netlink attribute block.
 * The NLA_PUT_* macros jump to nla_put_failure when the skb runs out
 * of tailroom, cancelling the whole nest.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}

/* NOTE(review): neightbl_fill_info() is truncated here — the remainder
 * of its body lies beyond this chunk. */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -