/*
 *	neighbour.c — generic neighbour discovery cache (ARP/NDISC core),
 *	Linux 2.4-era net/core code.  Excerpt; the file continues beyond
 *	this fragment.
 */
/*
 *	Re-derive the NUD state of @n from its confirmation timestamp:
 *	a REACHABLE entry whose confirmation has aged past reachable_time
 *	is demoted to STALE; a still-fresh VALID (but not REACHABLE) entry
 *	is promoted back to REACHABLE.  NOARP/PERMANENT entries are never
 *	touched.  ASSERT_WL(n) indicates the caller must already hold
 *	n->lock for writing (as neigh_update() does).
 */
static void neigh_sync(struct neighbour *n)
{
	unsigned long now = jiffies;
	u8 state = n->nud_state;

	ASSERT_WL(n);
	if (state&(NUD_NOARP|NUD_PERMANENT))
		return;
	if (state&NUD_REACHABLE) {
		/* Confirmation too old: stop trusting the cached lladdr. */
		if (now - n->confirmed > n->parms->reachable_time) {
			n->nud_state = NUD_STALE;
			neigh_suspect(n);
		}
	} else if (state&NUD_VALID) {
		/* Fresh confirmation arrived meanwhile: cancel any pending
		   probe timer and reconnect the fast output path. */
		if (now - n->confirmed < n->parms->reachable_time) {
			neigh_del_timer(n);
			n->nud_state = NUD_REACHABLE;
			neigh_connect(n);
		}
	}
}

/*
 *	Periodic garbage-collection worker for a neighbour table.  Runs off
 *	tbl->gc_timer (via SMP_TIMER_NAME: on SMP builds this is the tasklet
 *	body scheduled by the wrapper below).  Holds tbl->lock for writing
 *	for the whole sweep, taking each entry's n->lock nested inside.
 */
static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;
	unsigned long now = jiffies;
	int i;

	write_lock(&tbl->lock);

	/*
	 *	Periodically (every 300s) recompute ReachableTime for every
	 *	parms block from a random function, per RFC-style jitter.
	 */
	if (now - tbl->last_rand > 300*HZ) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p=&tbl->parms; p; p = p->next)
			p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
	}

	/* Sweep every hash bucket, unlinking dead entries in place. */
	for (i=0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			unsigned state;

			write_lock(&n->lock);
			state = n->nud_state;
			/* PERMANENT entries and entries with a live probe
			   timer are never reaped here. */
			if (state&(NUD_PERMANENT|NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			/* Keep "used" from lagging behind "confirmed"
			   (signed jiffies comparison handles wraparound). */
			if ((long)(n->used - n->confirmed) < 0)
				n->used = n->confirmed;

			/* refcnt==1 means only the hash table references the
			   entry: safe to unlink FAILED or long-unused ones.
			   neigh_release() drops the table's reference. */
			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED || now - n->used > n->parms->gc_staletime)) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;	/* *np already advanced */
			}

			/* Same ageing rule as neigh_sync(): demote stale
			   REACHABLE entries. */
			if (n->nud_state&NUD_REACHABLE &&
			    now - n->confirmed > n->parms->reachable_time) {
				n->nud_state = NUD_STALE;
				neigh_suspect(n);
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
	}

	/* Re-arm ourselves for the next sweep. */
	mod_timer(&tbl->gc_timer, now + tbl->gc_interval);
	write_unlock(&tbl->lock);
}

#ifdef CONFIG_SMP
/*
 *	On SMP the timer callback only schedules the GC tasklet, so the
 *	sweep above runs in tasklet context rather than timer context.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;

	tasklet_schedule(&tbl->gc_task);
}
#endif

/* Total number of solicitations before an entry is declared FAILED. */
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return p->ucast_probes + p->app_probes + p->mcast_probes;
}

/* Called when a timer expires for a neighbour entry: drives the
   INCOMPLETE/DELAY/PROBE resolution state machine.  The timer holds a
   reference on the entry, released at "out". */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now = jiffies;
	struct neighbour *neigh = (struct neighbour*)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;

	/* Timer fired for an entry not expecting one; on UP this is a bug
	   worth logging (on SMP a benign race with del_timer is possible). */
	if (!(state&NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk("neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	/* A confirmation arrived while we were waiting: entry is alive. */
	if ((state&NUD_VALID) &&
	    now - neigh->confirmed < neigh->parms->reachable_time) {
		neigh->nud_state = NUD_REACHABLE;
		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
		neigh_connect(neigh);
		goto out;
	}
	/* DELAY expired without confirmation: start active probing. */
	if (state == NUD_DELAY) {
		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
		neigh->nud_state = NUD_PROBE;
		atomic_set(&neigh->probes, 0);
	}

	/* All probes exhausted: declare failure and flush queued packets. */
	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh->tbl->stats.res_failed++;
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very
		   complicated routine. Particularly, it can hit the same
		   neighbour entry!  So that, we try to be accurate and
		   avoid dead loop. --ANK
		   (The lock is dropped around error_report, which may
		   re-enter this entry; re-check nud_state each pass.) */
		while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		goto out;
	}

	/* More probes to go: re-arm the retransmit timer, then solicit
	   outside the lock (solicit may sleep in driver paths — NOTE:
	   presumably not here, but it does take its own locks). */
	neigh->timer.expires = now + neigh->parms->retrans_time;
	add_timer(&neigh->timer);
	write_unlock(&neigh->lock);

	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
	atomic_inc(&neigh->probes);
	return;

out:
	write_unlock(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);	/* drop the timer's reference */
}

/*
 *	Slow path of neigh_event_send(): entry is not in a CONNECTED/DELAY/
 *	PROBE state.  Starts resolution if needed and decides the fate of
 *	@skb.  Returns 1 when the caller must NOT transmit yet (skb was
 *	queued on arp_queue, dropped on overflow, or freed on failure);
 *	returns 0 when transmission may proceed.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	write_lock_bh(&neigh->lock);
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
		if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
			/* NONE/FAILED: begin resolution if any probe type
			   is configured, else fail immediately. */
			if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
				atomic_set(&neigh->probes, neigh->parms->ucast_probes);
				neigh->nud_state = NUD_INCOMPLETE;
				neigh_hold(neigh);	/* reference for the timer */
				neigh->timer.expires = jiffies + neigh->parms->retrans_time;
				add_timer(&neigh->timer);
				/* Drop the lock around the first solicit. */
				write_unlock_bh(&neigh->lock);
				neigh->ops->solicit(neigh, skb);
				atomic_inc(&neigh->probes);
				write_lock_bh(&neigh->lock);
			} else {
				neigh->nud_state = NUD_FAILED;
				write_unlock_bh(&neigh->lock);
				if (skb)
					kfree_skb(skb);
				return 1;
			}
		}
		if (neigh->nud_state == NUD_INCOMPLETE) {
			/* Park the packet until resolution completes; on
			   queue overflow the oldest packet is dropped. */
			if (skb) {
				if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
					struct sk_buff *buff;
					buff = neigh->arp_queue.prev;
					__skb_unlink(buff, &neigh->arp_queue);
					kfree_skb(buff);
				}
				__skb_queue_head(&neigh->arp_queue, skb);
			}
			write_unlock_bh(&neigh->lock);
			return 1;
		}
		if (neigh->nud_state == NUD_STALE) {
			/* STALE: transmit now, but schedule a delayed
			   reachability probe. */
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh_hold(neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
			add_timer(&neigh->timer);
		}
	}
	write_unlock_bh(&neigh->lock);
	return 0;
}

/*
 *	Propagate a changed link-layer address into every cached hardware
 *	header (hh_cache) hanging off this neighbour, via the device's
 *	header_cache_update hook (if any).
 */
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh=neigh->hh; hh; hh=hh->hh_next) {
			write_lock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- override==1 allows to override existing lladdr, if it is different.
   -- arp==0 means that the change is administrative.

   Returns 0 on success, -EPERM if an arp-driven change targets a
   NOARP/PERMANENT entry, -EINVAL if no lladdr is known.

   Caller MUST hold reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev = neigh->dev;

	write_lock_bh(&neigh->lock);
	old = neigh->nud_state;

	err = -EPERM;
	if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: tear down timer/fast path. */
	if (!(new&NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old&NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old&NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (dev->addr_len == 0) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if (old&NUD_VALID) {
			if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
				lladdr = neigh->ha;
			else if (!override)
				goto out;	/* keep the old address */
		}
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old&NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	/* Re-age the entry first, then stamp confirmation/update times. */
	neigh_sync(neigh);
	old = neigh->nud_state;
	if (new&NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	if (old&NUD_VALID) {
		if (lladdr == neigh->ha)
			if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
				goto out;
	}
	neigh_del_timer(neigh);
	neigh->nud_state = new;
	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		/* Backdate "confirmed" so a non-CONNECTED entry with a new
		   address looks immediately stale to the ageing logic. */
		if (!(new&NUD_CONNECTED))
			neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new&NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	/* Entry just became valid: flush packets parked during resolution. */
	if (!(old&NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong.
		   Lock is dropped around n1->output(), which may modify
		   this very entry; nud_state is re-checked each pass. */
		while (neigh->nud_state&NUD_VALID &&
		       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}

/*
 *	Handle an incoming neighbour solicitation: look up (or create) the
 *	sender's entry and mark it STALE with the advertised lladdr.  The
 *	fourth __neigh_lookup argument only allows creation when an lladdr
 *	was supplied or the device is addressless.
 */
struct neighbour * neigh_event_ns(struct neigh_table *tbl,
				  u8 *lladdr, void *saddr,
				  struct net_device *dev)
{
	struct neighbour *neigh;

	neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
	return neigh;
}

/*
 *	Attach a cached hardware header for @protocol to @dst, creating and
 *	linking a new hh_cache onto the neighbour if none exists yet.
 *	Caller holds neigh->lock for writing (see neigh_resolve_output).
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
{
	struct hh_cache *hh = NULL;
	struct net_device *dev = dst->dev;

	/* Reuse an existing header cache for this protocol if present. */
	for (hh=n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		hh->hh_lock = RW_LOCK_UNLOCKED;
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			/* Device refused to fill the cached header. */
			kfree(hh);
			hh = NULL;
		} else {
			/* One reference for the neighbour's hh list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state&NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	/* And one reference for the dst entry. */
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts, where only old dev_queue_xmit
   worked, f.e. if you want to override normal output path (eql, shaper),
   but resolution is not made yet.
 */
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* If building the hard header fails and rebuild_header() takes
	   over (returns nonzero), the skb is consumed elsewhere. */
	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful: full output path that may trigger resolution.
   Returns 0 if the packet was sent or queued for resolution,
   -EINVAL (freeing the skb) on header build failure or missing dst. */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* neigh_event_send()==0 means the entry is usable now; otherwise
	   the skb has been queued/consumed by the resolution machinery. */
	if (neigh_event_send(neigh, skb) == 0) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && dst->hh == NULL) {
			/* First packet on this dst: initialise the hh cache
			   under the write lock (re-check after acquiring). */
			write_lock_bh(&neigh->lock);
			if (dst->hh == NULL)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock is enough just to copy neigh->ha. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			return neigh->ops->queue_xmit(skb);
		kfree_skb(skb);
		return -EINVAL;
	}
	return 0;

discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
	kfree_skb(skb);
	return -EINVAL;
}

/* As fast as possible without hh cache: entry is known CONNECTED, so
   just build the header from neigh->ha and transmit. */
int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		return neigh->ops->queue_xmit(skb);
	kfree_skb(skb);
	return -EINVAL;
}

/*
 *	Timer worker for the proxy queue: release every packet whose delay
 *	(stashed in skb->stamp.tv_usec as an absolute jiffies deadline by
 *	pneigh_enqueue) has expired, and re-arm the timer for the earliest
 *	remaining deadline.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff*)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		skb = skb->next;	/* advance before possible unlink */
		if (tdif <= 0) {
			/* Deadline reached: reprocess (if the device is
			   still up) or drop; either way release the dev
			   reference taken at enqueue time.  NOTE(review):
			   the dev_put() pairs with a dev_hold presumably
			   done in pneigh_enqueue beyond this excerpt. */
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

/*
 *	Queue @skb for delayed proxy processing with a random delay of up
 *	to p->proxy_delay jiffies; drops the packet if the proxy queue is
 *	already over p->proxy_qlen.  (Function is truncated here — the
 *	remainder lies beyond this excerpt.)
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	long sched_next = net_random()%p->proxy_delay;

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}
	/* Abuse skb->stamp to carry the absolute release deadline, read
	   back by neigh_proxy_process(). */
	skb->stamp.tv_sec = 0;
	skb->stamp.tv_usec = now + sched_next;
	/* ... excerpt truncated: the rest of pneigh_enqueue() and the
	   remainder of the file are not shown here ... */