📄 neighbour.c
printk(KERN_WARNING "Destroying alive neighbour %p\n", neigh); dump_stack(); return; } if (neigh_del_timer(neigh)) printk(KERN_WARNING "Impossible event.\n"); while ((hh = neigh->hh) != NULL) { neigh->hh = hh->hh_next; hh->hh_next = NULL; write_lock_bh(&hh->hh_lock); hh->hh_output = neigh_blackhole; write_unlock_bh(&hh->hh_lock); if (atomic_dec_and_test(&hh->hh_refcnt)) kfree(hh); } if (neigh->ops && neigh->ops->destructor) (neigh->ops->destructor)(neigh); skb_queue_purge(&neigh->arp_queue); dev_put(neigh->dev); neigh_parms_put(neigh->parms); NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); neigh_glbl_allocs--; neigh->tbl->entries--; kmem_cache_free(neigh->tbl->kmem_cachep, neigh);}/* Neighbour state is suspicious; disable fast path. Called with write_locked neigh. */static void neigh_suspect(struct neighbour *neigh){ struct hh_cache *hh; NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); neigh->output = neigh->ops->output; for (hh = neigh->hh; hh; hh = hh->hh_next) hh->hh_output = neigh->ops->output;}/* Neighbour state is OK; enable fast path. Called with write_locked neigh. */static void neigh_connect(struct neighbour *neigh){ struct hh_cache *hh; NEIGH_PRINTK2("neigh %p is connected.\n", neigh); neigh->output = neigh->ops->connected_output; for (hh = neigh->hh; hh; hh = hh->hh_next) hh->hh_output = neigh->ops->hh_output;}static void neigh_periodic_timer(unsigned long arg){ struct neigh_table *tbl = (struct neigh_table *)arg; struct neighbour *n, **np; unsigned long expire, now = jiffies; NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); write_lock(&tbl->lock); /* * periodically recompute ReachableTime from random function */ if (time_after(now, tbl->last_rand + 300 * HZ)) { struct neigh_parms *p; tbl->last_rand = now; for (p = &tbl->parms; p; p = p->next) p->reachable_time = neigh_rand_reach_time(p->base_reachable_time); } np = &tbl->hash_buckets[tbl->hash_chain_gc]; tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); while ((n = *np) != NULL) { unsigned int state; write_lock(&n->lock); state = n->nud_state; if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { write_unlock(&n->lock); goto next_elt; } if (time_before(n->used, n->confirmed)) n->used = n->confirmed; if (atomic_read(&n->refcnt) == 1 && (state == NUD_FAILED || time_after(now, n->used + n->parms->gc_staletime))) { *np = n->next; n->dead = 1; write_unlock(&n->lock); neigh_release(n); continue; } write_unlock(&n->lock);next_elt: np = &n->next; } /* Cycle through all hash buckets every base_reachable_time/2 ticks. * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 * base_reachable_time. */ expire = tbl->parms.base_reachable_time >> 1; expire /= (tbl->hash_mask + 1); if (!expire) expire = 1; mod_timer(&tbl->gc_timer, now + expire); write_unlock(&tbl->lock);}static __inline__ int neigh_max_probes(struct neighbour *n){ struct neigh_parms *p = n->parms; return (n->nud_state & NUD_PROBE ? p->ucast_probes : p->ucast_probes + p->app_probes + p->mcast_probes);}/* Called when a timer expires for a neighbour entry. 
/* Called when a timer expires for a neighbour entry. */
static void neigh_timer_handler(unsigned long arg)
{
    unsigned long now, next;
    struct neighbour *neigh = (struct neighbour *)arg;
    unsigned state;
    int notify = 0;

    write_lock(&neigh->lock);

    state = neigh->nud_state;
    now = jiffies;
    next = now + HZ;

    if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
        printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
        goto out;
    }

    if (state & NUD_REACHABLE) {
        if (time_before_eq(now,
                           neigh->confirmed + neigh->parms->reachable_time)) {
            NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else if (time_before_eq(now,
                                  neigh->used + neigh->parms->delay_probe_time)) {
            NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
            neigh->nud_state = NUD_DELAY;
            neigh_suspect(neigh);
            next = now + neigh->parms->delay_probe_time;
        } else {
            NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
            neigh->nud_state = NUD_STALE;
            neigh_suspect(neigh);
        }
    } else if (state & NUD_DELAY) {
        if (time_before_eq(now,
                           neigh->confirmed + neigh->parms->delay_probe_time)) {
            NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
            neigh->nud_state = NUD_REACHABLE;
            neigh_connect(neigh);
            next = neigh->confirmed + neigh->parms->reachable_time;
        } else {
            NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
            neigh->nud_state = NUD_PROBE;
            atomic_set(&neigh->probes, 0);
            next = now + neigh->parms->retrans_time;
        }
    } else {
        /* NUD_PROBE|NUD_INCOMPLETE */
        next = now + neigh->parms->retrans_time;
    }

    if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
        atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
        struct sk_buff *skb;

        neigh->nud_state = NUD_FAILED;
        notify = 1;
        NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
        NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

        /* It is very thin place. report_unreachable is very complicated
           routine. Particularly, it can hit the same neighbour entry!

           So that, we try to be accurate and avoid dead loop.
           --ANK */
        while (neigh->nud_state == NUD_FAILED &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
            write_unlock(&neigh->lock);
            neigh->ops->error_report(neigh, skb);
            write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
    }

    if (neigh->nud_state & NUD_IN_TIMER) {
        neigh_hold(neigh);
        if (time_before(next, jiffies + HZ/2))
            next = jiffies + HZ/2;
        neigh->timer.expires = next;
        add_timer(&neigh->timer);
    }
    if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
        struct sk_buff *skb = skb_peek(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
            skb_get(skb);
        write_unlock(&neigh->lock);
        neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        if (skb)
            kfree_skb(skb);
    } else {
out:
        write_unlock(&neigh->lock);
    }

#ifdef CONFIG_ARPD
    if (notify && neigh->parms->app_probes)
        neigh_app_notify(neigh);
#endif
    neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
    int rc;
    unsigned long now;

    write_lock_bh(&neigh->lock);

    rc = 0;
    if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
        goto out_unlock_bh;

    now = jiffies;

    if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
        if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
            atomic_set(&neigh->probes, neigh->parms->ucast_probes);
            neigh->nud_state = NUD_INCOMPLETE;
            neigh_hold(neigh);
            neigh->timer.expires = now + 1;
            add_timer(&neigh->timer);
        } else {
            neigh->nud_state = NUD_FAILED;
            write_unlock_bh(&neigh->lock);

            if (skb)
                kfree_skb(skb);
            return 1;
        }
    } else if (neigh->nud_state & NUD_STALE) {
        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
        neigh_hold(neigh);
        neigh->nud_state = NUD_DELAY;
        neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
        add_timer(&neigh->timer);
    }

    if (neigh->nud_state == NUD_INCOMPLETE) {
        if (skb) {
            if (skb_queue_len(&neigh->arp_queue) >=
                neigh->parms->queue_len) {
                struct sk_buff *buff;
                buff = neigh->arp_queue.next;
                __skb_unlink(buff, &neigh->arp_queue);
                kfree_skb(buff);
            }
            __skb_queue_tail(&neigh->arp_queue, skb);
        }
        rc = 1;
    }
out_unlock_bh:
    write_unlock_bh(&neigh->lock);
    return rc;
}

static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
    struct hh_cache *hh;
    void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
        neigh->dev->header_cache_update;

    if (update) {
        for (hh = neigh->hh; hh; hh = hh->hh_next) {
            write_lock_bh(&hh->hh_lock);
            update(hh, neigh->dev, neigh->ha);
            write_unlock_bh(&hh->hh_lock);
        }
    }
}
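/*
 * Note on the helper above: when a neighbour's link-layer address changes,
 * every hh_cache entry chained off it still holds a pre-built hardware
 * header carrying the old address.  neigh_update_hhs() walks the chain and
 * lets the device's header_cache_update hook rewrite each cached header
 * under hh_lock, so the fast path (hh_output) keeps emitting frames with
 * the current address.  neigh_update() below relies on this after copying
 * a new lladdr into neigh->ha.
 */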
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- flags
        NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
                                if it is different.
        NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
                                lladdr instead of overriding it
                                if it is different.
                                It also allows to retain current state
                                if lladdr is unchanged.
        NEIGH_UPDATE_F_ADMIN    means that the change is administrative.
        NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
                                NTF_ROUTER flag.
        NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
                                a router.

   Caller MUST hold reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags)
{
    u8 old;
    int err;
#ifdef CONFIG_ARPD
    int notify = 0;
#endif
    struct net_device *dev;
    int update_isrouter = 0;

    write_lock_bh(&neigh->lock);

    dev = neigh->dev;
    old = neigh->nud_state;
    err = -EPERM;

    if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
        (old & (NUD_NOARP | NUD_PERMANENT)))
        goto out;

    if (!(new & NUD_VALID)) {
        neigh_del_timer(neigh);
        if (old & NUD_CONNECTED)
            neigh_suspect(neigh);
        neigh->nud_state = new;
        err = 0;
#ifdef CONFIG_ARPD
        notify = old & NUD_VALID;
#endif
        goto out;
    }

    /* Compare new lladdr with cached one */
    if (!dev->addr_len) {
        /* First case: device needs no address. */
        lladdr = neigh->ha;
    } else if (lladdr) {
        /* The second case: if something is already cached
           and a new address is proposed:
           - compare new & old
           - if they are different, check override flag
         */
        if ((old & NUD_VALID) &&
            !memcmp(lladdr, neigh->ha, dev->addr_len))
            lladdr = neigh->ha;
    } else {
        /* No address is supplied; if we know something,
           use it, otherwise discard the request.
         */
        err = -EINVAL;
        if (!(old & NUD_VALID))
            goto out;
        lladdr = neigh->ha;
    }

    if (new & NUD_CONNECTED)
        neigh->confirmed = jiffies;
    neigh->updated = jiffies;

    /* If entry was valid and address is not changed,
       do not change entry state, if new one is STALE.
     */
    err = 0;
    update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
    if (old & NUD_VALID) {
        if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
            update_isrouter = 0;
            if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                (old & NUD_CONNECTED)) {
                lladdr = neigh->ha;
                new = NUD_STALE;
            } else
                goto out;
        } else {
            if (lladdr == neigh->ha && new == NUD_STALE &&
                ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                 (old & NUD_CONNECTED)))
                new = old;
        }
    }

    if (new != old) {
        neigh_del_timer(neigh);
        if (new & NUD_IN_TIMER) {
            neigh_hold(neigh);
            neigh->timer.expires = jiffies +
                ((new & NUD_REACHABLE) ?
                 neigh->parms->reachable_time : 0);
            add_timer(&neigh->timer);
        }
        neigh->nud_state = new;
    }

    if (lladdr != neigh->ha) {
        memcpy(&neigh->ha, lladdr, dev->addr_len);
        neigh_update_hhs(neigh);
        if (!(new & NUD_CONNECTED))
            neigh->confirmed = jiffies -
                (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
        notify = 1;
#endif
    }
    if (new == old)
        goto out;
    if (new & NUD_CONNECTED)
        neigh_connect(neigh);
    else
        neigh_suspect(neigh);
    if (!(old & NUD_VALID)) {
        struct sk_buff *skb;

        /* Again: avoid dead loop if something went wrong */

        while (neigh->nud_state & NUD_VALID &&
               (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
            struct neighbour *n1 = neigh;
            write_unlock_bh(&neigh->lock);
            /* On shaper/eql skb->dst->neighbour != neigh :( */
            if (skb->dst && skb->dst->neighbour)
                n1 = skb->dst->neighbour;
            n1->output(skb);
            write_lock_bh(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
    }
out:
    if (update_isrouter) {
        neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
            (neigh->flags | NTF_ROUTER) :
            (neigh->flags & ~NTF_ROUTER);
    }
    write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
    if (notify && neigh->parms->app_probes)
        neigh_app_notify(neigh);
#endif
    return err;
}
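/*
 * Illustrative usage (not from this file): a resolution protocol's receive
 * path would typically refresh an entry it has just heard from with a call
 * along the lines of
 *
 *     neigh_update(n, sha, NUD_REACHABLE,
 *                  NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
 *
 * where 'n' is the looked-up neighbour and 'sha' the sender's hardware
 * address; both names are placeholders, and the exact state and flags
 * depend on the caller.  With an unchanged lladdr the entry is simply
 * confirmed and (re)connected; with a different lladdr,
 * NEIGH_UPDATE_F_OVERRIDE decides whether the cached address is replaced,
 * and NEIGH_UPDATE_F_WEAK_OVERRIDE lets a conflicting "connected" entry
 * fall back to NUD_STALE instead of being rejected outright.
 * neigh_event_ns() below is an in-file example, using NUD_STALE with
 * NEIGH_UPDATE_F_OVERRIDE.
 */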
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
    struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                             lladdr || !dev->addr_len);
    if (neigh)
        neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE);
    return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          u16 protocol)
{
    struct hh_cache *hh;
    struct net_device *dev = dst->dev;

    for (hh = n->hh; hh; hh = hh->hh_next)
        if (hh->hh_type == protocol)
            break;

    if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
        memset(hh, 0, sizeof(struct hh_cache));
        hh->hh_lock = RW_LOCK_UNLOCKED;
        hh->hh_type = protocol;
        atomic_set(&hh->hh_refcnt, 0);
        hh->hh_next = NULL;
        if (dev->hard_header_cache(n, hh)) {
            kfree(hh);
            hh = NULL;
        } else {
            atomic_inc(&hh->hh_refcnt);
            hh->hh_next = n->hh;
            n->hh = hh;
            if (n->nud_state & NUD_CONNECTED)
                hh->hh_output = n->ops->hh_output;
            else
                hh->hh_output = n->ops->output;
        }
    }
    if (hh) {
        atomic_inc(&hh->hh_refcnt);
        dst->hh = hh;
    }
}

/* This function can be used in contexts, where only old dev_queue_xmit
   worked, f.e. if you want to override normal output path (eql, shaper),
   but resolution is not made yet.
 */
int neigh_compat_output(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;

    __skb_pull(skb, skb->nh.raw - skb->data);

    if (dev->hard_header &&
        dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                         skb->len) < 0 &&
        dev->rebuild_header(skb))
        return 0;

    return dev_queue_xmit(skb);
}

/* Slow and careful. */
int neigh_resolve_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct neighbour *neigh;
    int rc = 0;

    if (!dst || !(neigh = dst->neighbour))
        goto discard;

    __skb_pull(skb, skb->nh.raw - skb->data);

    if (!neigh_event_send(neigh, skb)) {
        int err;
        struct net_device *dev = neigh->dev;
        if (dev->hard_header_cache && !dst->hh) {
            write_lock_bh(&neigh->lock);
            if (!dst->hh)
                neigh_hh_init(neigh, dst, dst->ops->protocol);
            err = dev->hard_header(skb, dev, ntohs(skb->protocol),
                                   neigh->ha, NULL, skb->len);
            write_unlock_bh(&neigh->lock);
        } else {
            read_lock_bh(&neigh->lock);
            err = dev->hard_header(skb, dev, ntohs(skb->protocol),