neighbour.c
				del_timer(&n->timer);
			n->nud_state = NUD_REACHABLE;
			neigh_connect(n);
		}
	}
}

/* Periodic garbage collection: age out REACHABLE entries and destroy
   unreferenced FAILED or long-unused ones. */

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;
	unsigned long now = jiffies;
	int i;

	if (atomic_read(&tbl->lock)) {
		tbl->gc_timer.expires = now + 1*HZ;
		add_timer(&tbl->gc_timer);
		return;
	}

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (now - tbl->last_rand > 300*HZ) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p=&tbl->parms; p; p = p->next)
			p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i=0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			unsigned state = n->nud_state;

			if (state&(NUD_PERMANENT|NUD_IN_TIMER))
				goto next_elt;

			if ((long)(n->used - n->confirmed) < 0)
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 0 &&
			    (state == NUD_FAILED || now - n->used > n->parms->gc_staletime)) {
				*np = n->next;
				n->tbl = NULL;
				n->next = NULL;
				tbl->entries--;
				neigh_destroy(n);
				continue;
			}

			if (n->nud_state&NUD_REACHABLE &&
			    now - n->confirmed > n->parms->reachable_time) {
				n->nud_state = NUD_STALE;
				neigh_suspect(n);
			}

next_elt:
			np = &n->next;
		}
	}

	tbl->gc_timer.expires = now + tbl->gc_interval;
	add_timer(&tbl->gc_timer);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return p->ucast_probes + p->app_probes + p->mcast_probes;
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now = jiffies;
	struct neighbour *neigh = (struct neighbour*)arg;
	unsigned state = neigh->nud_state;

	if (!(state&NUD_IN_TIMER)) {
		NEIGH_PRINTK1("neigh: timer & !nud_in_timer\n");
		return;
	}

	if ((state&NUD_VALID) &&
	    now - neigh->confirmed < neigh->parms->reachable_time) {
		neigh->nud_state = NUD_REACHABLE;
		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
		neigh_connect(neigh);
		return;
	}
	if (state == NUD_DELAY) {
		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
		neigh->nud_state = NUD_PROBE;
		neigh->probes = 0;
	}

	if (neigh->probes >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->tbl->stats.res_failed++;
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: the error_report routine is
		   complicated and can, in particular, hit this same
		   neighbour entry again, so be careful to avoid an
		   endless loop here.
							--ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL)
			neigh->ops->error_report(neigh, skb);
		skb_queue_purge(&neigh->arp_queue);
		return;
	}

	neigh->timer.expires = now + neigh->parms->retrans_time;
	add_timer(&neigh->timer);

	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
	neigh->probes++;
}

/* Kick off resolution for an entry that is not yet usable. Returns 1 if the
   skb was consumed (queued on arp_queue or dropped), 0 if the caller may
   transmit immediately. */

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	start_bh_atomic();
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
		if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
			if (neigh->tbl == NULL) {
				NEIGH_PRINTK2("neigh %p used after death.\n", neigh);
				if (skb)
					kfree_skb(skb);
				end_bh_atomic();
				return 1;
			}
			if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
				neigh->probes = neigh->parms->ucast_probes;
				neigh->nud_state = NUD_INCOMPLETE;
				neigh->timer.expires = jiffies + neigh->parms->retrans_time;
				add_timer(&neigh->timer);

				neigh->ops->solicit(neigh, skb);
				neigh->probes++;
			} else {
				neigh->nud_state = NUD_FAILED;
				if (skb)
					kfree_skb(skb);
				end_bh_atomic();
				return 1;
			}
		}
		if (neigh->nud_state == NUD_INCOMPLETE) {
			if (skb) {
				if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
					struct sk_buff *buff;
					buff = neigh->arp_queue.prev;
					__skb_unlink(buff, &neigh->arp_queue);
					kfree_skb(buff);
				}
				__skb_queue_head(&neigh->arp_queue, skb);
			}
			end_bh_atomic();
			return 1;
		}
		if (neigh->nud_state == NUD_STALE) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
			add_timer(&neigh->timer);
		}
	}
	end_bh_atomic();
	return 0;
}

/* Propagate a changed link-layer address into all cached hardware headers. */

static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct device*, unsigned char*) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh=neigh->hh; hh; hh=hh->hh_next) {
			write_lock_irq(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_unlock_irq(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new is the new state.
   -- override==1 allows an existing, different lladdr to be replaced.
   -- arp==0 means that the change is administrative.
 */

int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int arp)
{
	u8 old = neigh->nud_state;
	struct device *dev = neigh->dev;

	if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
		return -EPERM;

	if (!(new&NUD_VALID)) {
		if (old&NUD_IN_TIMER)
			del_timer(&neigh->timer);
		if (old&NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		return 0;
	}

	/* Compare new lladdr with cached one */
	if (dev->addr_len == 0) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if (old&NUD_VALID) {
			if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
				lladdr = neigh->ha;
			else if (!override)
				return -EPERM;
		}
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		if (!(old&NUD_VALID))
			return -EINVAL;
		lladdr = neigh->ha;
	}

	neigh_sync(neigh);
	old = neigh->nud_state;
	if (new&NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	if (old&NUD_VALID) {
		if (lladdr == neigh->ha)
			if (new == old ||
			    (new == NUD_STALE && (old&NUD_CONNECTED)))
				return 0;
	}

	if (old&NUD_IN_TIMER)
		del_timer(&neigh->timer);
	neigh->nud_state = new;
	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
		if (neigh->parms->app_probes)
			neigh_app_notify(neigh);
#endif
	}
	if (new == old)
		return 0;
	if (new&NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old&NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */
		while (neigh->nud_state&NUD_VALID &&
		       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
	return 0;
}

struct neighbour * neigh_event_ns(struct neigh_table *tbl,
				  u8 *lladdr, void *saddr, struct device *dev)
{
	struct neighbour *neigh;

	neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
	return neigh;
}

/* Attach a cached hardware header for this protocol to the dst entry,
   creating one if the device supports header caching. */

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
{
	struct hh_cache *hh = NULL;
	struct device *dev = dst->dev;

	for (hh=n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state&NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (neigh_event_send(neigh, skb) == 0) {
		int err;
		struct device *dev = neigh->dev;

		if (dev->hard_header_cache && dst->hh == NULL) {
			start_bh_atomic();
			if (dst->hh == NULL)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			end_bh_atomic();
		} else {
			start_bh_atomic();
			err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
			end_bh_atomic();
		}
		if (err >= 0)
			return neigh->ops->queue_xmit(skb);
		kfree_skb(skb);
		return -EINVAL;
	}
	return 0;

discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst,
		      dst ? dst->neighbour : NULL);
	kfree_skb(skb);
	return -EINVAL;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	start_bh_atomic();
	err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
	end_bh_atomic();
	if (err >= 0)
		return neigh->ops->queue_xmit(skb);
	kfree_skb(skb);
	return -EINVAL;
}

/* Proxy timer: hand expired queued packets to proxy_redo() (or drop them)
   and re-arm the timer for the next deadline. */

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff*)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		skb = skb->next;
		if (tdif <= 0) {
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo)
				tbl->proxy_redo(back);
			else
				kfree_skb(back);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next) {
		tbl->proxy_timer.expires = jiffies + sched_next;
		add_timer(&tbl->proxy_timer);
	}
}

/* Queue a packet for delayed proxy processing after a random delay. */

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	long sched_next = net_random()%p->proxy_delay;

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}
	skb->stamp.tv_sec = 0;
	skb->stamp.tv_usec = now + sched_next;
	if (del_timer(&tbl->proxy_timer)) {
		long tval = tbl->proxy_timer.expires - now;
		if (tval < sched_next)
			sched_next = tval;
	}
	tbl->proxy_timer.expires = now + sched_next;
	dst_release(skb->dst);
	skb->dst = NULL;
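
For orientation, the short sketch below shows one plausible way a receive-side protocol handler of this era (arp_rcv()-style code) drives the state machine above: look the entry up, feed the confirmed link-layer address through neigh_update(), and drop the reference. This is a hedged illustration, not part of neighbour.c: example_confirm() is a hypothetical name, and the sketch assumes the __neigh_lookup(), neigh_update() and neigh_release() interfaces of this kernel generation (the first two appear in the listing itself).

/* Hypothetical illustration only -- not part of neighbour.c. */
static void example_confirm(struct neigh_table *tbl, void *saddr,
			    u8 *lladdr, struct device *dev)
{
	/* Look up (but do not create) the entry keyed by the sender address. */
	struct neighbour *n = __neigh_lookup(tbl, saddr, dev, 0);

	if (n) {
		/* Confirmed reply: install lladdr (override==1 accepts a changed
		   address, arp==1 marks a protocol-driven change) and move the
		   entry to NUD_REACHABLE; if it was still unresolved,
		   neigh_update() also re-sends the packets queued on arp_queue. */
		neigh_update(n, lladdr, NUD_REACHABLE, 1, 1);
		neigh_release(n);	/* assumed refcount helper from neighbour.h */
	}
}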