📄 neighbour.c
		/* tail of myneigh_lookup(): a matching entry was found in hash_buckets */
			NEIGH_CACHE_STAT_INC( tbl, hits );
			PR_DEEP_DEBUG( "find a neighbour in hash_buckets: %p\n", n );
			break;
		}
	}
	read_unlock_bh( &tbl->lock );
	return n;
}

/* Create a neighbour entry and insert it into the hash table; if a racing
 * insert already added a matching entry, return that one instead. */
struct neighbour *myneigh_create( struct neigh_table *tbl, const void *pkey, struct net_device *dev )
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = myneigh_alloc( tbl );

	if( !n ){
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol-specific setup (e.g. ARP) and per-device setup hooks. */
	if( tbl->constructor && (error = tbl->constructor(n)) < 0 ){
		PR_ERR( "tbl->constructor failed!\n" );
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if( n->parms->neigh_setup && (error = n->parms->neigh_setup(n)) < 0 ){
		PR_ERR( "n->parms->neigh_setup failed!\n" );
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if( atomic_read(&tbl->entries) > (tbl->hash_mask + 1) )
		myneigh_hash_grow( tbl, (tbl->hash_mask + 1) << 1 );

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if( n->parms->dead ){
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Someone may have inserted the same key while we were setting up. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	PR_DEBUG( "neigh: %d\n", atomic_read( &n->refcnt ) );
	write_unlock_bh(&tbl->lock);
	PR_DEEP_DEBUG( "the hash val: %d, the hash: %p\n", hash_val, tbl->hash_buckets[hash_val] );
	PR_DEBUG( "neigh %p is created.\n", n );
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	myneigh_release(n);
	goto out;
}

/* (Re)arm the per-neighbour timer; a timer that was already pending here
 * indicates a state-machine bug, so complain loudly. */
static inline void myneigh_add_timer(struct neighbour *n, unsigned long when)
{
	if( unlikely(mod_timer(&n->timer, when)) ){
		PR_ERR( "NEIGH: BUG, double timer add, state is %x\n", n->nud_state);
		dump_stack();
	}
}

/* Kick address resolution if needed.  Returns 0 if output may proceed,
 * 1 if the skb was queued (or dropped) pending resolution. */
int __myneigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if( neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE) )
		goto out_unlock_bh;

	now = jiffies;

	if( !(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE)) ){
		if( neigh->parms->mcast_probes + neigh->parms->app_probes ){
			atomic_set( &neigh->probes, neigh->parms->ucast_probes );
			neigh->nud_state = NUD_INCOMPLETE;
			neigh_hold(neigh);
			PR_DEBUG( "neigh: %d\n", atomic_read(&neigh->refcnt) );
			myneigh_add_timer(neigh, now + 1);
		}else{
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);
			if (skb)
				kfree_skb(skb);
			return 1;
		}
	}else if( neigh->nud_state & NUD_STALE ) {
		PR_DEBUG( "neigh %p is delayed.\n", neigh );
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		myneigh_add_timer( neigh, jiffies + neigh->parms->delay_probe_time );
	}

	if( neigh->nud_state == NUD_INCOMPLETE ){
		if( skb ){
			/* Queue is full: drop the oldest packet to make room. */
			if( skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len ){
				struct sk_buff *buff;

				buff = neigh->arp_queue.next;
				__skb_unlink( buff, &neigh->arp_queue );
				kfree_skb( buff );
			}
			__skb_queue_tail( &neigh->arp_queue, skb );
		}
		rc = 1;
	}
	PR_DEBUG( "mcast_probes: %d, app_probes: %d\n", neigh->parms->mcast_probes, neigh->parms->app_probes );
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}

/* dst output routine used while the hardware address may still need resolving. */
int myneigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	PR_DEBUG( "resolve output!\n" );

	if (!dst || !(neigh = dst->neighbour)){
		PR_ERR( "dst is NULL, discard!\n");
		goto discard;
	}

	__skb_pull(skb, skb->nh.raw - skb->data);

	if( !myneigh_event_send(neigh, skb) ){
		/* left empty in this version: the link-layer header build and
		 * actual transmit would go here once the address is resolved */
	}
out:
	return rc;
discard:
	PR_DEBUG( "neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
//out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* Output stubs: not implemented in this version. */
int myneigh_connected_output( struct sk_buff *skb )
{
	return 0;
}

int myneigh_compat_output( struct sk_buff *skb )
{
	return 0;
}

void myneigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

/* Cancel a pending neighbour timer and drop the reference it held. */
static int myneigh_del_timer(struct neighbour *n)
{
	if( (n->nud_state & NUD_IN_TIMER) && del_timer(&n->timer) ){
		myneigh_release(n);
		return 1;
	}
	return 0;
}

/* Final teardown of a dead neighbour: detach cached hardware headers,
 * purge queued skbs and drop all references. */
void myneigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC( neigh->tbl, destroys );

	if( !neigh->dead ){
		PR_WARN( "Destroying alive neighbour %p\n", neigh );
		dump_stack();
		return;
	}

	if( myneigh_del_timer(neigh) )
		PR_WARN( "Impossible event.\n" );

	while( (hh = neigh->hh) != NULL ){
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = myneigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if( atomic_dec_and_test(&hh->hh_refcnt) )
			kfree( hh );
	}

	if( neigh->ops && neigh->ops->destructor )
		(neigh->ops->destructor)(neigh);

	skb_queue_purge( &neigh->arp_queue );

	dev_put(neigh->dev);
	myneigh_parms_put(neigh->parms);

	PR_DEBUG( "neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free( neigh->tbl->kmem_cachep, neigh );
}

/* Periodic garbage collector: re-randomizes reachable_time and walks one
 * hash chain per run, releasing stale, unreferenced entries. */
static void myneigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	//PR_DEBUG( "%lu, %lu\n", now/HZ, tbl->last_rand/HZ + 300 );
	if( time_after(now, tbl->last_rand + 300 * HZ) ){
		struct neigh_parms *p;

		tbl->last_rand = now;
		for( p = &tbl->parms; p; p = p->next )
			p->reachable_time = neigh_rand_reach_time( p->base_reachable_time );
	}

	np = &tbl->hash_buckets[ tbl->hash_chain_gc ];
	tbl->hash_chain_gc = ( (tbl->hash_chain_gc + 1) & tbl->hash_mask );

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);
		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		PR_DEBUG( "refcnt: %d\n", atomic_read(&n->refcnt) );
		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock( &n->lock );
			myneigh_release( n );
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	mod_timer(&tbl->gc_timer, now + expire);
	//PR_DEBUG( "expire: %lus\n", expire/HZ );

	write_unlock(&tbl->lock);
}

/* Proxy handling is not implemented in this version. */
static void myneigh_proxy_process(unsigned long arg)
{
}

/* Clone the table's default parameters for a device and link the copy
 * into the table's parms list. */
struct neigh_parms *myneigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if( p ){
		memcpy( p, &tbl->parms, sizeof(*p) );
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time = neigh_rand_reach_time( p->base_reachable_time );

		if (dev) {
			if( dev->neigh_setup && dev->neigh_setup(dev, p) ){
				kfree(p);
				return NULL;
			}
			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh( &tbl->lock );
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh( &tbl->lock );
	}
	return p;
}

static void myneigh_rcu_free_parms( struct rcu_head *head )
{
	struct neigh_parms *parms = container_of(head, struct neigh_parms, rcu_head);

	myneigh_parms_put(parms);
}

/* Unlink a parms block from the table and free it after an RCU grace period. */
void myneigh_parms_release( struct neigh_table *tbl, struct neigh_parms *parms )
{
	struct neigh_parms **p;

	if( !parms || parms == &tbl->parms )
		return;

	write_lock_bh(&tbl->lock);
	for( p = &tbl->parms.next; *p; p = &(*p)->next) {
		if( *p == parms ){
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if( parms->dev )
				dev_put( parms->dev );
			call_rcu( &parms->rcu_head, myneigh_rcu_free_parms );
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	PR_ERR( "neigh_parms_release: not found\n" );
}

/* Unlink every entry that belongs to dev (all entries if dev is NULL). */
static void myneigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for( i = 0; i <= tbl->hash_mask; i++ ){
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while( (n = *np) != NULL ){
			if( dev && n->dev != dev ){
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			myneigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* Still referenced elsewhere: neutralize it so it
				 * cannot transmit, and let the last put free it. */
				skb_queue_purge( &n->arp_queue );
				n->output = myneigh_blackhole;
				if( n->nud_state & NUD_VALID )
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				PR_WARN("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			myneigh_release(n);
		}
	}
}

/* Drop proxy entries that belong to dev (all of them if dev is NULL). */
static int mypneigh_ifdown( struct neigh_table *tbl, struct net_device *dev )
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if( tbl->pdestructor )
					tbl->pdestructor(n);
				if( n->dev )
					dev_put( n->dev );
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void mypneigh_queue_purge( struct sk_buff_head *list )
{
	struct sk_buff *skb;

	while( (skb = skb_dequeue(list)) != NULL ){
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

/* Called when a device goes down: flush its entries and stop proxy work. */
int myneigh_ifdown( struct neigh_table *tbl, struct net_device *dev )
{
	write_lock_bh( &tbl->lock );
	myneigh_flush_dev( tbl, dev );
	mypneigh_ifdown( tbl, dev );
	write_unlock_bh( &tbl->lock );

	del_timer_sync( &tbl->proxy_timer );
	mypneigh_queue_purge( &tbl->proxy_queue );
	return 0;
}

/* Register a neighbour table: slab cache, per-CPU stats, /proc entry,
 * hash buckets and the GC/proxy timers. */
void myneigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set( &tbl->parms.refcnt, 1 );
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time = neigh_rand_reach_time( tbl->parms.base_reachable_time );

	if( !tbl->kmem_cachep )
		tbl->kmem_cachep = kmem_cache_create( tbl->id, tbl->entry_size, 0,
						      SLAB_HWCACHE_ALIGN, NULL, NULL );
	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &myneigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = myneigh_hash_alloc( tbl->hash_mask + 1 );

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");
	memset(tbl->phash_buckets, 0, phsize);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init( &tbl->lock );

	init_timer( &tbl->gc_timer );
	tbl->gc_timer.data = (unsigned long)tbl;
	tbl->gc_timer.function = myneigh_periodic_timer;
	tbl->gc_timer.expires = now + 1;
	add_timer( &tbl->gc_timer );

	init_timer( &tbl->proxy_timer );
	tbl->proxy_timer.data = (unsigned long)tbl;
	tbl->proxy_timer.function = myneigh_proxy_process;
	skb_queue_head_init( &tbl->proxy_queue );

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;

	write_lock( &myneigh_tbl_lock );
	tbl->next = myneigh_tables;
	myneigh_tables = tbl;
	write_unlock( &myneigh_tbl_lock );
}

/* Tear the table down, force-freeing any entries that are still chained. */
void myneigh_table_destroy( struct neigh_table *tbl )
{
	int i;

	del_timer( &tbl->gc_timer );

	if( tbl->phash_buckets ){
		kfree( tbl->phash_buckets );
		tbl->phash_buckets = NULL;
	}

	PR_DEBUG( "the hash_mask: %d\n", tbl->hash_mask );
	for( i = 0; i < tbl->hash_mask + 1; i ++ ){
		struct neighbour *p1, *p2;

		p1 = tbl->hash_buckets[i];
		PR_DEBUG( "the p1: %p\n", p1 );
		while( p1 != NULL ){
			p2 = p1;
			p1 = p1->next;
			PR_DEEP_DEBUG( "free neighbour: %p\n", p2 );
			dev_put( p2->dev );
			atomic_set( &(p2->refcnt), 0 );
			kmem_cache_free( tbl->kmem_cachep, p2 );
		}
	}

	if( tbl->hash_buckets ){
		myneigh_hash_free( tbl->hash_buckets, tbl->hash_mask + 1 );
		tbl->hash_buckets = NULL;
	}

	if( tbl->pde ){
		remove_proc_entry( tbl->id, proc_net_stat );
		tbl->pde = NULL;
	}

	if( tbl->stats ){
		free_percpu( tbl->stats );
		tbl->stats = NULL;
	}

	if( tbl->kmem_cachep ){
		kmem_cache_destroy( tbl->kmem_cachep );
		tbl->kmem_cachep = NULL;
	}

	atomic_set( &tbl->parms.refcnt, 0 );
}

EXPORT_SYMBOL_GPL( myneigh_table_init );
EXPORT_SYMBOL_GPL( myneigh_table_destroy );
EXPORT_SYMBOL_GPL( myneigh_resolve_output );
EXPORT_SYMBOL_GPL( myneigh_connected_output );
EXPORT_SYMBOL_GPL( myneigh_compat_output );
EXPORT_SYMBOL_GPL( myneigh_create );
EXPORT_SYMBOL_GPL( myneigh_lookup );
EXPORT_SYMBOL_GPL( myneigh_parms_alloc );
EXPORT_SYMBOL_GPL( myneigh_parms_release );
EXPORT_SYMBOL_GPL( myneigh_ifdown );
EXPORT_SYMBOL_GPL( myarp_tbl );
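
For context, a minimal usage sketch of the exported API (not part of the original file): it assumes myneigh_lookup()/myneigh_create() keep the stock neigh_lookup()/neigh_create() signatures on a 2.6-era kernel, and my_get_neigh, my_dev and my_ip are hypothetical names introduced only for illustration.

/* Sketch only: how a caller might register the table and fetch entries. */
static struct neighbour *my_get_neigh(struct net_device *my_dev, __be32 my_ip)
{
	struct neighbour *n;

	/* Fast path: the entry may already be cached. */
	n = myneigh_lookup(&myarp_tbl, &my_ip, my_dev);
	if (n)
		return n;

	/* Slow path: allocate and insert a fresh entry. */
	n = myneigh_create(&myarp_tbl, &my_ip, my_dev);
	if (IS_ERR(n))
		return NULL;
	return n;	/* caller drops the reference with myneigh_release() */
}

static int __init my_module_init(void)
{
	myneigh_table_init(&myarp_tbl);		/* starts the periodic GC timer */
	return 0;
}

static void __exit my_module_exit(void)
{
	myneigh_table_destroy(&myarp_tbl);	/* stops the timer, frees entries */
}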