/*
 * socklnd.c — Lustre LNET socket LND: peer/route/connection management.
 * (The two lines above in the original capture — a file icon and the
 *  Chinese label "字号:" ("font size:") — were page chrome from the web
 *  code viewer this file was copied from, not part of the source.)
 */
route->ksnr_peer = peer; ksocknal_peer_addref(peer); /* peer's routelist takes over my ref on 'route' */ list_add_tail(&route->ksnr_list, &peer->ksnp_routes); list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); if (conn->ksnc_ipaddr != route->ksnr_ipaddr) continue; ksocknal_associate_route_conn_locked(route, conn); /* keep going (typed routes) */ }}voidksocknal_del_route_locked (ksock_route_t *route){ ksock_peer_t *peer = route->ksnr_peer; ksock_interface_t *iface; ksock_conn_t *conn; struct list_head *ctmp; struct list_head *cnxt; LASSERT (!route->ksnr_deleted); /* Close associated conns */ list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); if (conn->ksnc_route != route) continue; ksocknal_close_conn_locked (conn, 0); } if (route->ksnr_myipaddr != 0) { iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); if (iface != NULL) iface->ksni_nroutes--; } route->ksnr_deleted = 1; list_del (&route->ksnr_list); ksocknal_route_decref(route); /* drop peer's ref */ if (list_empty (&peer->ksnp_routes) && list_empty (&peer->ksnp_conns)) { /* I've just removed the last route to a peer with no active * connections */ ksocknal_unlink_peer_locked (peer); }}intksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port){ struct list_head *tmp; ksock_peer_t *peer; ksock_peer_t *peer2; ksock_route_t *route; ksock_route_t *route2; int rc; if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY) return (-EINVAL); /* Have a brand new peer ready... 
*/ rc = ksocknal_create_peer(&peer, ni, id); if (rc != 0) return rc; route = ksocknal_create_route (ipaddr, port); if (route == NULL) { ksocknal_peer_decref(peer); return (-ENOMEM); } write_lock_bh (&ksocknal_data.ksnd_global_lock); /* always called with a ref on ni, so shutdown can't have started */ LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); peer2 = ksocknal_find_peer_locked (ni, id); if (peer2 != NULL) { ksocknal_peer_decref(peer); peer = peer2; } else { /* peer table takes my ref on peer */ list_add_tail (&peer->ksnp_list, ksocknal_nid2peerlist (id.nid)); } route2 = NULL; list_for_each (tmp, &peer->ksnp_routes) { route2 = list_entry(tmp, ksock_route_t, ksnr_list); if (route2->ksnr_ipaddr == ipaddr) break; route2 = NULL; } if (route2 == NULL) { ksocknal_add_route_locked(peer, route); route->ksnr_share_count++; } else { ksocknal_route_decref(route); route2->ksnr_share_count++; } write_unlock_bh (&ksocknal_data.ksnd_global_lock); return (0);}voidksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip){ ksock_conn_t *conn; ksock_route_t *route; struct list_head *tmp; struct list_head *nxt; int nshared; LASSERT (!peer->ksnp_closing); /* Extra ref prevents peer disappearing until I'm done with it */ ksocknal_peer_addref(peer); list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* no match */ if (!(ip == 0 || route->ksnr_ipaddr == ip)) continue; route->ksnr_share_count = 0; /* This deletes associated conns too */ ksocknal_del_route_locked (route); } nshared = 0; list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); nshared += route->ksnr_share_count; } if (nshared == 0) { /* remove everything else if there are no explicit entries * left */ list_for_each_safe (tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* we should only be removing auto-entries */ LASSERT(route->ksnr_share_count == 0); ksocknal_del_route_locked 
(route); } list_for_each_safe (tmp, nxt, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); ksocknal_close_conn_locked(conn, 0); } } ksocknal_peer_decref(peer); /* NB peer unlinks itself when last conn/route is removed */}intksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip){ CFS_LIST_HEAD (zombies); struct list_head *ptmp; struct list_head *pnxt; ksock_peer_t *peer; int lo; int hi; int i; int rc = -ENOENT; write_lock_bh (&ksocknal_data.ksnd_global_lock); if (id.nid != LNET_NID_ANY) lo = hi = ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers; else { lo = 0; hi = ksocknal_data.ksnd_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe (ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { peer = list_entry (ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) && (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid))) continue; ksocknal_peer_addref(peer); /* a ref for me... */ ksocknal_del_peer_locked (peer, ip); if (peer->ksnp_closing && !list_empty(&peer->ksnp_tx_queue)) { LASSERT (list_empty(&peer->ksnp_conns)); LASSERT (list_empty(&peer->ksnp_routes)); list_splice_init(&peer->ksnp_tx_queue, &zombies); } ksocknal_peer_decref(peer); /* ...till here */ rc = 0; /* matched! 
*/ } } write_unlock_bh (&ksocknal_data.ksnd_global_lock); ksocknal_txlist_done(ni, &zombies, 1); return (rc);}ksock_conn_t *ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index){ ksock_peer_t *peer; struct list_head *ptmp; ksock_conn_t *conn; struct list_head *ctmp; int i; read_lock (&ksocknal_data.ksnd_global_lock); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry (ptmp, ksock_peer_t, ksnp_list); LASSERT (!peer->ksnp_closing); if (peer->ksnp_ni != ni) continue; list_for_each (ctmp, &peer->ksnp_conns) { if (index-- > 0) continue; conn = list_entry (ctmp, ksock_conn_t, ksnc_list); ksocknal_conn_addref(conn); read_unlock (&ksocknal_data.ksnd_global_lock); return (conn); } } } read_unlock (&ksocknal_data.ksnd_global_lock); return (NULL);}ksock_sched_t *ksocknal_choose_scheduler_locked (unsigned int irq){ ksock_sched_t *sched; ksock_irqinfo_t *info; int i; LASSERT (irq < NR_IRQS); info = &ksocknal_data.ksnd_irqinfo[irq]; if (irq != 0 && /* hardware NIC */ info->ksni_valid) { /* already set up */ return (&ksocknal_data.ksnd_schedulers[info->ksni_sched]); } /* software NIC (irq == 0) || not associated with a scheduler yet. * Choose the CPU with the fewest connections... */ sched = &ksocknal_data.ksnd_schedulers[0]; for (i = 1; i < ksocknal_data.ksnd_nschedulers; i++) if (sched->kss_nconns > ksocknal_data.ksnd_schedulers[i].kss_nconns) sched = &ksocknal_data.ksnd_schedulers[i]; if (irq != 0) { /* Hardware NIC */ info->ksni_valid = 1; info->ksni_sched = sched - ksocknal_data.ksnd_schedulers; /* no overflow... */ LASSERT (info->ksni_sched == sched - ksocknal_data.ksnd_schedulers); } return (sched);}intksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs){ ksock_net_t *net = ni->ni_data; int i; int nip; read_lock (&ksocknal_data.ksnd_global_lock); nip = net->ksnn_ninterfaces; LASSERT (nip < LNET_MAX_INTERFACES); /* Only offer interfaces for additional connections if I have * more than one. 
*/ if (nip < 2) { read_unlock (&ksocknal_data.ksnd_global_lock); return 0; } for (i = 0; i < nip; i++) { ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr; LASSERT (ipaddrs[i] != 0); } read_unlock (&ksocknal_data.ksnd_global_lock); return (nip);}intksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips){ int best_netmatch = 0; int best_xor = 0; int best = -1; int this_xor; int this_netmatch; int i; for (i = 0; i < nips; i++) { if (ips[i] == 0) continue; this_xor = (ips[i] ^ iface->ksni_ipaddr); this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0; if (!(best < 0 || best_netmatch < this_netmatch || (best_netmatch == this_netmatch && best_xor > this_xor))) continue; best = i; best_netmatch = this_netmatch; best_xor = this_xor; } LASSERT (best >= 0); return (best);}intksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips){ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; ksock_net_t *net = peer->ksnp_ni->ni_data; ksock_interface_t *iface; ksock_interface_t *best_iface; int n_ips; int i; int j; int k; __u32 ip; __u32 xor; int this_netmatch;
/*
 * (Extraction artifact: the following text is keyboard-shortcut help from
 *  the web code viewer this file was captured from, not part of the source.
 *  Translated from Chinese for reference:)
 *   Copy code: Ctrl+C | Search code: Ctrl+F | Full-screen mode: F11
 *   Toggle theme: Ctrl+Shift+D | Show shortcuts: ? | Increase font: Ctrl+=
 *   Decrease font: Ctrl+-
 */