📄 api-ni.c
字号:
/* (tail of lnet_descriptor_setup, whose opening lies above this view)
 * Pre-allocate the four descriptor freelists; bail out on the first
 * failure, returning that rc to the caller. */
rc = lnet_freelist_init(&the_lnet.ln_free_mes, MAX_MES, sizeof (lnet_me_t));
if (rc != 0)
        return (rc);

rc = lnet_freelist_init(&the_lnet.ln_free_msgs, MAX_MSGS, sizeof (lnet_msg_t));
if (rc != 0)
        return (rc);

rc = lnet_freelist_init(&the_lnet.ln_free_mds, MAX_MDS, sizeof (lnet_libmd_t));
if (rc != 0)
        return (rc);

rc = lnet_freelist_init(&the_lnet.ln_free_eqs, MAX_EQS, sizeof (lnet_eq_t));
return (rc);
}

/* Tear down all four descriptor freelists created by lnet_descriptor_setup().
 * NOTE(review): lnet_freelist_fini() is presumably safe on a list whose
 * init failed part-way — confirm against its definition. */
void
lnet_descriptor_cleanup (void)
{
        lnet_freelist_fini (&the_lnet.ln_free_mes);
        lnet_freelist_fini (&the_lnet.ln_free_msgs);
        lnet_freelist_fini (&the_lnet.ln_free_mds);
        lnet_freelist_fini (&the_lnet.ln_free_eqs);
}

#endif

/* Build a node-lifetime cookie from the boot-time wall clock.  The cookie
 * is embedded in wire handles so that handles minted before a reboot are
 * rejected afterwards. */
__u64
lnet_create_interface_cookie (void)
{
        /* NB the interface cookie in wire handles guards against delayed
         * replies and ACKs appearing valid after reboot. Initialisation time,
         * even if it's only implemented to millisecond resolution is probably
         * easily good enough. */
        struct timeval tv;
        __u64          cookie;
#ifndef __KERNEL__
        int            rc = gettimeofday (&tv, NULL);
        LASSERT (rc == 0);
#else
        do_gettimeofday(&tv);
#endif
        /* Pack seconds and microseconds into one 64-bit value:
         * cookie = tv_sec * 1e6 + tv_usec */
        cookie = tv.tv_sec;
        cookie *= 1000000;
        cookie += tv.tv_usec;

        return cookie;
}

/* Allocate and initialise the cookie -> object hash table, and seed the
 * cookie counter.  Returns 0 on success, -ENOMEM if the table cannot be
 * allocated. */
int
lnet_setup_handle_hash (void)
{
        int       i;

        /* Arbitrary choice of hash table size */
#ifdef __KERNEL__
        the_lnet.ln_lh_hash_size = CFS_PAGE_SIZE / sizeof (struct list_head);
#else
        the_lnet.ln_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
#endif
        LIBCFS_ALLOC(the_lnet.ln_lh_hash_table,
                     the_lnet.ln_lh_hash_size * sizeof (struct list_head));
        if (the_lnet.ln_lh_hash_table == NULL)
                return (-ENOMEM);

        for (i = 0; i < the_lnet.ln_lh_hash_size; i++)
                CFS_INIT_LIST_HEAD (&the_lnet.ln_lh_hash_table[i]);

        /* First cookie handed out leaves the low "type" bits clear; see
         * lnet_initialise_handle() which ORs the type into those bits. */
        the_lnet.ln_next_object_cookie = LNET_COOKIE_TYPES;

        return (0);
}

/* Free the cookie hash table.  Safe to call when setup never ran
 * (table pointer NULL). */
void
lnet_cleanup_handle_hash (void)
{
        if (the_lnet.ln_lh_hash_table == NULL)
                return;

        LIBCFS_FREE(the_lnet.ln_lh_hash_table,
                    the_lnet.ln_lh_hash_size * sizeof (struct list_head));
}

/* Resolve a wire cookie of the given type to its in-core object, or NULL
 * if the type bits don't match or no object with that cookie is hashed. */
lnet_libhandle_t *
lnet_lookup_cookie (__u64 cookie, int type)
{
        /* ALWAYS called with LNET_LOCK held */
        struct list_head    *list;
        struct list_head    *el;
        unsigned int         hash;

        /* Low bits of the cookie encode the object type; reject mismatches
         * before touching the hash. */
        if ((cookie & (LNET_COOKIE_TYPES - 1)) != type)
                return (NULL);

        hash = ((unsigned int)cookie) % the_lnet.ln_lh_hash_size;
        list = &the_lnet.ln_lh_hash_table[hash];

        list_for_each (el, list) {
                lnet_libhandle_t *lh = list_entry (el, lnet_libhandle_t,
                                                   lh_hash_chain);

                if (lh->lh_cookie == cookie)
                        return (lh);
        }

        return (NULL);
}

/* Assign a fresh cookie (monotonic counter | type) to lh and insert it
 * into the cookie hash so lnet_lookup_cookie() can find it. */
void
lnet_initialise_handle (lnet_libhandle_t *lh, int type)
{
        /* ALWAYS called with LNET_LOCK held */
        unsigned int    hash;

        LASSERT (type >= 0 && type < LNET_COOKIE_TYPES);
        /* counter advances by LNET_COOKIE_TYPES each time, so the low bits
         * stay free to carry the type (assumes LNET_COOKIE_TYPES is a power
         * of 2 — see the mask in lnet_lookup_cookie()). */
        lh->lh_cookie = the_lnet.ln_next_object_cookie | type;
        the_lnet.ln_next_object_cookie += LNET_COOKIE_TYPES;

        hash = ((unsigned int)lh->lh_cookie) % the_lnet.ln_lh_hash_size;
        list_add (&lh->lh_hash_chain, &the_lnet.ln_lh_hash_table[hash]);
}

/* Remove lh from the cookie hash; subsequent lookups of its cookie fail. */
void
lnet_invalidate_handle (lnet_libhandle_t *lh)
{
        /* ALWAYS called with LNET_LOCK held */
        list_del (&lh->lh_hash_chain);
}

/* Set up per-CPU finalizer slots (kernel) or the single finalizing flag
 * (userspace), plus the queue of messages awaiting finalization.
 * Returns 0 or -ENOMEM. */
int
lnet_init_finalizers(void)
{
#ifdef __KERNEL__
        int    i;

        /* one finalizer slot per online CPU */
        the_lnet.ln_nfinalizers = num_online_cpus();

        LIBCFS_ALLOC(the_lnet.ln_finalizers,
                     the_lnet.ln_nfinalizers *
                     sizeof(*the_lnet.ln_finalizers));
        if (the_lnet.ln_finalizers == NULL) {
                CERROR("Can't allocate ln_finalizers\n");
                return -ENOMEM;
        }

        for (i = 0; i < the_lnet.ln_nfinalizers; i++)
                the_lnet.ln_finalizers[i] = NULL;
#else
        the_lnet.ln_finalizing = 0;
#endif

        CFS_INIT_LIST_HEAD(&the_lnet.ln_finalizeq);
        return 0;
}

/* Release finalizer state; asserts every slot is idle and the finalize
 * queue has drained. */
void
lnet_fini_finalizers(void)
{
#ifdef __KERNEL__
        int    i;

        for (i = 0; i < the_lnet.ln_nfinalizers; i++)
                LASSERT (the_lnet.ln_finalizers[i] == NULL);

        LIBCFS_FREE(the_lnet.ln_finalizers,
                    the_lnet.ln_nfinalizers *
                    sizeof(*the_lnet.ln_finalizers));
#else
        LASSERT (!the_lnet.ln_finalizing);
#endif
        LASSERT (list_empty(&the_lnet.ln_finalizeq));
}

#ifndef __KERNEL__
/* Temporary workaround to allow uOSS and test programs force server
 * mode in userspace.
 See comments near ln_server_mode_flag in
 * lnet/lib-types.h */
void
lnet_server_mode() {
        the_lnet.ln_server_mode_flag = 1;
}
#endif

/* Allocate and initialise everything needed before any network interface
 * is brought up: descriptors, counters, global lists, the cookie hash,
 * the peer table, finalizers and the portals array.
 *
 * \param requested_pid  PID this node should use; in userspace client mode
 *                       it is ignored in favour of getpid()|LNET_PID_USERFLAG.
 * \retval 0 on success; negative errno on failure (everything already set
 *         up is torn down again via the failedN ladder below). */
int
lnet_prepare(lnet_pid_t requested_pid)
{
        /* Prepare to bring up the network */
        int               rc = 0;
        int               i;

        LASSERT (the_lnet.ln_refcount == 0);

        the_lnet.ln_routing = 0;

#ifdef __KERNEL__
        LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
        the_lnet.ln_pid = requested_pid;
#else
        if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
                LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);

                if (cfs_curproc_uid())/* Only root can run user-space server */
                        return -EPERM;
                the_lnet.ln_pid = requested_pid;

        } else {/* client case (liblustre) */

                /* My PID must be unique on this node and flag I'm userspace */
                the_lnet.ln_pid = getpid() | LNET_PID_USERFLAG;
        }
#endif

        rc = lnet_descriptor_setup();
        if (rc != 0)
                goto failed0;

        memset(&the_lnet.ln_counters, 0, sizeof(the_lnet.ln_counters));

        CFS_INIT_LIST_HEAD (&the_lnet.ln_active_msgs);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_active_mds);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_active_eqs);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_test_peers);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_nis);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_zombie_nis);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_remote_nets);
        CFS_INIT_LIST_HEAD (&the_lnet.ln_routers);

        the_lnet.ln_interface_cookie = lnet_create_interface_cookie();

        lnet_init_rtrpools();

        /* NB on hash-setup failure we still go to failed0:
         * lnet_cleanup_handle_hash() would be a no-op (table NULL) but the
         * descriptors DO need cleaning up. */
        rc = lnet_setup_handle_hash ();
        if (rc != 0)
                goto failed0;

        rc = lnet_create_peer_table();
        if (rc != 0)
                goto failed1;

        rc = lnet_init_finalizers();
        if (rc != 0)
                goto failed2;

        the_lnet.ln_nportals = MAX_PORTALS;
        LIBCFS_ALLOC(the_lnet.ln_portals,
                     the_lnet.ln_nportals *
                     sizeof(*the_lnet.ln_portals));
        if (the_lnet.ln_portals == NULL) {
                rc = -ENOMEM;
                goto failed3;
        }

        for (i = 0; i < the_lnet.ln_nportals; i++) {
                CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_ml));
                CFS_INIT_LIST_HEAD(&(the_lnet.ln_portals[i].ptl_msgq));
                the_lnet.ln_portals[i].ptl_options = 0;
        }

        return 0;

        /* unwind in strict reverse order of setup */
 failed3:
        lnet_fini_finalizers();
 failed2:
        lnet_destroy_peer_table();
 failed1:
        lnet_cleanup_handle_hash();
 failed0:
        lnet_descriptor_cleanup();

        return rc;
}

/* Undo lnet_prepare(): force-free every ME/MD/EQ/msg still outstanding
 * (complaining about each), then release portals, router pools, finalizers,
 * the peer table, the cookie hash and the descriptor freelists.
 * Always returns 0. */
int
lnet_unprepare (void)
{
        int idx;

        /* NB no LNET_LOCK since this is the last reference.  All LND instances
         * have shut down already, so it is safe to unlink and free all
         * descriptors, even those that appear committed to a network op (eg MD
         * with non-zero pending count) */

        lnet_fail_nid(LNET_NID_ANY, 0);

        LASSERT (list_empty(&the_lnet.ln_test_peers));
        LASSERT (the_lnet.ln_refcount == 0);
        LASSERT (list_empty(&the_lnet.ln_nis));
        LASSERT (list_empty(&the_lnet.ln_zombie_nis));
        LASSERT (the_lnet.ln_nzombie_nis == 0);

        /* drain every portal's match list; the msg queues must already
         * be empty */
        for (idx = 0; idx < the_lnet.ln_nportals; idx++) {
                LASSERT (list_empty(&the_lnet.ln_portals[idx].ptl_msgq));

                while (!list_empty (&the_lnet.ln_portals[idx].ptl_ml)) {
                        lnet_me_t *me = list_entry (the_lnet.ln_portals[idx].ptl_ml.next,
                                                    lnet_me_t, me_list);

                        CERROR ("Active me %p on exit\n", me);
                        list_del (&me->me_list);
                        lnet_me_free (me);
                }
        }

        while (!list_empty (&the_lnet.ln_active_mds)) {
                lnet_libmd_t *md = list_entry (the_lnet.ln_active_mds.next,
                                               lnet_libmd_t, md_list);

                CERROR ("Active md %p on exit\n", md);
                list_del (&md->md_list);
                lnet_md_free (md);
        }

        while (!list_empty (&the_lnet.ln_active_eqs)) {
                lnet_eq_t *eq = list_entry (the_lnet.ln_active_eqs.next,
                                            lnet_eq_t, eq_list);

                CERROR ("Active eq %p on exit\n", eq);
                list_del (&eq->eq_list);
                lnet_eq_free (eq);
        }

        while (!list_empty (&the_lnet.ln_active_msgs)) {
                lnet_msg_t *msg = list_entry (the_lnet.ln_active_msgs.next,
                                              lnet_msg_t, msg_activelist);

                CERROR ("Active msg %p on exit\n", msg);
                LASSERT (msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                list_del (&msg->msg_activelist);
                lnet_msg_free (msg);
        }

        LIBCFS_FREE(the_lnet.ln_portals,
                    the_lnet.ln_nportals * sizeof(*the_lnet.ln_portals));

        lnet_free_rtrpools();
        lnet_fini_finalizers();
        lnet_destroy_peer_table();
        lnet_cleanup_handle_hash();
        lnet_descriptor_cleanup();

        return (0);
}

/* Find the local NI on the given network, taking a reference on it;
 * NULL if this node has no NI on that net.  Caller holds LNET_LOCK. */
lnet_ni_t  *
lnet_net2ni_locked (__u32 net)
{
        struct list_head *tmp;
        lnet_ni_t        *ni;

        list_for_each (tmp,
                       &the_lnet.ln_nis) {
                ni = list_entry(tmp, lnet_ni_t, ni_list);

                if (lnet_ptlcompat_matchnet(LNET_NIDNET(ni->ni_nid), net)) {
                        /* caller receives a reference */
                        lnet_ni_addref_locked(ni);
                        return ni;
                }
        }

        return NULL;
}

/* Non-zero iff this node has an NI on the given network.  Takes and drops
 * a reference under LNET_LOCK; only the pointer's nullness is used after
 * the decref. */
int
lnet_islocalnet (__u32 net)
{
        lnet_ni_t        *ni;

        LNET_LOCK();
        ni = lnet_net2ni_locked(net);
        if (ni != NULL)
                lnet_ni_decref_locked(ni);
        LNET_UNLOCK();

        return ni != NULL;
}

/* Find the local NI with the given NID (portals-compat matching), taking a
 * reference on it; NULL if none.  Caller holds LNET_LOCK. */
lnet_ni_t  *
lnet_nid2ni_locked (lnet_nid_t nid)
{
        struct list_head *tmp;
        lnet_ni_t        *ni;

        list_for_each (tmp, &the_lnet.ln_nis) {
                ni = list_entry(tmp, lnet_ni_t, ni_list);

                if (lnet_ptlcompat_matchnid(ni->ni_nid, nid)) {
                        /* caller receives a reference */
                        lnet_ni_addref_locked(ni);
                        return ni;
                }
        }

        return NULL;
}

/* Non-zero iff the given NID belongs to one of this node's NIs.  Same
 * take-and-drop reference pattern as lnet_islocalnet(). */
int
lnet_islocalnid (lnet_nid_t nid)
{
        lnet_ni_t     *ni;

        LNET_LOCK();
        ni = lnet_nid2ni_locked(nid);
        if (ni != NULL)
                lnet_ni_decref_locked(ni);
        LNET_UNLOCK();

        return ni != NULL;
}

/* Count the NIs whose LND uses the acceptor; optionally hand the first one
 * back (with a reference) through *first_ni.  (Function continues beyond
 * this view.) */
int
lnet_count_acceptor_nis (lnet_ni_t **first_ni)
{
        /* Return the # of NIs that need the acceptor.  Return the first one in
         * *first_ni so the acceptor can pass it connections "blind" to retain
         * binary compatibility. */
        int                count = 0;

#if defined(__KERNEL__) || defined(HAVE_LIBPTHREAD)
        struct list_head  *tmp;
        lnet_ni_t         *ni;

        LNET_LOCK();
        list_for_each (tmp, &the_lnet.ln_nis) {
                ni = list_entry(tmp, lnet_ni_t, ni_list);

                if (ni->ni_lnd->lnd_accept != NULL) {
                        /* This LND uses the acceptor */
                        if (count == 0 && first_ni != NULL) {
                                lnet_ni_addref_locked(ni);
                                *first_ni = ni;
                        }
                        count++;
                }
        }

        LNET_UNLOCK();
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -