viblnd.c
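/* Tail of kibnal_free_tx_descs(): release each tx descriptor's per-tx
 * buffers (FMR page list, or work request/gather-list/RDMA descriptor),
 * then the descriptor array itself. */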
        for (i = 0; i < IBNAL_TX_MSGS(); i++) {
                kib_tx_t *tx = &kibnal_data.kib_tx_descs[i];

#if IBNAL_USE_FMR
                if (tx->tx_pages != NULL)
                        LIBCFS_FREE(tx->tx_pages,
                                    LNET_MAX_IOV * sizeof(*tx->tx_pages));
#else
                if (tx->tx_wrq != NULL)
                        LIBCFS_FREE(tx->tx_wrq,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_wrq));

                if (tx->tx_gl != NULL)
                        LIBCFS_FREE(tx->tx_gl,
                                    (1 + IBNAL_MAX_RDMA_FRAGS) *
                                    sizeof(*tx->tx_gl));

                if (tx->tx_rd != NULL)
                        LIBCFS_FREE(tx->tx_rd,
                                    offsetof(kib_rdma_desc_t,
                                             rd_frags[IBNAL_MAX_RDMA_FRAGS]));
#endif
        }

        LIBCFS_FREE(kibnal_data.kib_tx_descs,
                    IBNAL_TX_MSGS() * sizeof(kib_tx_t));
}

#if IBNAL_USE_FMR
void
kibnal_free_fmrs (int n)
{
        int          i;
        vv_return_t  vvrc;
        kib_tx_t    *tx;

        for (i = 0; i < n; i++) {
                tx = &kibnal_data.kib_tx_descs[i];

                vvrc = vv_free_fmr(kibnal_data.kib_hca,
                                   tx->tx_md.md_fmrhandle);
                if (vvrc != vv_return_ok)
                        CWARN("vv_free_fmr[%d]: %d\n", i, vvrc);
        }
}
#endif

int
kibnal_setup_tx_descs (void)
{
        int             ipage = 0;
        int             page_offset = 0;
        struct page    *page;
        kib_tx_t       *tx;
        vv_mem_reg_h_t  mem_h;
        vv_r_key_t      rkey;
        vv_return_t     vvrc;
        int             i;
        int             rc;
#if IBNAL_USE_FMR
        vv_fmr_t        fmr_props;
#endif

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);

        rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
                                IBNAL_TX_MSG_PAGES(), 0);
        if (rc != 0)
                return (rc);

        for (i = 0; i < IBNAL_TX_MSGS(); i++) {
                page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
                tx = &kibnal_data.kib_tx_descs[i];

#if IBNAL_USE_FMR
                memset(&fmr_props, 0, sizeof(fmr_props));
                fmr_props.pd_hndl              = kibnal_data.kib_pd;
                fmr_props.acl                  = (vv_acc_r_mem_write |
                                                  vv_acc_l_mem_write);
                fmr_props.max_pages            = LNET_MAX_IOV;
                fmr_props.log2_page_sz         = PAGE_SHIFT;
                fmr_props.max_outstanding_maps =
                        *kibnal_tunables.kib_fmr_remaps;

                vvrc = vv_alloc_fmr(kibnal_data.kib_hca,
                                    &fmr_props,
                                    &tx->tx_md.md_fmrhandle);
                if (vvrc != vv_return_ok) {
                        CERROR("Can't allocate fmr %d: %d\n", i, vvrc);

                        kibnal_free_fmrs(i);
                        kibnal_free_pages (kibnal_data.kib_tx_pages);
                        return -ENOMEM;
                }

                tx->tx_md.md_fmrcount = *kibnal_tunables.kib_fmr_remaps;
                tx->tx_md.md_active   = 0;
#endif
                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

                vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                            tx->tx_msg,
                                            IBNAL_MSG_SIZE,
                                            &mem_h,
                                            &tx->tx_lkey,
                                            &rkey);
                LASSERT (vvrc == vv_return_ok);

                CDEBUG(D_NET, "Tx[%d] %p->%p[%x]\n", i, tx,
                       tx->tx_msg, tx->tx_lkey);

                list_add (&tx->tx_list, &kibnal_data.kib_idle_txs);

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_TX_MSG_PAGES());
                }
        }

        return (0);
}
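/* Shut the NI down from whatever stage initialisation reached: the switch
 * starts at the recorded kib_init state and falls through case by case,
 * undoing each stage in reverse order of setup. */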
*/ "waiting for %d peers to disconnect\n", atomic_read(&kibnal_data.kib_npeers)); cfs_pause(cfs_time_seconds(1)); } /* fall through */ case IBNAL_INIT_CQ: vvrc = vv_cq_destroy(kibnal_data.kib_hca, kibnal_data.kib_cq); if (vvrc != vv_return_ok) CERROR ("Destroy CQ error: %d\n", vvrc); /* fall through */ case IBNAL_INIT_TXD: kibnal_free_pages (kibnal_data.kib_tx_pages);#if IBNAL_USE_FMR kibnal_free_fmrs(IBNAL_TX_MSGS());#endif /* fall through */ case IBNAL_INIT_PD:#if 0 /* Only deallocate a PD if we actually allocated one */ vvrc = vv_pd_deallocate(kibnal_data.kib_hca, kibnal_data.kib_pd); if (vvrc != vv_return_ok) CERROR ("Destroy PD error: %d\n", vvrc);#endif /* fall through */ case IBNAL_INIT_ASYNC: vvrc = vv_dell_async_event_cb (kibnal_data.kib_hca, kibnal_async_callback); if (vvrc != vv_return_ok) CERROR("vv_dell_async_event_cb error: %d\n", vvrc); /* fall through */ case IBNAL_INIT_HCA: vvrc = vv_hca_close(kibnal_data.kib_hca); if (vvrc != vv_return_ok) CERROR ("Close HCA error: %d\n", vvrc); /* fall through */ case IBNAL_INIT_DATA: LASSERT (atomic_read(&kibnal_data.kib_npeers) == 0); LASSERT (kibnal_data.kib_peers != NULL); for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) { LASSERT (list_empty (&kibnal_data.kib_peers[i])); } LASSERT (atomic_read (&kibnal_data.kib_nconns) == 0); LASSERT (list_empty (&kibnal_data.kib_connd_zombies)); LASSERT (list_empty (&kibnal_data.kib_connd_conns)); LASSERT (list_empty (&kibnal_data.kib_connd_pcreqs)); LASSERT (list_empty (&kibnal_data.kib_connd_peers)); /* flag threads to terminate; wake and wait for them to die */ kibnal_data.kib_shutdown = 1; wake_up_all (&kibnal_data.kib_sched_waitq); wake_up_all (&kibnal_data.kib_connd_waitq); i = 2; while (atomic_read (&kibnal_data.kib_nthreads) != 0) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? 
*/ "Waiting for %d threads to terminate\n", atomic_read (&kibnal_data.kib_nthreads)); cfs_pause(cfs_time_seconds(1)); } /* fall through */ case IBNAL_INIT_NOTHING: break; } kibnal_free_tx_descs(); if (kibnal_data.kib_peers != NULL) LIBCFS_FREE (kibnal_data.kib_peers, sizeof (struct list_head) * kibnal_data.kib_peer_hash_size); CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n", atomic_read (&libcfs_kmemory)); kibnal_data.kib_init = IBNAL_INIT_NOTHING; PORTAL_MODULE_UNUSE;}intkibnal_startup (lnet_ni_t *ni){ char scratch[32]; char ipif_name[32]; char *hca_name; __u32 ip; __u32 netmask; int up; int nob; int devno; struct timeval tv; int rc; int i; vv_request_event_record_t req_er; vv_return_t vvrc; LASSERT (ni->ni_lnd == &the_kiblnd); /* Only 1 instance supported */ if (kibnal_data.kib_init != IBNAL_INIT_NOTHING) { CERROR ("Only 1 instance supported\n"); return -EPERM; } if (*kibnal_tunables.kib_credits > *kibnal_tunables.kib_ntx) { CERROR ("Can't set credits(%d) > ntx(%d)\n", *kibnal_tunables.kib_credits, *kibnal_tunables.kib_ntx); return -EINVAL; } ni->ni_maxtxcredits = *kibnal_tunables.kib_credits; ni->ni_peertxcredits = *kibnal_tunables.kib_peercredits; CLASSERT (LNET_MAX_INTERFACES > 1); if (ni->ni_interfaces[0] != NULL) { /* Use the HCA specified in 'networks=' */ if (ni->ni_interfaces[1] != NULL) { CERROR("Multiple interfaces not supported\n"); return -EPERM; } /* Parse <hca base name><number> */ hca_name = ni->ni_interfaces[0]; nob = strlen(*kibnal_tunables.kib_hca_basename); if (strncmp(hca_name, *kibnal_tunables.kib_hca_basename, nob) || sscanf(hca_name + nob, "%d%n", &devno, &nob) < 1) { CERROR("Unrecognised HCA %s\n", hca_name); return -EINVAL; } } else { /* Use <hca base name>0 */ devno = 0; hca_name = scratch; snprintf(hca_name, sizeof(scratch), "%s%d", *kibnal_tunables.kib_hca_basename, devno); if (strlen(hca_name) == sizeof(scratch) - 1) { CERROR("HCA name %s truncated\n", hca_name); return -EINVAL; } } /* Find IP address from <ipif base name><hca number> */ snprintf(ipif_name, sizeof(ipif_name), "%s%d", *kibnal_tunables.kib_ipif_basename, devno); if (strlen(ipif_name) == sizeof(ipif_name) - 1) { CERROR("IPoIB interface name %s truncated\n", ipif_name); return -EINVAL; } rc = libcfs_ipif_query(ipif_name, &up, &ip, &netmask); if (rc != 0) { CERROR("Can't query IPoIB interface %s: %d\n", ipif_name, rc); return -ENETDOWN; } if (!up) { CERROR("Can't query IPoIB interface %s: it's down\n", ipif_name); return -ENETDOWN; } ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ip); PORTAL_MODULE_USE; memset (&kibnal_data, 0, sizeof (kibnal_data)); /* zero pointers, flags etc */ kibnal_data.kib_ni = ni; ni->ni_data = &kibnal_data; do_gettimeofday(&tv); kibnal_data.kib_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; rwlock_init(&kibnal_data.kib_global_lock); kibnal_data.kib_peer_hash_size = IBNAL_PEER_HASH_SIZE; LIBCFS_ALLOC (kibnal_data.kib_peers, sizeof (struct list_head) * ki