openiblnd.c
int
kibnal_close_peer_conns_locked (kib_peer_t *peer, int why)
{
        kib_conn_t         *conn;
        struct list_head   *ctmp;
        struct list_head   *cnxt;
        int                 count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                count++;
                kibnal_close_conn_locked (conn, why);
        }

        return (count);
}

int
kibnal_close_stale_conns_locked (kib_peer_t *peer, __u64 incarnation)
{
        kib_conn_t         *conn;
        struct list_head   *ctmp;
        struct list_head   *cnxt;
        int                 count = 0;

        list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry (ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET, "Closing stale conn %p nid: %s"
                       " incarnation:"LPX64"("LPX64")\n", conn,
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_incarnation, incarnation);

                count++;
                kibnal_close_conn_locked (conn, -ESTALE);
        }

        return (count);
}

int
kibnal_close_matching_conns (lnet_nid_t nid)
{
        unsigned long       flags;
        kib_peer_t         *peer;
        struct list_head   *ptmp;
        struct list_head   *pnxt;
        int                 lo;
        int                 hi;
        int                 i;
        int                 count = 0;

        write_lock_irqsave (&kibnal_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 peer->ibp_accepting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kibnal_close_peer_conns_locked (peer, 0);
                }
        }

        write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return (0);

        return (count == 0 ? -ENOENT : 0);
}

int
kibnal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        LASSERT (ni == kibnal_data.kib_ni);

        switch(cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t   nid = 0;
                __u32        ip = 0;
                int          port = 0;
                int          share_count = 0;

                rc = kibnal_get_peer_info(data->ioc_count,
                                          &nid, &ip, &port, &share_count);
                data->ioc_nid    = nid;
                data->ioc_count  = share_count;
                data->ioc_u32[0] = ip;
                data->ioc_u32[1] = port;
                break;
        }
        case IOC_LIBCFS_ADD_PEER: {
                rc = kibnal_add_persistent_peer (data->ioc_nid,
                                                 data->ioc_u32[0], /* IP */
                                                 data->ioc_u32[1]); /* port */
                break;
        }
        case IOC_LIBCFS_DEL_PEER: {
                rc = kibnal_del_peer (data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
                kib_conn_t *conn = kibnal_get_conn_by_idx (data->ioc_count);

                if (conn == NULL)
                        rc = -ENOENT;
                else {
                        rc = 0;
                        data->ioc_nid = conn->ibc_peer->ibp_nid;
                        kibnal_conn_decref(conn);
                }
                break;
        }
        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kibnal_close_matching_conns (data->ioc_nid);
                break;
        }
        case IOC_LIBCFS_REGISTER_MYNID: {
                /* Ignore if this is a noop */
                if (data->ioc_nid == ni->ni_nid) {
                        rc = 0;
                } else {
                        CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
                               libcfs_nid2str(data->ioc_nid),
                               libcfs_nid2str(ni->ni_nid));
                        rc = -EINVAL;
                }
                break;
        }
        }

        return rc;
}

void
kibnal_free_pages (kib_pages_t *p)
{
        int     npages = p->ibp_npages;
        int     rc;
        int     i;

        if (p->ibp_mapped) {
                rc = ib_memory_deregister(p->ibp_handle);
                if (rc != 0)
                        CERROR ("Deregister error: %d\n", rc);
        }

        for (i = 0; i < npages; i++)
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);

        LIBCFS_FREE (p, offsetof(kib_pages_t, ibp_pages[npages]));
}

int
kibnal_alloc_pages (kib_pages_t **pp, int npages, int access)
{
        kib_pages_t                *p;
        struct ib_physical_buffer  *phys_pages;
        int                         i;
        int                         rc;

        LIBCFS_ALLOC(p, offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR ("Can't allocate buffer %d\n", npages);
                return (-ENOMEM);
        }

        memset (p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_page (GFP_KERNEL);
                if (p->ibp_pages[i] == NULL) {
                        CERROR ("Can't allocate page %d of %d\n", i, npages);
                        kibnal_free_pages(p);
                        return (-ENOMEM);
                }
        }

        LIBCFS_ALLOC(phys_pages, npages * sizeof(*phys_pages));
        if (phys_pages == NULL) {
                CERROR ("Can't allocate physarray for %d pages\n", npages);
                kibnal_free_pages(p);
                return (-ENOMEM);
        }

        for (i = 0; i < npages; i++) {
                phys_pages[i].size = PAGE_SIZE;
                phys_pages[i].address = lnet_page2phys(p->ibp_pages[i]);
        }

        p->ibp_vaddr = 0;
        rc = ib_memory_register_physical(kibnal_data.kib_pd,
                                         phys_pages, npages,
                                         &p->ibp_vaddr,
                                         npages * PAGE_SIZE, 0,
                                         access,
                                         &p->ibp_handle,
                                         &p->ibp_lkey,
                                         &p->ibp_rkey);

        LIBCFS_FREE(phys_pages, npages * sizeof(*phys_pages));

        if (rc != 0) {
                CERROR ("Error %d mapping %d pages\n", rc, npages);
                kibnal_free_pages(p);
                return (rc);
        }

        p->ibp_mapped = 1;
        *pp = p;
        return (0);
}

int
kibnal_setup_tx_descs (void)
{
        int           ipage = 0;
        int           page_offset = 0;
        __u64         vaddr;
        __u64         vaddr_base;
        struct page  *page;
        kib_tx_t     *tx;
        int           i;
        int           rc;

        /* pre-mapped messages are not bigger than 1 page */
        LASSERT (IBNAL_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        LASSERT (PAGE_SIZE % IBNAL_MSG_SIZE == 0);

        rc = kibnal_alloc_pages(&kibnal_data.kib_tx_pages,
                                IBNAL_TX_MSG_PAGES(),
                                0);            /* local read access only */
        if (rc != 0)
                return (rc);

        vaddr = vaddr_base = kibnal_data.kib_tx_pages->ibp_vaddr;

        for (i = 0; i < IBNAL_TX_MSGS(); i++) {
                page = kibnal_data.kib_tx_pages->ibp_pages[ipage];
                tx = &kibnal_data.kib_tx_descs[i];

                memset (tx, 0, sizeof(*tx));    /* zero flags etc */

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
                tx->tx_vaddr = vaddr;
                tx->tx_mapped = KIB_TX_UNMAPPED;

                CDEBUG(D_NET, "Tx[%d] %p->%p - "LPX64"\n",
                       i, tx, tx->tx_msg, tx->tx_vaddr);

                list_add (&tx->tx_list, &kibnal_data.kib_idle_txs);

                vaddr += IBNAL_MSG_SIZE;
                LASSERT (vaddr <= vaddr_base + IBNAL_TX_MSG_BYTES());

                page_offset += IBNAL_MSG_SIZE;
                LASSERT (page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT (ipage <= IBNAL_TX_MSG_PAGES());
                }
        }

        return (0);
}

void
kibnal_shutdown (lnet_ni_t *ni)
{
        int           i;
        int           rc;
        unsigned long flags;

        CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));

        LASSERT(ni == kibnal_data.kib_ni);
        LASSERT(ni->ni_data == &kibnal_data);

        switch (kibnal_data.kib_init) {
        default:
                CERROR ("Unexpected state %d\n", kibnal_data.kib_init);
                LBUG();

        case IBNAL_INIT_ALL:
                /* Prevent new peers from being created */
                write_lock_irqsave(&kibnal_data.kib_global_lock, flags);
                kibnal_data.kib_nonewpeers = 1;
                write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);

                kibnal_stop_ib_listener();

                /* Remove all existing peers from the peer table */
                kibnal_del_peer(LNET_NID_ANY);

                /* Wait for pending conn reqs to be handled */
                i = 2;
                spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
                while (!list_empty(&kibnal_data.kib_connd_acceptq)) {
                        spin_unlock_irqrestore(&kibnal_data.kib_connd_lock,
                                               flags);
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                               "waiting for conn reqs to clean up\n");
                        cfs_pause(cfs_time_seconds(1));

                        spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);
                }
                spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);

                /* Wait for all peer state to clean up */
                i = 2;
                while (atomic_read(&kibnal_data.kib_npeers) != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d peers to close down\n",
                               atomic_read(&kibnal_data.kib_npeers));
                        cfs_pause(cfs_time_seconds(1));
                }
                /* fall through */

        case IBNAL_INIT_CQ:
                rc = ib_cq_destroy (kibnal_data.kib_cq);
                if (rc != 0)
                        CERROR ("Destroy CQ error: %d\n", rc);
                /* fall through */

        case IBNAL_INIT_TXD:
                kibnal_free_pages (kibnal_data.kib_tx_pages);
                /* fall through */

#if IBNAL_FMR
        case IBNAL_INIT_FMR:
                rc = ib_fmr_pool_destroy (kibnal_data.kib_fmr_pool);
                if (rc != 0)
                        CERROR ("Destroy FMR pool error: %d\n", rc);
                /* fall through */
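
The loop in kibnal_setup_tx_descs() above tiles fixed-size message buffers across the pre-mapped pages: because PAGE_SIZE % IBNAL_MSG_SIZE == 0 is asserted up front, no message ever straddles a page boundary, and the (ipage, page_offset) pair can be advanced with simple addition. Below is a minimal standalone sketch of that arithmetic only; PAGE_SZ, MSG_SZ, and NMSGS are hypothetical stand-ins for PAGE_SIZE, IBNAL_MSG_SIZE, and IBNAL_TX_MSGS(), which the real code takes from the kernel and the LND headers.

/* Standalone sketch of the buffer-tiling arithmetic used by
 * kibnal_setup_tx_descs().  PAGE_SZ, MSG_SZ, and NMSGS are
 * hypothetical stand-ins; the real values come from the kernel
 * and the LND headers. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SZ 4096
#define MSG_SZ  1024            /* must divide PAGE_SZ evenly */
#define NMSGS   10

int
main (void)
{
        int ipage = 0;
        int page_offset = 0;
        int i;

        assert (PAGE_SZ % MSG_SZ == 0);  /* no message straddles a page */

        for (i = 0; i < NMSGS; i++) {
                printf ("msg %2d -> page %d, offset %4d\n",
                        i, ipage, page_offset);

                page_offset += MSG_SZ;
                if (page_offset == PAGE_SZ) {   /* page full: move on */
                        page_offset = 0;
                        ipage++;
                }
        }
        return 0;
}

Incidentally, the ((i & (-i)) == i) test in the kibnal_shutdown() wait loops is true exactly when i is a power of two, so the "waiting" message escalates to D_WARNING only on a geometrically thinning subset of iterations and stays at the quieter D_NET level otherwise.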