iiblnd.c
 * they are destroyed, so we can be assured that _all_ state to do
 * with this peer has been cleaned up when its refcount drops to
 * zero. */
        atomic_dec (&kibnal_data.kib_npeers);
}

/* the caller is responsible for accounting for the additional reference
 * that this creates */
kib_peer_t *
kibnal_find_peer_locked (lnet_nid_t nid)
{
        struct list_head *peer_list = kibnal_nid2peerlist (nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry (tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_persistence != 0 ||
                         kibnal_peer_connecting(peer) ||
                         !list_empty (&peer->ibp_conns));

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer %s (%d)\n",
                       libcfs_nid2str(nid),
                       atomic_read (&peer->ibp_refcount));
                return (peer);
        }
        return (NULL);
}

void
kibnal_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (list_empty(&peer->ibp_conns));

        LASSERT (kibnal_peer_active(peer));
        list_del_init (&peer->ibp_list);
        /* lose peerlist's ref */
        kibnal_peer_decref(peer);
}

int
kibnal_get_peer_info (int index, lnet_nid_t *nidp, int *persistencep)
{
        kib_peer_t        *peer;
        struct list_head  *ptmp;
        unsigned long      flags;
        int                i;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 kibnal_peer_connecting(peer) ||
                                 !list_empty (&peer->ibp_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *persistencep = peer->ibp_persistence;

                        read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                               flags);
                        return (0);
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (-ENOENT);
}

int
kibnal_add_persistent_peer (lnet_nid_t nid)
{
        unsigned long      flags;
        kib_peer_t        *peer;
        kib_peer_t        *peer2;
        int                rc;

        if (nid == LNET_NID_ANY)
                return (-EINVAL);

        rc = kibnal_create_peer(&peer, nid);
        if (rc != 0)
                return rc;

        write_lock_irqsave (&kibnal_data.kib_global_lock, flags);

        /* I'm always called with a reference on kibnal_data.kib_ni
         * so shutdown can't have started */
        LASSERT (kibnal_data.kib_listener_cep != NULL);

        peer2 = kibnal_find_peer_locked (nid);
        if (peer2 != NULL) {
                kibnal_peer_decref (peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                list_add_tail (&peer->ibp_list,
                               kibnal_nid2peerlist (nid));
        }

        peer->ibp_persistence++;

        write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);
        return (0);
}

void
kibnal_del_peer_locked (kib_peer_t *peer)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        peer->ibp_persistence = 0;

        if (list_empty(&peer->ibp_conns)) {
                kibnal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kibnal_close_conn_locked (conn, 0);
                }
                /* NB peer is no longer persistent; closing its last conn
                 * unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}
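
/* The lookups above all go through kibnal_nid2peerlist(), which is defined
 * in the iiblnd headers rather than in this excerpt.  The sketch below shows
 * the usual bucket-selection idiom, assuming a simple modulo hash over
 * kib_peer_hash_size buckets; the name example_nid2peerlist and the hash are
 * illustrative, not necessarily the driver's actual helper. */
static inline struct list_head *
example_nid2peerlist (lnet_nid_t nid)
{
        /* pick a bucket: the same NID always lands on the same list, so
         * kibnal_find_peer_locked() only has to walk one short chain.
         * kib_global_lock must be held, which is why the lookup helpers
         * above carry the _locked suffix. */
        unsigned int hash = ((unsigned int)nid) %
                            kibnal_data.kib_peer_hash_size;

        return (&kibnal_data.kib_peers[hash]);
}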
int
kibnal_del_peer (lnet_nid_t nid)
{
        unsigned long      flags;
        CFS_LIST_HEAD     (zombies);
        struct list_head  *ptmp;
        struct list_head  *pnxt;
        kib_peer_t        *peer;
        int                lo;
        int                hi;
        int                i;
        int                rc = -ENOENT;

        write_lock_irqsave (&kibnal_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY)
                lo = hi = kibnal_nid2peerlist(nid) - kibnal_data.kib_peers;
        else {
                lo = 0;
                hi = kibnal_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe (ptmp, pnxt, &kibnal_data.kib_peers[i]) {
                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 kibnal_peer_connecting(peer) ||
                                 !list_empty (&peer->ibp_conns));

                        if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer->ibp_tx_queue)) {
                                LASSERT (list_empty(&peer->ibp_conns));

                                list_splice_init(&peer->ibp_tx_queue,
                                                 &zombies);
                        }

                        kibnal_del_peer_locked (peer);
                        rc = 0;         /* matched something */
                }
        }

        write_unlock_irqrestore (&kibnal_data.kib_global_lock, flags);

        kibnal_txlist_done(&zombies, -EIO);

        return (rc);
}

kib_conn_t *
kibnal_get_conn_by_idx (int index)
{
        kib_peer_t        *peer;
        struct list_head  *ptmp;
        kib_conn_t        *conn;
        struct list_head  *ctmp;
        unsigned long      flags;
        int                i;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {
                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 kibnal_peer_connecting(peer) ||
                                 !list_empty (&peer->ibp_conns));

                        list_for_each (ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry (ctmp, kib_conn_t,
                                                   ibc_list);
                                kibnal_conn_addref(conn);
                                read_unlock_irqrestore(
                                        &kibnal_data.kib_global_lock, flags);
                                return (conn);
                        }
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (NULL);
}
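
/* kibnal_get_conn_by_idx() returns its connection with a reference already
 * taken (the kibnal_conn_addref() above), so the caller owns one ref and
 * must drop it when done.  A minimal usage sketch, assuming the usual
 * kibnal_conn_decref() counterpart from the iiblnd headers; the function
 * example_dump_conns is illustrative only, not part of the driver: */
static void
example_dump_conns (void)
{
        kib_conn_t *conn;
        int         i;

        /* enumerate every live connection by index until the table runs out */
        for (i = 0; (conn = kibnal_get_conn_by_idx(i)) != NULL; i++) {
                CDEBUG(D_NET, "conn[%d] -> %s\n", i,
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                /* drop the reference the lookup took for us */
                kibnal_conn_decref(conn);
        }
}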
int
kibnal_conn_rts(kib_conn_t *conn,
                __u32 qpn, __u8 resp_res, __u8 init_depth, __u32 psn)
{
        IB_PATH_RECORD           *path = &conn->ibc_cvars->cv_path;
        IB_HANDLE                 qp = conn->ibc_qp;
        IB_QP_ATTRIBUTES_MODIFY   modify_attr;
        FSTATUS                   frc;
        int                       rc;

        if (resp_res > kibnal_data.kib_hca_attrs.MaxQPResponderResources)
                resp_res = kibnal_data.kib_hca_attrs.MaxQPResponderResources;

        if (init_depth > kibnal_data.kib_hca_attrs.MaxQPInitiatorDepth)
                init_depth = kibnal_data.kib_hca_attrs.MaxQPInitiatorDepth;

        modify_attr = (IB_QP_ATTRIBUTES_MODIFY) {
                .RequestState       = QPStateReadyToRecv,
                .RecvPSN            = IBNAL_STARTING_PSN,
                .DestQPNumber       = qpn,
                .ResponderResources = resp_res,
                .MinRnrTimer        = UsecToRnrNakTimer(2000), /* 20 ms */
                .Attrs              = (IB_QP_ATTR_RECVPSN |
                                       IB_QP_ATTR_DESTQPNUMBER |
                                       IB_QP_ATTR_RESPONDERRESOURCES |
                                       IB_QP_ATTR_DESTAV |
                                       IB_QP_ATTR_PATHMTU |
                                       IB_QP_ATTR_MINRNRTIMER),
        };
        GetAVFromPath(0, path, &modify_attr.PathMTU, NULL,
                      &modify_attr.DestAV);

        frc = iba_modify_qp(qp, &modify_attr, NULL);
        if (frc != FSUCCESS) {
                CERROR("Can't set QP %s ready to receive: %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), frc);
                return -EIO;
        }

        rc = kibnal_post_receives(conn);
        if (rc != 0) {
                CERROR("Can't post receives for %s: %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                return rc;
        }

        modify_attr = (IB_QP_ATTRIBUTES_MODIFY) {
                .RequestState    = QPStateReadyToSend,
                .FlowControl     = TRUE,
                .InitiatorDepth  = init_depth,
                .SendPSN         = psn,
                .LocalAckTimeout = path->PktLifeTime + 2, /* 2 or 1? */
                .RetryCount      = IBNAL_RETRY,
                .RnrRetryCount   = IBNAL_RNR_RETRY,
                .Attrs           = (IB_QP_ATTR_FLOWCONTROL |
                                    IB_QP_ATTR_INITIATORDEPTH |
                                    IB_QP_ATTR_SENDPSN |
                                    IB_QP_ATTR_LOCALACKTIMEOUT |
                                    IB_QP_ATTR_RETRYCOUNT |
                                    IB_QP_ATTR_RNRRETRYCOUNT),
        };

        frc = iba_modify_qp(qp, &modify_attr, NULL);
        if (frc != FSUCCESS) {
                CERROR("Can't set QP %s ready to send: %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), frc);
                return -EIO;
        }

        frc = iba_query_qp(conn->ibc_qp, &conn->ibc_cvars->cv_qpattrs, NULL);
        if (frc != FSUCCESS) {
                CERROR ("Can't query QP %s attributes: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), frc);
                return -EIO;
        }

        return 0;
}

kib_conn_t *
kibnal_create_conn (lnet_nid_t nid, int proto_version)
{
        kib_conn_t  *conn;
        int          i;
        int          page_offset;
        int          ipage;
        int          rc;
        FSTATUS      frc;
        union {
                IB_QP_ATTRIBUTES_CREATE    qp_create;
                IB_QP_ATTRIBUTES_MODIFY    qp_attr;
        } params;

        LIBCFS_ALLOC (conn, sizeof (*conn));
        if (conn == NULL) {
                CERROR ("Can't allocate connection for %s\n",
                        libcfs_nid2str(nid));
                return (NULL);
        }

        /* zero flags, NULL pointers etc... */
        memset (conn, 0, sizeof (*conn));

        conn->ibc_state = IBNAL_CONN_INIT_NOTHING;
        conn->ibc_version = proto_version;

        INIT_LIST_HEAD (&conn->ibc_early_rxs);
        INIT_LIST_HEAD (&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD (&conn->ibc_tx_queue);
        INIT_LIST_HEAD (&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD (&conn->ibc_active_txs);
        spin_lock_init (&conn->ibc_lock);

        atomic_inc (&kibnal_data.kib_nconns);
        /* well not really, but I call destroy() on failure, which
         * decrements */

        LIBCFS_ALLOC(conn->ibc_cvars, sizeof (*conn->ibc_cvars));
        if (conn->ibc_cvars == NULL) {
                CERROR ("Can't allocate connvars for %s\n",
                        libcfs_nid2str(nid));
                goto failed;
        }
        memset(conn->ibc_cvars, 0, sizeof (*conn->ibc_cvars));

        LIBCFS_ALLOC(conn->ibc_rxs, IBNAL_RX_MSGS * sizeof (kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX descriptors for %s\n",
                       libcfs_nid2str(nid));
                goto failed;
        }
        memset (conn->ibc_rxs, 0, IBNAL_RX_MSGS * sizeof(kib_rx_t));

        rc = kibnal_alloc_pages(&conn->ibc_rx_pages, IBNAL_RX_MSG_PAGES);
        if (rc != 0) {
                CERROR("Can't allocate RX buffers for %s\n",
                       libcfs_nid2str(nid));
                goto failed;
        }

        for (i = ipage = page_offset = 0; i < IBNAL_RX_MSGS; i++) {
                struct page *page = conn->ibc_rx_pages->ibp_pages[ipage];
                kib_rx_t    *rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);
                rx->rx_hca_msg = kibnal_data.kib_whole_mem.md_addr +
                                 lnet_page2phys(page) + page_offset;