
📄 viblnd.c

📁 A classic distributed system
💻 C
📖 Page 1 of 5
#endif
                break;

        case IBNAL_MSG_GET_REQ:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.get)) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.get)));
                        return -EPROTO;
                }
#if IBNAL_USE_FMR
                if (flip) {
                        __swab64s(&msg->ibm_u.get.ibgm_rd.rd_addr);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nob);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                }
#else
                if (flip) {
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_key);
                        __swab32s(&msg->ibm_u.get.ibgm_rd.rd_nfrag);
                }

                n = msg->ibm_u.get.ibgm_rd.rd_nfrag;
                if (n <= 0 || n > IBNAL_MAX_RDMA_FRAGS) {
                        CERROR("Bad GET_REQ nfrags: %d, should be 0 < n <= %d\n",
                               n, IBNAL_MAX_RDMA_FRAGS);
                        return -EPROTO;
                }

                if (msg_nob < offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n])) {
                        CERROR("Short GET_REQ: %d(%d)\n", msg_nob,
                               (int)offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[n]));
                        return -EPROTO;
                }

                if (flip)
                        for (i = 0; i < msg->ibm_u.get.ibgm_rd.rd_nfrag; i++) {
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_nob);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_lo);
                                __swab32s(&msg->ibm_u.get.ibgm_rd.rd_frags[i].rf_addr_hi);
                        }
#endif
                break;

        case IBNAL_MSG_PUT_NAK:
        case IBNAL_MSG_PUT_DONE:
        case IBNAL_MSG_GET_DONE:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.completion)) {
                        CERROR("Short RDMA completion: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.completion)));
                        return -EPROTO;
                }
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBNAL_MSG_CONNREQ:
        case IBNAL_MSG_CONNACK:
                if (msg_nob < hdr_size + sizeof(msg->ibm_u.connparams)) {
                        CERROR("Short connreq/ack: %d(%d)\n", msg_nob,
                               (int)(hdr_size + sizeof(msg->ibm_u.connparams)));
                        return -EPROTO;
                }
                if (flip) {
                        __swab32s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_frags);
                }
                break;
        }
        return 0;
}

int
kibnal_start_listener (lnet_ni_t *ni)
{
        static cm_listen_data_t info;

        cm_return_t      cmrc;

        LASSERT (kibnal_data.kib_listen_handle == NULL);

        kibnal_data.kib_listen_handle =
                cm_create_cep(cm_cep_transp_rc);
        if (kibnal_data.kib_listen_handle == NULL) {
                CERROR ("Can't create listen CEP\n");
                return -ENOMEM;
        }

        CDEBUG(D_NET, "Created CEP %p for listening\n",
               kibnal_data.kib_listen_handle);

        memset(&info, 0, sizeof(info));
        info.listen_addr.end_pt.sid =
                (__u64)(*kibnal_tunables.kib_service_number);

        cmrc = cm_listen(kibnal_data.kib_listen_handle, &info,
                         kibnal_listen_callback, NULL);
        if (cmrc == cm_stat_success)
                return 0;

        CERROR ("cm_listen error: %d\n", cmrc);

        cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
        LASSERT (cmrc == cm_stat_success);

        kibnal_data.kib_listen_handle = NULL;
        return -EINVAL;
}

void
kibnal_stop_listener(lnet_ni_t *ni)
{
        cm_return_t      cmrc;

        LASSERT (kibnal_data.kib_listen_handle != NULL);

        cmrc = cm_cancel(kibnal_data.kib_listen_handle);
        if (cmrc != cm_stat_success)
                CERROR ("Error %d stopping listener\n", cmrc);

        cfs_pause(cfs_time_seconds(1)/10);   /* ensure no more callbacks */

        cmrc = cm_destroy_cep(kibnal_data.kib_listen_handle);
        if (cmrc != vv_return_ok)
                CERROR ("Error %d destroying CEP\n", cmrc);

        kibnal_data.kib_listen_handle = NULL;
}

int
kibnal_create_peer (kib_peer_t **peerp, lnet_nid_t nid)
{
        kib_peer_t     *peer;
        unsigned long   flags;
        int             rc;

        LASSERT (nid != LNET_NID_ANY);

        LIBCFS_ALLOC(peer, sizeof (*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return -ENOMEM;
        }

        memset(peer, 0, sizeof(*peer));         /* zero flags etc */

        peer->ibp_nid = nid;
        atomic_set (&peer->ibp_refcount, 1);    /* 1 ref for caller */

        INIT_LIST_HEAD (&peer->ibp_list);       /* not in the peer table yet */
        INIT_LIST_HEAD (&peer->ibp_conns);
        INIT_LIST_HEAD (&peer->ibp_tx_queue);

        peer->ibp_error = 0;
        peer->ibp_last_alive = cfs_time_current();
        peer->ibp_reconnect_interval = 0;       /* OK to connect at any time */

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        if (atomic_read(&kibnal_data.kib_npeers) >=
            *kibnal_tunables.kib_concurrent_peers) {
                rc = -EOVERFLOW;        /* !! but at least it distinguishes */
        } else if (kibnal_data.kib_listen_handle == NULL) {
                rc = -ESHUTDOWN;        /* shutdown has started */
        } else {
                rc = 0;
                /* npeers only grows with the global lock held */
                atomic_inc(&kibnal_data.kib_npeers);
        }

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);

        if (rc != 0) {
                CERROR("Can't create peer: %s\n",
                       (rc == -ESHUTDOWN) ? "shutting down" :
                       "too many peers");
                LIBCFS_FREE(peer, sizeof(*peer));
        } else {
                *peerp = peer;
        }

        return rc;
}

void
kibnal_destroy_peer (kib_peer_t *peer)
{
        LASSERT (atomic_read (&peer->ibp_refcount) == 0);
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (!kibnal_peer_active(peer));
        LASSERT (peer->ibp_connecting == 0);
        LASSERT (peer->ibp_accepting == 0);
        LASSERT (list_empty (&peer->ibp_conns));
        LASSERT (list_empty (&peer->ibp_tx_queue));

        LIBCFS_FREE (peer, sizeof (*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&kibnal_data.kib_npeers);
}

kib_peer_t *
kibnal_find_peer_locked (lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head *peer_list = kibnal_nid2peerlist (nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each (tmp, peer_list) {

                peer = list_entry (tmp, kib_peer_t, ibp_list);

                LASSERT (peer->ibp_persistence != 0 || /* persistent peer */
                         peer->ibp_connecting != 0 || /* creating conns */
                         peer->ibp_accepting != 0 ||
                         !list_empty (&peer->ibp_conns));  /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_nid2str(nid),
                       atomic_read (&peer->ibp_refcount));
                return (peer);
        }
        return (NULL);
}

void
kibnal_unlink_peer_locked (kib_peer_t *peer)
{
        LASSERT (peer->ibp_persistence == 0);
        LASSERT (list_empty(&peer->ibp_conns));

        LASSERT (kibnal_peer_active(peer));
        list_del_init (&peer->ibp_list);
        /* lose peerlist's ref */
        kibnal_peer_decref(peer);
}

int
kibnal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp,
                      int *persistencep)
{
        kib_peer_t        *peer;
        struct list_head  *ptmp;
        int                i;
        unsigned long      flags;

        read_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        for (i = 0; i < kibnal_data.kib_peer_hash_size; i++) {

                list_for_each (ptmp, &kibnal_data.kib_peers[i]) {

                        peer = list_entry (ptmp, kib_peer_t, ibp_list);
                        LASSERT (peer->ibp_persistence != 0 ||
                                 peer->ibp_connecting != 0 ||
                                 peer->ibp_accepting != 0 ||
                                 !list_empty (&peer->ibp_conns));

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *ipp = peer->ibp_ip;
                        *persistencep = peer->ibp_persistence;

                        read_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                               flags);
                        return (0);
                }
        }

        read_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (-ENOENT);
}

int
kibnal_add_persistent_peer (lnet_nid_t nid, __u32 ip)
{
        kib_peer_t        *peer;
        kib_peer_t        *peer2;
        unsigned long      flags;
        int                rc;

        CDEBUG(D_NET, "%s at %u.%u.%u.%u\n",
               libcfs_nid2str(nid), HIPQUAD(ip));

        if (nid == LNET_NID_ANY)
                return (-EINVAL);

        rc = kibnal_create_peer(&peer, nid);
        if (rc != 0)
                return rc;

        write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

        /* I'm always called with a reference on kibnal_data.kib_ni
         * so shutdown can't have started */
        LASSERT (kibnal_data.kib_listen_handle != NULL);

        peer2 = kibnal_find_peer_locked (nid);
        if (peer2 != NULL) {
                kibnal_peer_decref (peer);
                peer = peer2;
        } else {
                /* peer table takes existing ref on peer */
                list_add_tail (&peer->ibp_list,
                               kibnal_nid2peerlist (nid));
        }

        peer->ibp_ip = ip;
        peer->ibp_persistence++;

        write_unlock_irqrestore(&kibnal_data.kib_global_lock, flags);
        return (0);
}

void
kibnal_del_peer_locked (kib_peer_t *peer)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        peer->ibp_persistence = 0;

        if (list_empty(&peer->ibp_conns)) {
                kibnal_unlink_peer_locked(peer);
        } else {
                list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kibnal_close_conn_locked (conn, 0);
                }
                /* NB peer is no longer persistent; closing its last conn
                 * unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}

int
kibnal_del_peer (lnet_nid_t nid)
{
        CFS_LIST_HEAD     (zombies);
