
📄 viblnd_cb.c

📁 Collection: a classic distributed system
💻 Language: C
📖 Page 1 of 5
                break;

        case IBNAL_MSG_PUT_NAK:
                rsrvd_credit = 1;               /* rdma reply (was pre-reserved) */

                CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kibnal_handle_completion(conn, IBNAL_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBNAL_MSG_PUT_ACK:
                rsrvd_credit = 1;               /* rdma reply (was pre-reserved) */

                spin_lock(&conn->ibc_lock);
                tx = kibnal_find_waiting_tx_locked(conn, IBNAL_MSG_PUT_REQ,
                                                   msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */

                rc2 = kibnal_init_rdma(tx, IBNAL_MSG_PUT_DONE,
                                       kibnal_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                if (tx->tx_status == 0 && rc2 < 0)
                        tx->tx_status = rc2;
                tx->tx_waiting = 0;             /* clear waiting and queue atomically */
                kibnal_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBNAL_MSG_PUT_DONE:
                /* This buffer was pre-reserved by not returning the credit
                 * when the PUT_REQ's buffer was reposted, so I just return it
                 * now */
                kibnal_handle_completion(conn, IBNAL_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBNAL_MSG_GET_REQ:
                rc = lnet_parse(kibnal_data.kib_ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                repost = rc < 0;                /* repost on error */
                break;

        case IBNAL_MSG_GET_DONE:
                rsrvd_credit = 1;               /* rdma reply (was pre-reserved) */

                kibnal_handle_completion(conn, IBNAL_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kibnal_close_conn(conn, rc);

        if (repost) {
                if (conn->ibc_version == IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD)
                        rsrvd_credit = 0;       /* peer isn't pre-reserving */

                kibnal_post_rx(rx, !rsrvd_credit, rsrvd_credit);
        }
}

void
kibnal_rx_complete (kib_rx_t *rx, vv_comp_status_t vvrc, int nob, __u64 rxseq)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        unsigned long flags;
        int           rc;

        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBNAL_CONN_ESTABLISHED)
                goto ignore;

        if (vvrc != vv_comp_status_success) {
                CERROR("Rx from %s failed: %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), vvrc);
                goto failed;
        }

        rc = kibnal_unpack_msg(msg, conn->ibc_version, nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        rx->rx_nob = nob;                       /* Can trust 'nob' now */

        if (!lnet_ptlcompat_matchnid(conn->ibc_peer->ibp_nid,
                                     msg->ibm_srcnid) ||
            !lnet_ptlcompat_matchnid(kibnal_data.kib_ni->ni_nid,
                                     msg->ibm_dstnid) ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != kibnal_data.kib_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_seq != rxseq) {
                CERROR ("Out-of-sequence rx from %s"
                        ": got "LPD64" but expected "LPD64"\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid),
                        msg->ibm_seq, rxseq);
                goto failed;
        }

        /* set time last known alive */
        kibnal_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBNAL_CONN_ESTABLISHED) {
                write_lock_irqsave(&kibnal_data.kib_global_lock, flags);

                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBNAL_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                                flags);
                        return;
                }
                write_unlock_irqrestore(&kibnal_data.kib_global_lock,
                                        flags);
        }

        kibnal_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kibnal_close_conn(conn, -EIO);
 ignore:
        /* Don't re-post rx & drop its ref on conn */
        kibnal_conn_decref(conn);
}

struct page *
kibnal_kvaddr_to_page (unsigned long vaddr)
{
        struct page *page;

        if (vaddr >= VMALLOC_START &&
            vaddr < VMALLOC_END) {
                page = vmalloc_to_page ((void *)vaddr);
                LASSERT (page != NULL);
                return page;
        }
#ifdef CONFIG_HIGHMEM
        if (vaddr >= PKMAP_BASE &&
            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
                /* No highmem pages only used for bulk (kiov) I/O */
                CERROR("find page for address in highmem\n");
                LBUG();
        }
#endif
        page = virt_to_page (vaddr);
        LASSERT (page != NULL);
        return page;
}

#if !IBNAL_USE_FMR
int
kibnal_append_rdfrag(kib_rdma_desc_t *rd, int active, struct page *page,
                     unsigned long page_offset, unsigned long len)
{
        kib_rdma_frag_t *frag = &rd->rd_frags[rd->rd_nfrag];
        vv_l_key_t       l_key;
        vv_r_key_t       r_key;
        __u64            addr;
        __u64            frag_addr;
        vv_mem_reg_h_t   mem_h;
        vv_return_t      vvrc;

        if (rd->rd_nfrag >= IBNAL_MAX_RDMA_FRAGS) {
                CERROR ("Too many RDMA fragments\n");
                return -EMSGSIZE;
        }

        /* Try to create an address that adaptor-tavor will munge into a valid
         * network address, given how it maps all phys mem into 1 region */
        addr = lnet_page2phys(page) + page_offset + PAGE_OFFSET;

        /* NB this relies entirely on there being a single region for the whole
         * of memory, since "high" memory will wrap in the (void *) cast! */
        vvrc = vv_get_gen_mr_attrib(kibnal_data.kib_hca,
                                    (void *)((unsigned long)addr),
                                    len, &mem_h, &l_key, &r_key);
        LASSERT (vvrc == vv_return_ok);

        if (active) {
                if (rd->rd_nfrag == 0) {
                        rd->rd_key = l_key;
                } else if (l_key != rd->rd_key) {
                        CERROR ("> 1 key for single RDMA desc\n");
                        return -EINVAL;
                }
                frag_addr = addr;
        } else {
                if (rd->rd_nfrag == 0) {
                        rd->rd_key = r_key;
                } else if (r_key != rd->rd_key) {
                        CERROR ("> 1 key for single RDMA desc\n");
                        return -EINVAL;
                }
                frag_addr = kibnal_addr2net(addr);
        }

        kibnal_rf_set(frag, frag_addr, len);

        CDEBUG(D_NET,"map frag [%d][%d %x %08x%08x] "LPX64"\n",
               rd->rd_nfrag, frag->rf_nob, rd->rd_key,
               frag->rf_addr_hi, frag->rf_addr_lo, frag_addr);

        rd->rd_nfrag++;
        return 0;
}

int
kibnal_setup_rd_iov(kib_tx_t *tx, kib_rdma_desc_t *rd,
                    vv_access_con_bit_mask_t access,
                    unsigned int niov, struct iovec *iov, int offset, int nob)
{
        /* active if I'm sending */
        int           active = ((access & vv_acc_r_mem_write) == 0);
        int           fragnob;
        int           rc;
        unsigned long vaddr;
        struct page  *page;
        int           page_offset;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT ((rd != tx->tx_rd) == !active);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        rd->rd_nfrag = 0;
        do {
                LASSERT (niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = kibnal_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR ("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                rc = kibnal_append_rdfrag(rd, active, page,
                                          page_offset, fragnob);
                if (rc != 0)
                        return rc;

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return 0;
}

int
kibnal_setup_rd_kiov (kib_tx_t *tx, kib_rdma_desc_t *rd,
                      vv_access_con_bit_mask_t access,
                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        /* active if I'm sending */
        int            active = ((access & vv_acc_r_mem_write) == 0);
        int            fragnob;
        int            rc;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT (nob > 0);
        LASSERT (nkiov > 0);
        LASSERT ((rd != tx->tx_rd) == !active);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT (nkiov > 0);
        }

        rd->rd_nfrag = 0;
        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                rc = kibnal_append_rdfrag(rd, active, kiov->kiov_page,
                                          kiov->kiov_offset + offset,
                                          fragnob);
                if (rc != 0)
                        return rc;

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return 0;
}
#else
int
kibnal_map_tx (kib_tx_t *tx, kib_rdma_desc_t *rd, int active,
               int npages, unsigned long page_offset, int nob)
{
        vv_return_t   vvrc;
        vv_fmr_map_t  map_props;

        LASSERT ((rd != tx->tx_rd) == !active);
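
A note on the rsrvd_credit flag in kibnal_handle_rx() above: messages that will be answered by an RDMA reply (PUT_NAK, PUT_ACK, PUT_DONE, GET_DONE) mark their receive buffer as "pre-reserved", i.e. its flow-control credit was held back rather than returned when the buffer was reposted, and kibnal_post_rx(rx, !rsrvd_credit, rsrvd_credit) selects whether an ordinary or a reserved credit goes back to the peer; peers at IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD do not pre-reserve, so the flag is cleared for them. The toy program below is only a minimal sketch of that pre-reserved-credit idea under those assumptions; all names are made up and it is not the kibnal implementation.

/* Toy sketch of the "pre-reserved credit" idea used by kibnal_handle_rx()
 * above.  All names here are hypothetical; this is not the kibnal code.
 * A receive buffer whose message will be answered by an RDMA reply keeps
 * its flow-control credit reserved until that reply has completed. */
#include <stdio.h>

struct toy_conn {
        int credits;    /* credits we may grant back to the peer now */
        int reserved;   /* credits held back for pending RDMA replies */
};

/* Repost a receive buffer: either return its credit immediately or keep
 * it reserved because an RDMA reply will reuse the buffer later. */
static void toy_post_rx(struct toy_conn *c, int credit, int rsrvd_credit)
{
        if (credit)
                c->credits++;
        if (rsrvd_credit)
                c->reserved++;
}

/* The RDMA reply has completed: the reserved credit can now be returned. */
static void toy_rdma_reply_done(struct toy_conn *c)
{
        if (c->reserved > 0) {
                c->reserved--;
                c->credits++;
        }
}

int main(void)
{
        struct toy_conn conn = { .credits = 0, .reserved = 0 };

        toy_post_rx(&conn, 0, 1);       /* e.g. a PUT_ACK: credit pre-reserved */
        toy_rdma_reply_done(&conn);     /* PUT_DONE sent: credit comes back */

        printf("credits=%d reserved=%d\n", conn.credits, conn.reserved);
        return 0;
}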
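
kibnal_setup_rd_iov() above clamps each RDMA fragment to both the bytes remaining in the current iovec entry and the bytes left in the current page, so no fragment ever crosses a page boundary. The standalone sketch below (hypothetical names, no page lookup or memory registration) shows just that page-bounded split.

/* Toy sketch of the page-bounded fragment walk performed by
 * kibnal_setup_rd_iov() above.  Hypothetical names only. */
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

static void toy_walk_frags(unsigned long vaddr, unsigned long nob)
{
        while (nob > 0) {
                unsigned long page_offset = vaddr & (TOY_PAGE_SIZE - 1);
                unsigned long fragnob = TOY_PAGE_SIZE - page_offset;

                if (fragnob > nob)
                        fragnob = nob;

                printf("frag: page 0x%lx offset %lu len %lu\n",
                       vaddr & ~(TOY_PAGE_SIZE - 1), page_offset, fragnob);

                vaddr += fragnob;
                nob   -= fragnob;
        }
}

int main(void)
{
        /* A 10000-byte buffer starting 100 bytes into a page spans three
         * page-bounded fragments: 3996 + 4096 + 1908 bytes. */
        toy_walk_frags(0x100064UL, 10000UL);
        return 0;
}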
