/*
 * iwch_provider.c - Chelsio T3 (cxgb3) iWARP RDMA verbs provider (excerpt).
 */
/*
 * Tail of iwch_allocate_pd(): the signature and the declarations of
 * pdid/php/context/udata are above this excerpt.  Allocates a hardware
 * PD id, wraps it in an iwch_pd, and for userspace consumers copies the
 * pdid back through udata.
 */
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	/* A pdid of 0 signals exhaustion of the PD id resource pool. */
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		/* Return the pdid to the pool on allocation failure. */
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		/* Userspace caller: report the pdid via the udata response. */
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			/*
			 * NOTE(review): presumably iwch_deallocate_pd()
			 * releases the pdid and frees php — its body is not
			 * visible in this excerpt; verify there is no leak.
			 */
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

/*
 * iwch_dereg_mr - destroy a memory region.
 * @ib_mr: the MR to deregister.
 *
 * Tears down the hardware STag/PBL state, removes the mmid from the
 * device's MR table, frees the kernel bounce buffer (if any) and
 * releases the pinned user memory (if any), then frees the iwch_mr.
 *
 * Returns 0 on success, or -EINVAL while the MR is still in use
 * (memory windows may still be bound to it).
 */
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	/* The mmid is the STag with the low 8 key bits stripped. */
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
	kfree(mhp);
	return 0;
}

/*
 * iwch_register_phys_mem - register a list of physical buffers as an MR.
 * @pd:           protection domain to register against.
 * @buffer_list:  physical buffer (addr, size) array.
 * @num_phys_buf: number of entries in @buffer_list.
 * @acc:          IB access flags.
 * @iova_start:   in/out requested virtual start address.
 *
 * Validates page alignment of the buffers, builds a page list, and
 * programs the adapter's TPT/PBL via iwch_register_mem().
 * Returns the new ib_mr or an ERR_PTR on failure.
 */
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	/* With multiple buffers the first one must end on a page boundary. */
	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	mhp->rhp = rhp;
	/* NOTE: statement continues on the next source line of this excerpt. */
	mhp->attr.pdid =
php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	/* page_size is encoded as log2(page size) - 12 (4KB => 0). */
	mhp->attr.page_size = shift - 12;
	/* NOTE(review): length is truncated to 32 bits here — T3 limit. */
	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
	kfree(page_list);
	if (ret) {
		goto err;
	}
	return &mhp->ibmr;
err:
	kfree(mhp);
	return ERR_PTR(ret);
}

/*
 * iwch_reregister_phys_mem - modify an existing physical MR.
 * @mr:            the MR to modify.
 * @mr_rereg_mask: IB_MR_REREG_{PD,ACCESS,TRANS} bits selecting changes.
 * @pd:            new PD (when IB_MR_REREG_PD is set).
 * @buffer_list, @num_phys_buf, @iova_start: new translation
 *                (when IB_MR_REREG_TRANS is set).
 * @acc:           new access flags (when IB_MR_REREG_ACCESS is set).
 *
 * Builds the requested changes in a local copy (mh) and commits them to
 * the adapter; only on success are the changes reflected back into the
 * live iwch_mr.  Returns 0 or a negative errno.
 */
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	/* Stage all changes in a scratch copy; commit only on success. */
	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start, &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
	/* kfree(NULL) is a no-op when IB_MR_REREG_TRANS was not requested. */
	kfree(page_list);
	if (ret) {
		return ret;
	}
	/* Hardware update succeeded: mirror the changes into the live MR. */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

/*
 * iwch_reg_user_mr - register a user-memory MR.
 * @pd:     protection domain.
 * @start:  user virtual start address.
 * @length: length of the region in bytes.
 * @virt:   I/O virtual address to map to.
 * @acc:    IB access flags.
 * @udata:  user response buffer (PBL address is returned on T3B).
 *
 * Pins the user pages via ib_umem_get(), flattens the resulting chunk
 * list into a DMA address array, and registers it with the adapter.
 * (Body continues on the next source line of this excerpt.)
 */
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev
*rhp; struct iwch_pd *php; struct iwch_mr *mhp; struct iwch_reg_user_mr_resp uresp; PDBG("%s ib_pd %p\n", __FUNCTION__, pd); php = to_iwch_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); kfree(mhp); return ERR_PTR(err); } shift = ffs(mhp->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) n += chunk->nents; pages = kmalloc(n * sizeof(u64), GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err; } i = n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = cpu_to_be64(sg_dma_address( &chunk->page_list[j]) + mhp->umem->page_size * k); } } mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = iwch_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) length; mhp->attr.pbl_size = i; err = iwch_register_mem(rhp, php, mhp, shift, pages); kfree(pages); if (err) goto err; if (udata && t3b_device(rhp)) { uresp.pbl_addr = (mhp->attr.pbl_addr - rhp->rdev.rnic_info.pbl_base) >> 3; PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__, uresp.pbl_addr); if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { iwch_dereg_mr(&mhp->ibmr); err = -EFAULT; goto err; } } return &mhp->ibmr;err: ib_umem_release(mhp->umem); kfree(mhp); return ERR_PTR(err);}static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc){ struct ib_phys_buf bl; u64 kva; struct ib_mr *ibmr; PDBG("%s ib_pd %p\n", __FUNCTION__, pd); /* * T3 only supports 32 bits of size. 
*/
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

/*
 * iwch_alloc_mw - allocate a memory window.
 * @pd: protection domain the window belongs to.
 *
 * Allocates a hardware window STag, records it in the device's mmid
 * table, and returns the new ib_mw (or an ERR_PTR).
 */
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	/* The mmid is the STag with the low 8 key bits stripped. */
	mmid = (stag) >> 8;
	/*
	 * NOTE(review): insert_handle()'s return value is ignored here —
	 * verify it cannot fail, or handle failure.
	 */
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

/*
 * iwch_dealloc_mw - free a memory window.
 * @mw: the window to destroy.
 *
 * Releases the hardware window STag, drops the mmid from the device
 * table, and frees the iwch_mw.  Always returns 0.
 */
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	/*
	 * NOTE(review): mhp is already freed at this point; the PDBG only
	 * prints the stale pointer value, never dereferences it, but the
	 * trace would be cleaner emitted before the kfree().
	 */
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

/*
 * iwch_destroy_qp - tear down a queue pair.
 * @ib_qp: the QP to destroy.
 *
 * Moves the QP to ERROR, waits for the connection endpoint and all
 * references to drain, then releases the hardware queues.
 * (Body continues on the next source line of this excerpt.)
 */
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	/* Force the QP into ERROR so outstanding work flushes. */
	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	/* Wait for the connection endpoint to detach from the QP. */
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	/* Drop our reference and wait for all others to go away. */
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	/* User QPs release queue memory into their own uctx. */
	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq, ucontext ?
&ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

/*
 * iwch_create_qp - create an RC queue pair.
 * @pd:    protection domain.
 * @attrs: requested QP capabilities (queue depths, inline size, type).
 * @udata: user response buffer for userspace QPs.
 *
 * Validates and rounds the requested queue sizes to the power-of-two
 * depths T3 requires.  (Function is truncated at the end of this
 * excerpt; the remainder allocates and initializes the QP.)
 */
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	/* Only RC QPs are supported. */
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
/* NOTE(review): excerpt ends mid-way through iwch_create_qp(); code-viewer UI text removed. */