📄 ehca_mrmw.c
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}
ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx "
			 "mr_hndl=%lx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey);
	if (ret) {
		/* restore the saved MR state on failure */
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_fmr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&			/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      pginfo->hwpage_size));
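For reference, a minimal standalone sketch of the page-count arithmetic that ehca_reg_internal_maxmr() performs above: the region size plus its offset within the first page is rounded up to whole pages, once for kernel pages and once for the HCA hardware page size. The NUM_CHUNKS definition below is assumed to be the usual round-up division macro from ehca_tools.h, and the iova_start, size, and hardware page size values are purely illustrative, not values taken from a real HCA.

/* Hypothetical, self-contained sketch; not part of ehca_mrmw.c. */
#include <stdio.h>

/* assumed to match the round-up division macro in ehca_tools.h */
#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size) - 1) / (chunk_size))

int main(void)
{
	unsigned long long iova_start = 0xc000000000000000ULL; /* e.g. KERNELBASE */
	unsigned long long size_maxmr = 1ULL << 30;             /* 1 GiB, illustrative */
	unsigned long long page_size  = 4096;                   /* PAGE_SIZE */
	unsigned long long hw_pgsize  = 65536;                  /* illustrative max HW page size */

	/* same formula as in ehca_reg_internal_maxmr() */
	unsigned long long num_kpages =
		NUM_CHUNKS((iova_start % page_size) + size_maxmr, page_size);
	unsigned long long num_hwpages =
		NUM_CHUNKS((iova_start % hw_pgsize) + size_maxmr, hw_pgsize);

	printf("num_kpages=%llu num_hwpages=%llu\n", num_kpages, num_hwpages);
	return 0;
}

With these sample values the start address is page aligned, so the offset terms are zero and the counts reduce to size_maxmr / page_size and size_maxmr / hw_pgsize; an unaligned start would add one extra page where needed.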