
📄 ehca_mrmw.c

📁 Linux kernel source
💻 C
📖 Page 1 of 5 (the listing below is truncated mid-function and continues on the next page)
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);		ret = -EINVAL;		goto rereg_phys_mr_exit0;	}	if (mr_rereg_mask & IB_MR_REREG_PD) {		if (!pd) {			ehca_err(mr->device, "rereg with bad pd, pd=%p "				 "mr_rereg_mask=%x", pd, mr_rereg_mask);			ret = -EINVAL;			goto rereg_phys_mr_exit0;		}	}	if ((mr_rereg_mask &	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||	    (mr_rereg_mask == 0)) {		ret = -EINVAL;		goto rereg_phys_mr_exit0;	}	/* check other parameters */	if (e_mr == shca->maxmr) {		/* should be impossible, however reject to be sure */		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "			 "shca->maxmr=%p mr->lkey=%x",			 mr, shca->maxmr, mr->lkey);		ret = -EINVAL;		goto rereg_phys_mr_exit0;	}	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */		if (e_mr->flags & EHCA_MR_FLAG_FMR) {			ehca_err(mr->device, "not supported for FMR, mr=%p "				 "flags=%x", mr, e_mr->flags);			ret = -EINVAL;			goto rereg_phys_mr_exit0;		}		if (!phys_buf_array || num_phys_buf <= 0) {			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"				 " phys_buf_array=%p num_phys_buf=%x",				 mr_rereg_mask, phys_buf_array, num_phys_buf);			ret = -EINVAL;			goto rereg_phys_mr_exit0;		}	}	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {		/*		 * Remote Write Access requires Local Write Access		 * Remote Atomic Access requires Local Write Access		 */		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);		ret = -EINVAL;		goto rereg_phys_mr_exit0;	}	/* set requested values dependent on rereg request */	spin_lock_irqsave(&e_mr->mrlock, sl_flags);	new_start = e_mr->start;	new_size = e_mr->size;	new_acl = e_mr->acl;	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);	if (mr_rereg_mask & IB_MR_REREG_TRANS) {		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);		new_start = iova_start;	/* change address */		/* check physical buffer list and calculate size */		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,						    num_phys_buf, iova_start,						    &new_size);		if (ret)			goto rereg_phys_mr_exit1;		if ((new_size == 0) ||		    (((u64)iova_start + new_size) < (u64)iova_start)) {			ehca_err(mr->device, "bad input values: new_size=%lx "				 "iova_start=%p", new_size, iova_start);			ret = -EINVAL;			goto rereg_phys_mr_exit1;		}		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +					new_size, PAGE_SIZE);		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +					 new_size, hw_pgsize);		memset(&pginfo, 0, sizeof(pginfo));		pginfo.type = EHCA_MR_PGI_PHYS;		pginfo.num_kpages = num_kpages;		pginfo.hwpage_size = hw_pgsize;		pginfo.num_hwpages = num_hwpages;		pginfo.u.phy.num_phys_buf = num_phys_buf;		pginfo.u.phy.phys_buf_array = phys_buf_array;		pginfo.next_hwpage =			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;	}	if (mr_rereg_mask & IB_MR_REREG_ACCESS)		new_acl = mr_access_flags;	if (mr_rereg_mask & IB_MR_REREG_PD)		new_pd = container_of(pd, struct ehca_pd, ib_pd);	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);	if (ret)		goto rereg_phys_mr_exit1;	/* successful reregistration */	if (mr_rereg_mask & IB_MR_REREG_PD)		mr->pd = pd;	mr->lkey = tmp_lkey;	mr->rkey = tmp_rkey;rereg_phys_mr_exit1:	spin_unlock_irqrestore(&e_mr->mrlock, 
sl_flags);rereg_phys_mr_exit0:	if (ret)		ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "			 "iova_start=%p",			 ret, mr, mr_rereg_mask, pd, phys_buf_array,			 num_phys_buf, mr_access_flags, iova_start);	return ret;} /* end ehca_rereg_phys_mr() *//*----------------------------------------------------------------------*/int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr){	int ret = 0;	u64 h_ret;	struct ehca_shca *shca =		container_of(mr->device, struct ehca_shca, ib_device);	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);	u32 cur_pid = current->tgid;	unsigned long sl_flags;	struct ehca_mr_hipzout_parms hipzout;	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&	    (my_pd->ownpid != cur_pid)) {		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",			 cur_pid, my_pd->ownpid);		ret = -EINVAL;		goto query_mr_exit0;	}	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);		ret = -EINVAL;		goto query_mr_exit0;	}	memset(mr_attr, 0, sizeof(struct ib_mr_attr));	spin_lock_irqsave(&e_mr->mrlock, sl_flags);	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);	if (h_ret != H_SUCCESS) {		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p "			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",			 h_ret, mr, shca->ipz_hca_handle.handle,			 e_mr->ipz_mr_handle.handle, mr->lkey);		ret = ehca2ib_return_code(h_ret);		goto query_mr_exit1;	}	mr_attr->pd = mr->pd;	mr_attr->device_virt_addr = hipzout.vaddr;	mr_attr->size = hipzout.len;	mr_attr->lkey = hipzout.lkey;	mr_attr->rkey = hipzout.rkey;	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);query_mr_exit1:	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);query_mr_exit0:	if (ret)		ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",			 ret, mr, mr_attr);	return ret;} /* end ehca_query_mr() *//*----------------------------------------------------------------------*/int ehca_dereg_mr(struct ib_mr *mr){	int ret = 0;	u64 h_ret;	struct ehca_shca *shca =		container_of(mr->device, struct ehca_shca, ib_device);	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);	u32 cur_pid = current->tgid;	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&	    (my_pd->ownpid != cur_pid)) {		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",			 cur_pid, my_pd->ownpid);		ret = -EINVAL;		goto dereg_mr_exit0;	}	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);		ret = -EINVAL;		goto dereg_mr_exit0;	} else if (e_mr == shca->maxmr) {		/* should be impossible, however reject to be sure */		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "			 "shca->maxmr=%p mr->lkey=%x",			 mr, shca->maxmr, mr->lkey);		ret = -EINVAL;		goto dereg_mr_exit0;	}	/* TODO: BUSY: MR still has bound window(s) */	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);	if (h_ret != H_SUCCESS) {		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p "			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,			 e_mr->ipz_mr_handle.handle, mr->lkey);		ret = ehca2ib_return_code(h_ret);		goto dereg_mr_exit0;	}	if (e_mr->umem)		ib_umem_release(e_mr->umem);	/* successful 
deregistration */	ehca_mr_delete(e_mr);dereg_mr_exit0:	if (ret)		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);	return ret;} /* end ehca_dereg_mr() *//*----------------------------------------------------------------------*/struct ib_mw *ehca_alloc_mw(struct ib_pd *pd){	struct ib_mw *ib_mw;	u64 h_ret;	struct ehca_mw *e_mw;	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);	struct ehca_shca *shca =		container_of(pd->device, struct ehca_shca, ib_device);	struct ehca_mw_hipzout_parms hipzout;	e_mw = ehca_mw_new();	if (!e_mw) {		ib_mw = ERR_PTR(-ENOMEM);		goto alloc_mw_exit0;	}	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,					 e_pd->fw_pd, &hipzout);	if (h_ret != H_SUCCESS) {		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li "			 "shca=%p hca_hndl=%lx mw=%p",			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));		goto alloc_mw_exit1;	}	/* successful MW allocation */	e_mw->ipz_mw_handle = hipzout.handle;	e_mw->ib_mw.rkey    = hipzout.rkey;	return &e_mw->ib_mw;alloc_mw_exit1:	ehca_mw_delete(e_mw);alloc_mw_exit0:	if (IS_ERR(ib_mw))		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);	return ib_mw;} /* end ehca_alloc_mw() *//*----------------------------------------------------------------------*/int ehca_bind_mw(struct ib_qp *qp,		 struct ib_mw *mw,		 struct ib_mw_bind *mw_bind){	/* TODO: not supported up to now */	ehca_gen_err("bind MW currently not supported by HCAD");	return -EPERM;} /* end ehca_bind_mw() *//*----------------------------------------------------------------------*/int ehca_dealloc_mw(struct ib_mw *mw){	u64 h_ret;	struct ehca_shca *shca =		container_of(mw->device, struct ehca_shca, ib_device);	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);	if (h_ret != H_SUCCESS) {		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p "			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,			 e_mw->ipz_mw_handle.handle);		return ehca2ib_return_code(h_ret);	}	/* successful deallocation */	ehca_mw_delete(e_mw);	return 0;} /* end ehca_dealloc_mw() *//*----------------------------------------------------------------------*/struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,			      int mr_access_flags,			      struct ib_fmr_attr *fmr_attr){	struct ib_fmr *ib_fmr;	struct ehca_shca *shca =		container_of(pd->device, struct ehca_shca, ib_device);	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);	struct ehca_mr *e_fmr;	int ret;	u32 tmp_lkey, tmp_rkey;	struct ehca_mr_pginfo pginfo;	u64 hw_pgsize;	/* check other parameters */	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {		/*		 * Remote Write Access requires Local Write Access		 * Remote Atomic Access requires Local Write Access		 */		ehca_err(pd->device, "bad input values: mr_access_flags=%x",			 mr_access_flags);		ib_fmr = ERR_PTR(-EINVAL);		goto alloc_fmr_exit0;	}	if (mr_access_flags & IB_ACCESS_MW_BIND) {		ehca_err(pd->device, "bad input values: mr_access_flags=%x",			 mr_access_flags);		ib_fmr = ERR_PTR(-EINVAL);		goto alloc_fmr_exit0;	}	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",			 fmr_attr->max_pages, fmr_attr->max_maps,			 
fmr_attr->page_shift);		ib_fmr = ERR_PTR(-EINVAL);		goto alloc_fmr_exit0;	}	hw_pgsize = 1 << fmr_attr->page_shift;	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",			 fmr_attr->page_shift);		ib_fmr = ERR_PTR(-EINVAL);		goto alloc_fmr_exit0;	}	e_fmr = ehca_mr_new();	if (!e_fmr) {		ib_fmr = ERR_PTR(-ENOMEM);		goto alloc_fmr_exit0;	}	e_fmr->flags |= EHCA_MR_FLAG_FMR;	/* register MR on HCA */	memset(&pginfo, 0, sizeof(pginfo));	pginfo.hwpage_size = hw_pgsize;	/*	 * pginfo.num_hwpages==0, ie register_rpages() will not be called	 * but deferred to map_phys_fmr()	 */	ret = ehca_reg_mr(shca, e_fmr, NULL,			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),			  mr_access_flags, e_pd, &pginfo,			  &tmp_lkey, &tmp_rkey);	if (ret) {		ib_fmr = ERR_PTR(ret);		goto alloc_fmr_exit1;	}	/* successful */	e_fmr->hwpage_size = hw_pgsize;	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;	e_fmr->fmr_max_pages = fmr_attr->max_pages;	e_fmr->fmr_max_maps = fmr_attr->max_maps;	e_fmr->fmr_map_cnt = 0;	return &e_fmr->ib.ib_fmr;alloc_fmr_exit1:	ehca_mr_delete(e_fmr);alloc_fmr_exit0:	return ib_fmr;} /* end ehca_alloc_fmr() *//*----------------------------------------------------------------------*/int ehca_map_phys_fmr(struct ib_fmr *fmr,		      u64 *page_list,		      int list_len,		      u64 iova){	int ret;	struct ehca_shca *shca =		container_of(fmr->device, struct ehca_shca, ib_device);	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);	struct ehca_mr_pginfo pginfo;	u32 tmp_lkey, tmp_rkey;	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",			 e_fmr, e_fmr->flags);		ret = -EINVAL;		goto map_phys_fmr_exit0;	}	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);	if (ret)		goto map_phys_fmr_exit0;	if (iova % e_fmr->fmr_page_size) {		/* only whole-numbered pages */		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",			 iova, e_fmr->fmr_page_size);		ret = -EINVAL;		goto map_phys_fmr_exit0;	}	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {		/* HCAD does not limit the maps, however trace this anyway */		ehca_info(fmr->device, "map limit exceeded, fmr=%p "			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);	}	memset(&pginfo, 0, sizeof(pginfo));	pginfo.type = EHCA_MR_PGI_FMR;
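For context, the sketch below shows how a kernel consumer of this era's InfiniBand verbs layer might exercise the FMR path that lands in ehca_alloc_fmr() and ehca_map_phys_fmr() above. It is a minimal, hypothetical example, not part of ehca_mrmw.c: the function name example_fmr_usage, its parameters, and the chosen attribute values are illustrative only, while ib_alloc_fmr(), ib_map_phys_fmr(), ib_unmap_fmr(), and ib_dealloc_fmr() are the real in-kernel verbs that this driver implements for eHCA hardware. The comments mirror the checks visible in the driver code above.

/*
 * Hedged usage sketch, assuming an already-allocated protection domain
 * "pd" and a valid physical page address "phys_page".
 */
#include <linux/err.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

static int example_fmr_usage(struct ib_pd *pd, u64 phys_page, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages  = 4,          /* must be nonzero, see ehca_alloc_fmr() */
		.max_maps   = 32,         /* soft limit; eHCA only traces overruns */
		.page_shift = PAGE_SHIFT, /* must be in shca->hca_cap_mr_pgsize */
	};
	u64 page_list[1] = { phys_page };
	LIST_HEAD(fmr_list);
	struct ib_fmr *fmr;
	int ret;

	/* remote write requires local write, as the driver validates */
	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
			   &attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* iova must be a multiple of fmr_page_size, or ehca returns -EINVAL */
	ret = ib_map_phys_fmr(fmr, page_list, 1, iova);
	if (!ret) {
		/* ... post work requests using fmr->lkey / fmr->rkey ... */
		list_add_tail(&fmr->list, &fmr_list);
		ib_unmap_fmr(&fmr_list);	/* unmap before deallocation */
	}
	ib_dealloc_fmr(fmr);
	return ret;
}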
