📄 ehca_mrmw.c
			if (!(*kpage)) {
				ehca_gen_err("pgaddr=%lx "
					     "chunk->page_list[i]=%lx "
					     "i=%x next_hwpage=%lx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

/*
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
				     int start_idx, int end_idx,
				     u64 *prev_pgaddr)
{
	int t;

	for (t = start_idx; t <= end_idx; t++) {
		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;

		ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
			     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%lx "
				     "prev_pgaddr=%lx page_list_i=%x",
				     pgaddr, *prev_pgaddr, t);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}
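/*
 * A minimal standalone sketch (not part of the driver) of the same
 * contiguity test as ehca_check_kpages_per_ate() above, reduced to plain
 * addresses so it compiles in userspace. DEMO_PAGE_SIZE and
 * demo_check_contiguous() are hypothetical names chosen here; the driver
 * gets PAGE_SIZE from the kernel headers.
 */
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096ULL	/* assumed 4 KiB kernel page */

static int demo_check_contiguous(const uint64_t *pgaddr, int n,
				 uint64_t *prev_pgaddr)
{
	int t;

	for (t = 0; t < n; t++) {
		/* each page must start exactly one page after its
		 * predecessor, as in the check above */
		if (pgaddr[t] - DEMO_PAGE_SIZE != *prev_pgaddr)
			return -1;	/* mirrors the -EINVAL above */
		*prev_pgaddr = pgaddr[t];
	}
	return 0;
}
/*
 * E.g. with *prev_pgaddr = 0x1000, the list {0x2000, 0x3000} passes,
 * while {0x2000, 0x4000} fails at index 1.
 */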
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = (page_to_pfn(
						  sg_page(&chunk->page_list[i]))
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr);
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%lx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%lx i=%x "
							"mr_pgsize=%lx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				ehca_gen_dbg("kpage=%lx chunk_page=%lx "
					     "value=%016lx", *kpage, pgaddr,
					     *(u64 *)abs_to_virt(
						     phys_to_abs(pgaddr)));
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret)
					return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}
			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret)
				return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
			  u32 number,
			  u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				    pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%lx num_kpages=%lx "
					     "hwpage_cnt=%lx "
					     "num_hwpages=%lx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
					     "next_hwpage=%lx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number)
				break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
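/*
 * A minimal standalone sketch (not part of the driver) of the num_hw/offs_hw
 * arithmetic above. DEMO_NUM_CHUNKS mirrors the driver's NUM_CHUNKS under
 * the assumption that it is the usual round-up division; all demo_* names
 * are hypothetical.
 */
#include <stdint.h>

#define DEMO_NUM_CHUNKS(len, sz) (((len) + (sz) - 1) / (sz))

static void demo_phys_buf_geometry(uint64_t addr, uint64_t size,
				   uint64_t hwpage_size,
				   uint64_t *num_hw, uint64_t *offs_hw)
{
	/* hwpages covering the buffer, counting the partial page that the
	 * unaligned start occupies (round-up division) */
	*num_hw = DEMO_NUM_CHUNKS((addr % hwpage_size) + size, hwpage_size);
	/* hwpage index of the aligned-down start address */
	*offs_hw = (addr & ~(hwpage_size - 1)) / hwpage_size;
}
/*
 * E.g. addr=0x11000, size=0x3000, hwpage_size=0x10000 (64 KiB):
 * num_hw = (0x1000 + 0x3000 + 0xffff) / 0x10000 = 1, offs_hw = 1.
 */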
int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
			 u32 number,
			 u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage *
				     pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_hwpage=%lx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;

			/* check if adrs are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));

				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%lx p=%lx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/
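/*
 * A minimal standalone sketch (not part of the driver) showing that the
 * forward and reverse ACL maps are bit-for-bit inverses. The DEMO_* flag
 * values are hypothetical stand-ins; the real IB_ACCESS_* and
 * HIPZ_ACCESSCTRL_* values live in the kernel and firmware headers.
 */
#include <stdint.h>
#include <assert.h>

#define DEMO_IB_REMOTE_READ	(1 << 0)
#define DEMO_IB_REMOTE_WRITE	(1 << 1)
#define DEMO_HIPZ_R_READ	(1 << 8)
#define DEMO_HIPZ_R_WRITE	(1 << 9)

static uint32_t demo_map_acl(int ib_acl)
{
	uint32_t hipz = 0;

	if (ib_acl & DEMO_IB_REMOTE_READ)
		hipz |= DEMO_HIPZ_R_READ;
	if (ib_acl & DEMO_IB_REMOTE_WRITE)
		hipz |= DEMO_HIPZ_R_WRITE;
	return hipz;
}

static int demo_reverse_map_acl(uint32_t hipz)
{
	int ib_acl = 0;

	if (hipz & DEMO_HIPZ_R_READ)
		ib_acl |= DEMO_IB_REMOTE_READ;
	if (hipz & DEMO_HIPZ_R_WRITE)
		ib_acl |= DEMO_IB_REMOTE_WRITE;
	return ib_acl;
}

/* round trip: reverse(map(x)) == x for the flags modeled here */
static void demo_acl_roundtrip(void)
{
	int acl = DEMO_IB_REMOTE_READ | DEMO_IB_REMOTE_WRITE;

	assert(demo_reverse_map_acl(demo_map_acl(acl)) == acl);
}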
/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags         = 0;
	mr->num_kpages    = 0;
	mr->num_hwpages   = 0;
	mr->acl           = 0;
	mr->start         = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps  = 0;
	mr->fmr_map_cnt   = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}
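/*
 * A minimal standalone sketch (not part of the driver) of the
 * "unwind on partial failure" pattern used by ehca_init_mrmw_cache()
 * above: if the second allocation fails, the first is released before
 * reporting the error. The demo_* names and the plain malloc/free
 * stand-ins for kmem_cache_create()/kmem_cache_destroy() are assumptions
 * for demonstration only.
 */
#include <stdlib.h>

static void *demo_cache_a;	/* stands in for mr_cache */
static void *demo_cache_b;	/* stands in for mw_cache */

static int demo_init(void)
{
	demo_cache_a = malloc(64);
	if (!demo_cache_a)
		return -1;		/* mirrors -ENOMEM */
	demo_cache_b = malloc(64);
	if (!demo_cache_b) {
		free(demo_cache_a);	/* unwind the first step */
		demo_cache_a = NULL;
		return -1;
	}
	return 0;
}

static void demo_cleanup(void)
{
	free(demo_cache_a);	/* free(NULL) is a no-op */
	free(demo_cache_b);
}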