📄 c2_provider.c
字号:
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */#include <linux/module.h>#include <linux/moduleparam.h>#include <linux/pci.h>#include <linux/netdevice.h>#include <linux/etherdevice.h>#include <linux/inetdevice.h>#include <linux/delay.h>#include <linux/ethtool.h>#include <linux/mii.h>#include <linux/if_vlan.h>#include <linux/crc32.h>#include <linux/in.h>#include <linux/ip.h>#include <linux/tcp.h>#include <linux/init.h>#include <linux/dma-mapping.h>#include <linux/if_arp.h>#include <linux/vmalloc.h>#include <asm/io.h>#include <asm/irq.h>#include <asm/byteorder.h>#include <rdma/ib_smi.h>#include <rdma/ib_umem.h>#include <rdma/ib_user_verbs.h>#include "c2.h"#include "c2_provider.h"#include "c2_user.h"static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props){ struct c2_dev *c2dev = to_c2dev(ibdev); pr_debug("%s:%u\n", __FUNCTION__, __LINE__); *props = c2dev->props; return 0;}static int c2_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); props->max_mtu = IB_MTU_4096; props->lid = 0; props->lmc = 0; props->sm_lid = 0; props->sm_sl = 0; props->state = IB_PORT_ACTIVE; props->phys_state = 0; props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->qkey_viol_cntr = 0; props->active_width = 1; props->active_speed = 1; return 0;}static int c2_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); return 0;}static int c2_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); *pkey = 0; return 0;}static int c2_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid){ struct c2_dev *c2dev = to_c2dev(ibdev); pr_debug("%s:%u\n", __FUNCTION__, __LINE__); memset(&(gid->raw[0]), 0, sizeof(gid->raw)); memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6); return 
0;}/* Allocate the user context data structure. This keeps track * of all objects associated with a particular user-mode client. */static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata){ struct c2_ucontext *context; pr_debug("%s:%u\n", __FUNCTION__, __LINE__); context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); return &context->ibucontext;}static int c2_dealloc_ucontext(struct ib_ucontext *context){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); kfree(context); return 0;}static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); return -ENOSYS;}static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata){ struct c2_pd *pd; int err; pr_debug("%s:%u\n", __FUNCTION__, __LINE__); pd = kmalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); err = c2_pd_alloc(to_c2dev(ibdev), !context, pd); if (err) { kfree(pd); return ERR_PTR(err); } if (context) { if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) { c2_pd_free(to_c2dev(ibdev), pd); kfree(pd); return ERR_PTR(-EFAULT); } } return &pd->ibpd;}static int c2_dealloc_pd(struct ib_pd *pd){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); c2_pd_free(to_c2dev(pd->device), to_c2pd(pd)); kfree(pd); return 0;}static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); return ERR_PTR(-ENOSYS);}static int c2_ah_destroy(struct ib_ah *ah){ pr_debug("%s:%u\n", __FUNCTION__, __LINE__); return -ENOSYS;}static void c2_add_ref(struct ib_qp *ibqp){ struct c2_qp *qp; BUG_ON(!ibqp); qp = to_c2qp(ibqp); atomic_inc(&qp->refcount);}static void c2_rem_ref(struct ib_qp *ibqp){ struct c2_qp *qp; BUG_ON(!ibqp); qp = to_c2qp(ibqp); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait);}struct ib_qp *c2_get_qp(struct ib_device *device, int qpn){ struct c2_dev* c2dev = 
to_c2dev(device); struct c2_qp *qp; qp = c2_find_qpn(c2dev, qpn); pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n", __FUNCTION__, qp, qpn, device, (qp?atomic_read(&qp->refcount):0)); return (qp?&qp->ibqp:NULL);}static struct ib_qp *c2_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata){ struct c2_qp *qp; int err; pr_debug("%s:%u\n", __FUNCTION__, __LINE__); switch (init_attr->qp_type) { case IB_QPT_RC: qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) { pr_debug("%s: Unable to allocate QP\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } spin_lock_init(&qp->lock); if (pd->uobject) { /* userspace specific */ } err = c2_alloc_qp(to_c2dev(pd->device), to_c2pd(pd), init_attr, qp); if (err && pd->uobject) { /* userspace specific */ } break; default: pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__, init_attr->qp_type); return ERR_PTR(-EINVAL); break; } if (err) { kfree(qp); return ERR_PTR(err); } return &qp->ibqp;}static int c2_destroy_qp(struct ib_qp *ib_qp){ struct c2_qp *qp = to_c2qp(ib_qp); pr_debug("%s:%u qp=%p,qp->state=%d\n", __FUNCTION__, __LINE__,ib_qp,qp->state); c2_free_qp(to_c2dev(ib_qp->device), qp); kfree(qp); return 0;}static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *context, struct ib_udata *udata){ struct c2_cq *cq; int err; cq = kmalloc(sizeof(*cq), GFP_KERNEL); if (!cq) { pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq); if (err) { pr_debug("%s: error initializing CQ\n", __FUNCTION__); kfree(cq); return ERR_PTR(err); } return &cq->ibcq;}static int c2_destroy_cq(struct ib_cq *ib_cq){ struct c2_cq *cq = to_c2cq(ib_cq); pr_debug("%s:%u\n", __FUNCTION__, __LINE__); c2_free_cq(to_c2dev(ib_cq->device), cq); kfree(cq); return 0;}static inline u32 c2_convert_access(int acc){ return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? 
C2_ACF_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) | C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;}static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 * iova_start){ struct c2_mr *mr; u64 *page_list; u32 total_len; int err, i, j, k, page_shift, pbl_depth; pbl_depth = 0; total_len = 0; page_shift = PAGE_SHIFT; /* * If there is only 1 buffer we assume this could * be a map of all phy mem...use a 32k page_shift. */ if (num_phys_buf == 1) page_shift += 3; for (i = 0; i < num_phys_buf; i++) { if (buffer_list[i].addr & ~PAGE_MASK) { pr_debug("Unaligned Memory Buffer: 0x%x\n", (unsigned int) buffer_list[i].addr); return ERR_PTR(-EINVAL); } if (!buffer_list[i].size) { pr_debug("Invalid Buffer Size\n"); return ERR_PTR(-EINVAL); } total_len += buffer_list[i].size; pbl_depth += ALIGN(buffer_list[i].size, (1 << page_shift)) >> page_shift; } page_list = vmalloc(sizeof(u64) * pbl_depth); if (!page_list) { pr_debug("couldn't vmalloc page_list of size %zd\n", (sizeof(u64) * pbl_depth)); return ERR_PTR(-ENOMEM); } for (i = 0, j = 0; i < num_phys_buf; i++) { int naddrs; naddrs = ALIGN(buffer_list[i].size, (1 << page_shift)) >> page_shift; for (k = 0; k < naddrs; k++) page_list[j++] = (buffer_list[i].addr + (k << page_shift)); } mr = kmalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { vfree(page_list); return ERR_PTR(-ENOMEM); } mr->pd = to_c2pd(ib_pd); mr->umem = NULL; pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, " "*iova_start %llx, first pa %llx, last pa %llx\n", __FUNCTION__, page_shift, pbl_depth, total_len, (unsigned long long) *iova_start, (unsigned long long) page_list[0], (unsigned long long) page_list[pbl_depth-1]); err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, (1 << page_shift), pbl_depth, total_len, 0, iova_start, c2_convert_access(acc), mr); vfree(page_list); if (err) { kfree(mr); return ERR_PTR(err); } return &mr->ibmr;}static struct ib_mr 
*c2_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva = 0;

	pr_debug("%s:%u\n", __func__, __LINE__);

	/* AMSO1100 limit */
	bl.size = 0xffffffff;
	bl.addr = 0;
	return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}

/*
 * Register a user memory region.
 * NOTE(review): the body of this function is truncated in this view;
 * only its declarations are visible here.
 */
static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				    u64 virt, int acc,
				    struct ib_udata *udata)
{
	u64 *pages;
	u64 kva = 0;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c2_pd *c2pd = to_c2pd(pd);
	struct c2_mr *c2mr;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -