📄 iwch_provider.c
字号:
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"

/* Port modification is not implemented by this provider. */
static int iwch_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props)
{
	return -ENOSYS;
}

/* Address handles are not implemented by this provider. */
static struct ib_ah *iwch_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

/* Multicast attach/detach is not implemented by this provider. */
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

/* MAD processing is not implemented by this provider. */
static int iwch_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

/*
 * Tear down a user context: free every pending mmap entry, release the
 * low-level (cxio) context state, then free the context itself.
 */
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __FUNCTION__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

/*
 * Allocate and initialize a per-process user context.  The mmaps list
 * (guarded by mmap_lock) records queue/doorbell regions handed to
 * userspace for later matching in iwch_mmap().
 *
 * Returns the embedded ib_ucontext, or ERR_PTR(-ENOMEM).
 */
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

/*
 * Destroy a CQ: remove its id mapping, drop the creation reference and
 * wait until all other references are gone before freeing the HW queue.
 */
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	/* Block until any concurrent users release their references. */
	wait_event(chp->wait, !atomic_read(&chp->refcnt));
	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

/*
 * Create a completion queue of at least 'entries' entries (rounded up
 * to a power of two).  For user CQs, the queue's physical address is
 * recorded as an mmap entry keyed by a per-context cookie, and the
 * cqid/size/key triple is copied back to userspace via udata.
 *
 * On non-T3A devices the user also passes in the address of its
 * read-pointer word (ureq.user_rptr_addr), which iwch_arm_cq() reads
 * when re-arming the CQ.
 *
 * Returns the embedded ib_cq, or ERR_PTR(-ENOMEM / -EFAULT).
 */
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, struct ib_ucontext *ib_context, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	/* ibcq.cqe reports usable entries: one less than the ring size. */
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		/* Hand out a unique, PAGE_SIZE-spaced mmap cookie. */
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n", chp->cq.cqid, chp, (1 << chp->cq.size_log2), (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

/*
 * CQ resize — compiled out ("notyet"); currently returns -ENOSYS.
 * The disabled path quiesces the QPs on the CQ, allocates a new HW
 * queue, copies the CQEs across, swaps queues while keeping the old
 * cqid, and resumes the QPs.
 */
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) * sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n", __FUNCTION__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

/*
 * Re-arm the CQ for notification (solicited-only or any event).  For
 * user CQs, first refresh the SW read pointer from the address the
 * user registered at create time, under the CQ lock.
 *
 * Returns 0, a positive value only when IB_CQ_REPORT_MISSED_EVENTS is
 * set and the HW reports outstanding CQEs, or a negative errno.
 */
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		/* get_user() may fault, so it must run before taking the lock. */
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

/*
 * mmap handler for user queues and doorbells.  The (key, len) pair in
 * the vma must match an entry previously stashed by a create verb; the
 * entry is consumed (one-shot).  Addresses inside the device's user
 * doorbell BAR are mapped uncached and write-only; anything else is
 * assumed to be queue memory and mapped with remap_pfn_range().
 */
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) && (addr < (rdev_p->rnic_info.udbell_physbase + rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, len, vma->vm_page_prot);
	}

	return ret;
}

/* Release a protection domain: return its pdid to the resource pool. */
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -