
📄 cxio_hal.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/delay.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <net/net_namespace.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"

static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (!strcmp(rdev->dev_name, dev_name))
			return rdev;
	return NULL;
}

static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (rdev->t3cdev_p == tdev)
			return rdev;
	return NULL;
}

int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
		   enum t3_cq_opcode op, u32 credit)
{
	int ret;
	struct t3_cqe *cqe;
	u32 rptr;
	struct rdma_cq_op setup;

	setup.id = cq->cqid;
	setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
	setup.op = op;
	ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

	if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
		return ret;

	/*
	 * If the rearm returned an index other than our current index,
	 * then there might be CQE's in flight (being DMA'd).  We must wait
	 * here for them to complete or the consumer can miss a notification.
	 */
	if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
		int i = 0;

		rptr = cq->rptr;

		/*
		 * Keep the generation correct by bumping rptr until it
		 * matches the index returned by the rearm - 1.
		 */
		while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
			rptr++;

		/*
		 * Now rptr is the index for the (last) cqe that was
		 * in-flight at the time the HW rearmed the CQ.  We
		 * spin until that CQE is valid.
		 */
		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
		while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
			udelay(1);
			if (i++ > 1000000) {
				BUG_ON(1);
				printk(KERN_ERR "%s: stalled rnic\n",
				       rdev_p->dev_name);
				return -EIO;
			}
		}

		return 1;
	}

	return 0;
}

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
	struct rdma_cq_setup setup;

	setup.id = cqid;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 0;		/* disable the CQ */
	setup.credits = 0;
	setup.credit_thres = 0;
	setup.ovfl_mode = 0;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
	u64 sge_cmd;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

	if (!skb) {
		PDBG("%s alloc_skb failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = qpid << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;
	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
	if (!cq->cqid)
		return -ENOMEM;
	cq->sw_queue = kzalloc(size, GFP_KERNEL);
	if (!cq->sw_queue)
		return -ENOMEM;
	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       (1UL << (cq->size_log2)) *
				       sizeof(struct t3_cqe),
				       &(cq->dma_addr), GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);
		return -ENOMEM;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, size);
	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = 65535;
	setup.credit_thres = 1;
	if (rdev_p->t3cdev_p->type == T3B)
		setup.ovfl_mode = 0;
	else
		setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;

	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = setup.size;
	setup.credit_thres = setup.size;	/* TBD: overflow recovery */
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;
	u32 qpid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
				   entry);
		list_del(&entry->entry);
		qpid = entry->qpid;
		kfree(entry);
	} else {
		qpid = cxio_hal_get_qpid(rdev_p->rscp);
		if (!qpid)
			goto out;
		for (i = qpid+1; i & rdev_p->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				break;
			entry->qpid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
	return qpid;
}

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
		     struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
	entry->qpid = qpid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct cxio_qpid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct cxio_qpid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qpid & rdev_p->qpmask))
			cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	mutex_init(&uctx->lock);
}

int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
		   struct t3_wq *wq, struct cxio_ucontext *uctx)
{
	int depth = 1UL << wq->size_log2;
	int rqsize = 1UL << wq->rq_size_log2;

	wq->qpid = get_qpid(rdev_p, uctx);
	if (!wq->qpid)
		return -ENOMEM;

	wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
	if (!wq->rq)
		goto err1;

	wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
	if (!wq->rq_addr)
		goto err2;

	wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
	if (!wq->sq)
		goto err3;

	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       depth * sizeof(union t3_wr),
				       &(wq->dma_addr), GFP_KERNEL);
	if (!wq->queue)
		goto err4;

	memset(wq->queue, 0, depth * sizeof(union t3_wr));
	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	if (!kernel_domain)
		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
					(wq->qpid << rdev_p->qpshift);
	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
	return 0;
err4:
	kfree(wq->sq);
err3:
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
	kfree(wq->rq);
err1:
	put_qpid(rdev_p, wq->qpid, uctx);
	return -ENOMEM;
}

int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	int err;

	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (cq->size_log2))
			  * sizeof(struct t3_cqe), cq->queue,
			  pci_unmap_addr(cq, mapping));
	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
	return err;
}

int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
		    struct cxio_ucontext *uctx)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (wq->size_log2))
			  * sizeof(union t3_wr), wq->queue,
			  pci_unmap_addr(wq, mapping));
	kfree(wq->sq);
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
	kfree(wq->rq);
	put_qpid(rdev_p, wq->qpid, uctx);
	return 0;
}

static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(T3_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	u32 ptr;

	PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);

	/* flush RQ */
	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
	     wq->rq_rptr, wq->rq_wptr, count);
	ptr = wq->rq_rptr + count;
	while (ptr++ != wq->rq_wptr)
		insert_recv_cqe(wq, cq);
}

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
			  struct t3_swsq *sqp)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(sqp->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	cqe.u.scqe.wrid_hi = sqp->sq_wptr;

	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	__u32 ptr;
	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);

	ptr = wq->sq_rptr + count;
	sqp += count;
	while (ptr != wq->sq_wptr) {
		insert_sq_cqe(wq, cq, sqp);
		sqp++;
		ptr++;
	}
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void cxio_flush_hw_cq(struct t3_cq *cq)
{
	struct t3_cqe *cqe, *swcqe;

	PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
	cqe = cxio_next_hw_cqe(cq);
	while (cqe) {
		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
		     __FUNCTION__, cq->rptr, cq->sw_wptr);
		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		cq->sw_wptr++;
