
📄 cxio_hal.c

📁 Linux kernel source
💻 C
📖 Page 1 of 3
		cq->rptr++;
		cqe = cxio_next_hw_cqe(cq);
	}
}

static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
		return 0;

	return 1;
}

void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) &&
		    (CQE_QPID(*cqe) == wq->qpid))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __FUNCTION__, *count);
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
		    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
	struct rdma_cq_setup setup;
	setup.id = 0;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 1;		/* enable the CQ */
	setup.credits = 0;	/* force SGE to redirect to RspQ and interrupt */
	setup.credit_thres = 0;
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
	int err;
	u64 sge_cmd, ctx0, ctx1;
	u64 base_addr;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		PDBG("%s alloc_skb failed\n", __FUNCTION__);
		return -ENOMEM;
	}
	err = cxio_hal_init_ctrl_cq(rdev_p);
	if (err) {
		PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
		goto err;
	}
	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
					&(rdev_p->rnic_info.pdev->dev),
					(1 << T3_CTRL_QP_SIZE_LOG2) *
					sizeof(union t3_wr),
					&(rdev_p->ctrl_qp.dma_addr),
					GFP_KERNEL);
	if (!rdev_p->ctrl_qp.workq) {
		PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
		err = -ENOMEM;
		goto err;
	}
	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
			   rdev_p->ctrl_qp.dma_addr);
	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	memset(rdev_p->ctrl_qp.workq, 0,
	       (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));

	mutex_init(&rdev_p->ctrl_qp.lock);
	init_waitqueue_head(&rdev_p->ctrl_qp.waitq);

	/* update HW Ctrl QP context */
	base_addr = rdev_p->ctrl_qp.dma_addr;
	base_addr >>= 12;
	ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
		V_EC_BASE_LO((u32) base_addr & 0xffff));
	ctx0 <<= 32;
	ctx0 |= V_EC_CREDITS(FW_WR_NUM);
	base_addr >>= 16;
	ctx1 = (u32) base_addr;
	base_addr >>= 32;
	ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
			V_EC_TYPE(0) | V_EC_GEN(1) |
			V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
		       T3_CTL_QP_TID, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	wqe->ctx1 = cpu_to_be64(ctx1);
	wqe->ctx0 = cpu_to_be64(ctx0);
	PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
	     (unsigned long long) rdev_p->ctrl_qp.dma_addr,
	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
err:
	kfree_skb(skb);
	return err;
}
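A note on the queue arithmetic used throughout this file: the CQE-counting loops above and the control-QP producer loop below all work with free-running 32-bit read/write pointers rather than wrapped indices. A slot index is recovered by masking against the power-of-two queue size, emptiness is pointer equality, and the generation bit flips every time the pointer crosses a queue-size boundary. The macros live in cxio_wr.h and are not part of this listing; the following stand-alone sketch uses assumed equivalents to illustrate the arithmetic, not the driver's actual definitions:

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalents of the cxio_wr.h queue macros (defined elsewhere) */
#define Q_EMPTY(rptr, wptr)	((rptr) == (wptr))
#define Q_FULL(rptr, wptr, log2)	\
	((((wptr) - (rptr)) >> (log2)) && ((wptr) != (rptr)))
#define Q_PTR2IDX(ptr, log2)	((ptr) & ((1UL << (log2)) - 1))
#define Q_GENBIT(ptr, log2)	(!(((ptr) >> (log2)) & 0x1))

int main(void)
{
	const unsigned log2 = 3;	/* 8-entry queue for the demo */
	uint32_t rptr = 6, wptr = 6;

	/* rptr == wptr: nothing to consume */
	printf("empty=%d\n", Q_EMPTY(rptr, wptr));

	/* produce 8 entries: pointers keep counting past the queue size,
	 * the index wraps and the genbit flips at the wrap */
	for (wptr = rptr; wptr < rptr + 8; wptr++)
		printf("wptr=%u idx=%lu gen=%d\n",
		       wptr, Q_PTR2IDX(wptr, log2), Q_GENBIT(wptr, log2));

	/* 8 outstanding entries in an 8-slot queue: full */
	printf("full=%d\n", Q_FULL(rptr, wptr, log2));
	return 0;
}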
static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << T3_CTRL_QP_SIZE_LOG2)
			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}

/* write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 * caller acquires the ctrl_qp lock before the call
 */
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
				      u32 len, void *data, int completion)
{
	u32 i, nr_wqe, copy_len;
	u8 *copy_data;
	u8 wr_len, utx_len;	/* length in 8-byte flits */
	enum t3_wr_flags flag;
	__be64 *wqe;
	u64 utx_cmd;

	addr &= 0x7FFFFFF;
	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
	     __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
	     nr_wqe, data, addr);
	utx_len = 3;		/* in 32B units */
	for (i = 0; i < nr_wqe; i++) {
		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
		           T3_CTRL_QP_SIZE_LOG2)) {
			PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
			     "wait for more space i %d\n", __FUNCTION__,
			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     !Q_FULL(rdev_p->ctrl_qp.rptr,
						     rdev_p->ctrl_qp.wptr,
						     T3_CTRL_QP_SIZE_LOG2))) {
				PDBG("%s ctrl_qp workq interrupted\n",
				     __FUNCTION__);
				return -ERESTARTSYS;
			}
			PDBG("%s ctrl_qp wakeup, continue posting work request "
			     "i %d\n", __FUNCTION__, i);
		}
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
						(1 << T3_CTRL_QP_SIZE_LOG2)));
		flag = 0;
		if (i == (nr_wqe - 1)) {
			/* last WQE */
			flag = completion ? T3_COMPLETION_FLAG : 0;
			if (len % 32)
				utx_len = len / 32 + 1;
			else
				utx_len = len / 32;
		}

		/*
		 * Force a CQE to return the credit to the workq in case
		 * we posted more than half the max QP size of WRs
		 */
		if ((i != 0) &&
		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
			flag = T3_COMPLETION_FLAG;
			PDBG("%s force completion at i %d\n", __FUNCTION__, i);
		}

		/* build the utx mem command */
		wqe += (sizeof(struct t3_bypass_wr) >> 3);
		utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
		utx_cmd <<= 32;
		utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
		*wqe = cpu_to_be64(utx_cmd);
		wqe++;
		copy_data = (u8 *) data + i * 96;
		copy_len = len > 96 ? 96 : len;

		/* clear memory content if data is NULL */
		if (data)
			memcpy(wqe, copy_data, copy_len);
		else
			memset(wqe, 0, copy_len);
		if (copy_len % 32)
			memset(((u8 *) wqe) + copy_len, 0,
			       32 - (copy_len % 32));
		wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
			 (utx_len << 2);
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
			      (1 << T3_CTRL_QP_SIZE_LOG2)));

		/* wptr in the WRID[31:0] */
		((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;

		/*
		 * This must be the last write with a memory barrier
		 * for the genbit
		 */
		build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
			       Q_GENBIT(rdev_p->ctrl_qp.wptr,
					T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
			       wr_len);
		if (flag == T3_COMPLETION_FLAG)
			ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
		len -= 96;
		rdev_p->ctrl_qp.wptr++;
	}
	return 0;
}
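cxio_hal_ctrl_qp_write_mem splits a write into at most 96 data bytes per work request. Only the last WQE recomputes its UTX length (in 32-byte units, so at most 3 per WQE), and any partial trailing 32-byte unit is zero-padded. The following stand-alone C program mirrors that arithmetic exactly as it appears in the loop above, so the chunk counts can be checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the chunking arithmetic in cxio_hal_ctrl_qp_write_mem:
 * 96 data bytes max per WQE, UTX length counted in 32B units. */
static void chunk(uint32_t len)
{
	uint32_t nr_wqe = len % 96 ? len / 96 + 1 : len / 96;
	uint32_t i, utx_len = 3;	/* full 96B (3 x 32B) until the last WQE */

	printf("len=%u -> nr_wqe=%u\n", len, nr_wqe);
	for (i = 0; i < nr_wqe; i++) {
		uint32_t copy_len = len > 96 ? 96 : len;

		if (i == nr_wqe - 1)	/* last WQE covers the remainder */
			utx_len = len % 32 ? len / 32 + 1 : len / 32;
		printf("  wqe %u: copy %u bytes, utx_len %u (pad %u)\n",
		       i, copy_len, utx_len,
		       copy_len % 32 ? 32 - copy_len % 32 : 0);
		len -= 96;	/* free-running, like the driver loop */
	}
}

int main(void)
{
	chunk(40);	/* one WQE: 40 bytes plus 24 bytes of zero padding */
	chunk(200);	/* three WQEs: 96 + 96 + 8 */
	return 0;
}

The UTX address also advances by 3 per iteration (addr + i * 3) because the destination address is in 32-byte units, matching the 96-byte data stride.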
/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
 * OUT: stag index, actual pbl_size, pbl_addr allocated.
 * TBD: shared memory region support
 */
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
			 u32 *stag, u8 stag_state, u32 pdid,
			 enum tpt_mem_type type, enum tpt_mem_perm perm,
			 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
			 u32 *pbl_size, u32 *pbl_addr)
{
	int err;
	struct tpt_entry tpt;
	u32 stag_idx;
	u32 wptr;
	int rereg = (*stag != T3_STAG_UNSET);

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
		stag_idx = cxio_hal_get_stag(rdev_p->rscp);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __FUNCTION__, stag_state, type, pdid, stag_idx);

	if (reset_tpt_entry)
		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
	else if (!rereg) {
		*pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
		if (!*pbl_addr) {
			return -ENOMEM;
		}
	}

	mutex_lock(&rdev_p->ctrl_qp.lock);

	/* write PBL first if any - update pbl only if pbl list exists */
	if (pbl) {
		PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
		     *pbl_size);
		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
				(*pbl_addr >> 5),
				(*pbl_size << 3), pbl, 0);
		if (err)
			goto ret;
	}

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
				V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
				V_TPT_STAG_STATE(stag_state) |
				V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
		BUG_ON(page_size >= 28);
		tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
				F_TPT_MW_BIND_ENABLE |
				V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
				V_TPT_PAGE_SIZE(page_size));
		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
				    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
		tpt.len = cpu_to_be32(len);
		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
		tpt.rsvd_bind_cnt_or_pstag = 0;
		tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
				    cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
	}
	err = cxio_hal_ctrl_qp_write_mem(rdev_p,
				       stag_idx +
				       (rdev_p->rnic_info.tpt_base >> 5),
				       sizeof(tpt), &tpt, 1);

	/* release the stag index to free pool */
	if (reset_tpt_entry)
		cxio_hal_put_stag(rdev_p->rscp, stag_idx);
ret:
	wptr = rdev_p->ctrl_qp.wptr;
	mutex_unlock(&rdev_p->ctrl_qp.lock);
	if (!err)
		if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     SEQ32_GE(rdev_p->ctrl_qp.rptr,
						      wptr)))
			return -ERESTARTSYS;
	return err;
}
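After posting, __cxio_tpt_op sleeps until the control QP's read pointer catches up with the write pointer it captured under the lock. Because both pointers are free-running u32 counters, the comparison must be wraparound-safe. SEQ32_GE is defined elsewhere (cxio_hal.h) and is not shown in this listing; assuming it implements the usual serial-number style "greater or equal" for 32-bit counters, a minimal user-space sketch of the idea:

#include <stdio.h>
#include <stdint.h>

/* Assumed semantics of SEQ32_GE: wraparound-safe "x >= y" for
 * free-running 32-bit sequence counters (actual macro lives in
 * cxio_hal.h, not in this listing). */
static int seq32_ge(uint32_t x, uint32_t y)
{
	return (int32_t)(x - y) >= 0;
}

int main(void)
{
	/* plain case: rptr has passed wptr */
	printf("%d\n", seq32_ge(105, 100));			/* 1 */
	/* wrapped case: 0x00000002 is "after" 0xfffffffe */
	printf("%d\n", seq32_ge(0x00000002, 0xfffffffeu));	/* 1 */
	/* not yet: rptr still behind wptr */
	printf("%d\n", seq32_ge(100, 105));			/* 0 */
	return 0;
}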
int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			   u8 page_size, __be64 *pbl, u32 *pbl_size,
			   u32 *pbl_addr)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			   u8 page_size, __be64 *pbl, u32 *pbl_size,
			   u32 *pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
		   u32 pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     &pbl_size, &pbl_addr);
}

int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid)
{
	u32 pbl_size = 0;
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
			     NULL, &pbl_size, NULL);
}

int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     NULL, NULL);
}

int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
	struct t3_rdma_init_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
					   V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
	wqe->wrid.id1 = 0;
	wqe->qpid = cpu_to_be32(attr->qpid);
	wqe->pdid = cpu_to_be32(attr->pdid);
	wqe->scqid = cpu_to_be32(attr->scqid);
	wqe->rcqid = cpu_to_be32(attr->rcqid);
	wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
	wqe->rq_size = cpu_to_be32(attr->rq_size);
	wqe->mpaattrs = attr->mpaattrs;
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->flags = cpu_to_be32(attr->flags);
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = ev_cb;
}

void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = NULL;
}

static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
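The registration entry points above (cxio_register_phys_mem, cxio_reregister_phys_mem, cxio_dereg_mem, and the window calls) all delegate to __cxio_tpt_op, which treats the 32-bit STag as an 8-bit consumer key in the low byte and a TPT index in the upper 24 bits (stag_idx = (*stag) >> 8; *stag = (stag_idx << 8) | ((*stag) & 0xFF)). A stand-alone sketch of that layout:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the STag layout used by __cxio_tpt_op above:
 * bits [31:8] = TPT index, bits [7:0] = consumer key. */
static uint32_t stag_pack(uint32_t stag_idx, uint8_t key)
{
	return (stag_idx << 8) | key;
}

int main(void)
{
	uint32_t stag = stag_pack(0x1234, 0xab);

	printf("stag      0x%08x\n", stag);		/* 0x001234ab */
	printf("stag_idx  0x%x\n", stag >> 8);		/* 0x1234 */
	printf("key       0x%x\n", stag & 0xFF);	/* 0xab */
	return 0;
}

This split is why cxio_register_phys_mem first sets *stag = T3_STAG_UNSET: it signals __cxio_tpt_op to allocate a fresh index from the stag pool while preserving the key byte the caller supplied.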
