
📄 c2_qp.c

📁 Linux kernel source
💻 C
	qp->state = IB_QPS_RESET;
	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
	init_waitqueue_head(&qp->wait);

	/* Initialize the SQ MQ */
	q_size = be32_to_cpu(reply->sq_depth);
	msg_size = be32_to_cpu(reply->sq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail5;
	}

	c2_mq_req_init(&qp->sq_mq,
		       be32_to_cpu(reply->sq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the RQ mq */
	q_size = be32_to_cpu(reply->rq_depth);
	msg_size = be32_to_cpu(reply->rq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail6;
	}

	c2_mq_req_init(&qp->rq_mq,
		       be32_to_cpu(reply->rq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

      bail6:
	iounmap(qp->sq_mq.peer);
      bail5:
	destroy_qp(c2dev, qp);
      bail4:
	vq_repbuf_free(c2dev, reply);
      bail3:
	vq_req_free(c2dev, vq_req);
      bail2:
	c2_free_mqsp(qp->rq_mq.shared);
      bail1:
	c2_free_mqsp(qp->sq_mq.shared);
      bail0:
	c2_free_qpn(c2dev, qp->qpn);

	return err;
}

static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq > recv_cq) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq > recv_cq) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_cq *send_cq;
	struct c2_cq *recv_cq;

	send_cq = to_c2cq(qp->ibqp.send_cq);
	recv_cq = to_c2cq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	c2_lock_cqs(send_cq, recv_cq);
	c2_free_qpn(c2dev, qp->qpn);
	c2_unlock_cqs(send_cq, recv_cq);

	/*
	 * Destroy qp in the rnic...
	 */
	destroy_qp(c2dev, qp);

	/*
	 * Mark any unreaped CQEs as null and void.
	 */
	c2_cq_clean(c2dev, qp, send_cq->cqn);
	if (send_cq != recv_cq)
		c2_cq_clean(c2dev, qp, recv_cq->cqn);

	/*
	 * Unmap the MQs and return the shared pointers
	 * to the message pool.
	 */
	iounmap(qp->sq_mq.peer);
	iounmap(qp->rq_mq.peer);
	c2_free_mqsp(qp->sq_mq.shared);
	c2_free_mqsp(qp->rq_mq.shared);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/*
 * Function: move_sgl
 *
 * Description:
 * Move an SGL from the user's work request struct into a CCIL Work Request
 * message, swapping to WR byte order and ensuring the total length doesn't
 * overflow.
 *
 * IN:
 * dst		- ptr to CCIL Work Request message SGL memory.
 * src		- ptr to the consumer's SGL memory.
 *
 * OUT: none
 *
 * Return:
 * CCIL status codes.
 */
static int
move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
	 u8 * actual_count)
{
	u32 tot = 0;		/* running total */
	u8 acount = 0;		/* running total non-0 len sge's */

	while (count > 0) {
		/*
		 * If the addition of this SGE causes the
		 * total SGL length to exceed 2^32-1, then
		 * fail-n-bail.
		 *
		 * If the current total plus the next element length
		 * wraps, then it will go negative and be less than the
		 * current total...
		 */
		if ((tot + src->length) < tot) {
			return -EINVAL;
		}
		/*
		 * Bug: 1456 (as well as 1498 & 1643)
		 * Skip over any sge's supplied with len=0
		 */
		if (src->length) {
			tot += src->length;
			dst->stag = cpu_to_be32(src->lkey);
			dst->to = cpu_to_be64(src->addr);
			dst->length = cpu_to_be32(src->length);
			dst++;
			acount++;
		}
		src++;
		count--;
	}

	if (acount == 0) {
		/*
		 * Bug: 1476 (as well as 1498, 1456 and 1643)
		 * Setup the SGL in the WR to make it easier for the RNIC.
		 * This way, the FW doesn't have to deal with special cases.
		 * Setting length=0 should be sufficient.
		 */
		dst->stag = 0;
		dst->to = 0;
		dst->length = 0;
	}

	*p_len = tot;
	*actual_count = acount;
	return 0;
}

/*
 * Function: c2_activity (private function)
 *
 * Description:
 * Post an mq index to the host->adapter activity fifo.
 *
 * IN:
 * c2dev	- ptr to c2dev structure
 * mq_index	- mq index to post
 * shared	- value most recently written to shared
 *
 * OUT:
 *
 * Return:
 * none
 */
static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
{
	/*
	 * First read the register to see if the FIFO is full, and if so,
	 * spin until it's not.  This isn't perfect -- there is no
	 * synchronization among the clients of the register, but in
	 * practice it prevents multiple CPUs from hammering the bus
	 * with PCI RETRY. Note that when this does happen, the card
	 * cannot get on the bus and the card and system hang in a
	 * deadlock -- thus the need for this code. [TOT]
	 */
	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
		udelay(10);

	__raw_writel(C2_HINT_MAKE(mq_index, shared),
		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
}

/*
 * Function: qp_wr_post
 *
 * Description:
 * This in-line function allocates an MQ msg, then moves the host-copy of
 * the completed WR into msg.  Then it posts the message.
 *
 * IN:
 * q		- ptr to user MQ.
 * wr		- ptr to host-copy of the WR.
 * qp		- ptr to user qp
 * size		- Number of bytes to post.  Assumed to be divisible by 4.
 *
 * OUT: none
 *
 * Return:
 * CCIL status codes.
 */
static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
{
	union c2wr *msg;

	msg = c2_mq_alloc(q);
	if (msg == NULL) {
		return -EINVAL;
	}
#ifdef CCMSGMAGIC
	((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
#endif

	/*
	 * Since all header fields in the WR are the same as the
	 * CQE, set the following so the adapter need not.
	 */
	c2_wr_set_result(wr, CCERR_PENDING);

	/*
	 * Copy the wr down to the adapter
	 */
	memcpy((void *) msg, (void *) wr, size);

	c2_mq_produce(q);
	return 0;
}

int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		 struct ib_send_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	int err = 0;

	u32 flags;
	u32 tot_len;
	u8 actual_sge_count;
	u32 msg_size;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	while (ib_wr) {

		flags = 0;
		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
			flags |= SQ_SIGNALED;
		}

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
				msg_size = sizeof(struct c2wr_send_req);
			} else {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
				msg_size = sizeof(struct c2wr_send_req);
			}

			wr.sqwr.send.remote_stag = 0;
			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
			if (ib_wr->num_sge > qp->send_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_WRITE:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
			msg_size = sizeof(struct c2wr_rdma_write_req) +
			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			wr.sqwr.rdma_write.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_write.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			err = move_sgl((struct c2_data_addr *)
				       & (wr.sqwr.rdma_write.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_READ:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
			msg_size = sizeof(struct c2wr_rdma_read_req);

			/* IWarp only supports 1 sge for RDMA reads */
			if (ib_wr->num_sge > 1) {
				err = -EINVAL;
				break;
			}

			/*
			 * Move the local and remote stag/to/len into the WR.
			 */
			wr.sqwr.rdma_read.local_stag =
			    cpu_to_be32(ib_wr->sg_list->lkey);
			wr.sqwr.rdma_read.local_to =
			    cpu_to_be64(ib_wr->sg_list->addr);
			wr.sqwr.rdma_read.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_read.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			wr.sqwr.rdma_read.length =
			    cpu_to_be32(ib_wr->sg_list->length);
			break;
		default:
			/* error */
			msg_size = 0;
			err = -EINVAL;
			break;
		}

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		/*
		 * Store flags
		 */
		c2_wr_set_flags(&wr, flags);

		/*
		 * Post the puppy!
		 */
		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO.
		 */
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;

	return err;
}

int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
		    struct ib_recv_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	int err = 0;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	/*
	 * Try and post each work request
	 */
	while (ib_wr) {
		u32 tot_len;
		u8 actual_sge_count;

		if (ib_wr->num_sge > qp->recv_sgl_depth) {
			err = -EINVAL;
			break;
		}

		/*
		 * Create local host-copy of the WR
		 */
		wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		c2_wr_set_id(&wr, CCWR_RECV);
		c2_wr_set_flags(&wr, 0);

		/* sge_count is limited to eight bits. */
		BUG_ON(ib_wr->num_sge >= 256);
		err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
			       ib_wr->sg_list,
			       ib_wr->num_sge, &tot_len, &actual_sge_count);
		c2_wr_set_sge_count(&wr, actual_sge_count);

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO
		 */
		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;

	return err;
}

void __devinit c2_init_qp_table(struct c2_dev *c2dev)
{
	spin_lock_init(&c2dev->qp_table.lock);
	idr_init(&c2dev->qp_table.idr);
}

void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
{
	idr_destroy(&c2dev->qp_table.idr);
}
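For context on how the entry points above are reached: in-kernel RDMA consumers do not call c2_post_send() or c2_post_receive() directly; they post work requests through the generic verbs layer (ib_post_send()/ib_post_recv()), which dispatches to this driver for QPs on an AMSO1100 device. The sketch below is illustrative only, not part of c2_qp.c: it assumes the same old-style struct ib_send_wr layout this file uses (the wr.rdma union), and the function name, buffer address, keys, and wr_id are hypothetical placeholders.

/*
 * Minimal sketch (assumption: a kernel of the same era as this driver,
 * with <rdma/ib_verbs.h> providing the old-style ib_send_wr).  Posts a
 * single-SGE RDMA WRITE that ends up in c2_post_send() for a QP on this
 * device.  All addresses and keys are hypothetical placeholders; error
 * handling is omitted.
 */
static int example_post_rdma_write(struct ib_qp *qp, u64 local_dma_addr,
				   u32 lkey, u64 remote_addr, u32 rkey,
				   u32 len)
{
	struct ib_sge sge = {
		.addr   = local_dma_addr,	/* DMA address of the local buffer */
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id               = 1;			/* echoed back in the CQE */
	wr.opcode              = IB_WR_RDMA_WRITE;
	wr.send_flags          = IB_SEND_SIGNALED;	/* request a completion */
	wr.sg_list             = &sge;
	wr.num_sge             = 1;
	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey        = rkey;

	return ib_post_send(qp, &wr, &bad_wr);
}

The receive side is symmetric: ib_post_recv() reaches c2_post_receive(). Both paths reject a post with -EINVAL when the QP state is beyond RTS or when num_sge exceeds the corresponding SGL depth recorded during QP setup.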
