⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mthca_srq.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 2 页
字号:
} /* NOTE(review): closes a definition that begins before this excerpt */

/*
 * Modify an SRQ.  Resizing (IB_SRQ_MAX_WR) is not supported and returns
 * -EINVAL.  IB_SRQ_LIMIT arms the SRQ limit event by issuing the ARM_SRQ
 * firmware command under srq->mutex; a nonzero firmware status is mapped
 * to -EINVAL.
 */
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		/* Mem-free (Arbel) reserves one WQE, so the usable max is one less */
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

/*
 * Query SRQ attributes.  Fetches the SRQ context from firmware into a
 * mailbox and extracts the limit watermark using the hardware-specific
 * context layout (Arbel/mem-free vs. Tavor).  max_wr and max_sge come
 * from the driver's cached values.
 */
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	/* The two HCA families lay out the SRQ context differently */
	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

/*
 * Dispatch an asynchronous event for SRQ number @srqn to the consumer's
 * event handler, if one is registered.  The refcount taken under the
 * table lock keeps the SRQ alive across the (unlocked) handler call;
 * the final reference holder does wake_up(&srq->wait) — presumably the
 * destroy path sleeps on that wait queue (destroy is outside this chunk).
 */
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	/* Mask the SRQN down to an index into the SRQ array */
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	/* Drop the reference; wake anyone waiting for the last ref to go */
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
/*
 * Return a completed receive WQE to the SRQ free list.  The free list is
 * a singly linked list threaded through the WQEs themselves
 * (wqe_to_link()); the freed entry becomes the new tail (link -1 marks
 * end of list), and if the list was empty it also becomes the head.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	/* Convert the hardware WQE address into an index */
	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

/*
 * Post a chain of receive work requests to a Tavor SRQ.  WQEs are taken
 * from the in-WQE free list; each new WQE is linked into the hardware
 * chain by patching the previous WQE's nda_op/ee_nds after the new WQE's
 * contents are written (wmb() enforces that order).  The receive
 * doorbell is rung in batches of at most MTHCA_TAVOR_MAX_WQES_PER_RECV_DB
 * WQEs.  On failure *bad_wr points at the offending WR.
 */
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;

		if (unlikely(ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			/* Undo the chain update; this WQE was never linked in */
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid-key segment */
		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		/* Link this WQE into the previous one only after it is written */
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

/*
 * Post a chain of receive work requests to an Arbel (mem-free) SRQ.
 * Unlike the Tavor path, no MMIO doorbell is needed per batch: the WQE's
 * next pointer is written to point at the next free entry, and a single
 * doorbell-record update (*srq->db) with the new counter publishes all
 * posted WQEs after a wmb().  On failure *bad_wr points at the offending
 * WR.
 */
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (unlikely(ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid-key segment */
		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

/*
 * Return the maximum number of scatter/gather entries an SRQ WQE on this
 * device can hold.
 */
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

/*
 * Initialize the per-device SRQ table: the SRQN allocator and the
 * SRQN -> mthca_srq lookup array.  A no-op when the device has no SRQ
 * support.  On array init failure the allocator is torn down again.
 */
int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

/*
 * Tear down the SRQ table set up by mthca_init_srq_table().
 */
void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -