⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mthca_qp.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 5 页
字号:
/* * Copyright (c) 2004 Topspin Communications.  All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses.  You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * *     Redistribution and use in source and binary forms, with or *     without modification, are permitted provided that the following *     conditions are met: * *      - Redistributions of source code must retain the above *        copyright notice, this list of conditions and the following *        disclaimer. * *      - Redistributions in binary form must reproduce the above *        copyright notice, this list of conditions and the following *        disclaimer in the documentation and/or other materials *        provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */

/*
 * Queue pair (QP) support for the mthca InfiniBand HCA driver
 * (Mellanox Tavor/Arbel).  This portion of the file defines the
 * hardware QP context layout, IB<->hardware state/flag translation
 * helpers, and the beginning of mthca_query_qp().
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

/* Hardware QP states, as encoded in the top nibble of the context flags. */
enum {
	MTHCA_QP_STATE_RST  = 0,
	MTHCA_QP_STATE_INIT = 1,
	MTHCA_QP_STATE_RTR  = 2,
	MTHCA_QP_STATE_RTS  = 3,
	MTHCA_QP_STATE_SQE  = 4,
	MTHCA_QP_STATE_SQD  = 5,
	MTHCA_QP_STATE_ERR  = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

/* Hardware service (transport) types for a QP. */
enum {
	MTHCA_QP_ST_RC 	= 0x0,
	MTHCA_QP_ST_UC 	= 0x1,
	MTHCA_QP_ST_RD 	= 0x2,
	MTHCA_QP_ST_UD 	= 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

/* Hardware path-migration states (see to_ib_mig_state() below). */
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

/*
 * Address path portion of the hardware QP context.  Layout must match
 * the device exactly, hence the packed attribute and __be* fields.
 */
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

/*
 * Hardware QP context as read/written by QUERY_QP/modify-QP firmware
 * commands.  Some fields are only meaningful on one chip generation,
 * as noted per field.
 */
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

/* Mailbox parameter block wrapping the QP context for firmware commands. */
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

/* Bits for mthca_qp_param.opt_param_mask: which fields a modify applies. */
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

/* Map IB work-request opcodes to the hardware's WQE opcodes. */
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

/*
 * True if qp is one of the four special QPs (QP0/QP1 for each of two
 * ports), which occupy the range [sqp_start, sqp_start + 3].
 */
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

/* True if qp is a QP0 (first two entries of the special-QP range). */
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

/*
 * Return a pointer to receive WQE n.  The queue buffer is either one
 * contiguous ("direct") allocation or a list of pages; in the latter
 * case the byte offset is split into a page index and in-page offset.
 */
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to send WQE n.  Like get_recv_wqe(), but the send
 * queue lives at send_wqe_offset within the shared queue buffer.
 */
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

/* Reset a work queue's producer/consumer bookkeeping to the empty state. */
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

/*
 * Dispatch an asynchronous event for QP number qpn to the consumer's
 * event handler.  The refcount taken under qp_table.lock keeps the QP
 * alive across the (unlocked) handler call; the final decrementer
 * wakes anyone waiting in qp->wait.
 */
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	/* A completed path migration means the alternate port is now active. */
	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

/* Map an IB QP state to the hardware encoding, or -1 if unsupported. */
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:                return -1;
	}
}

/* Driver-internal transport-type indices. */
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

/* Map a driver transport index to the hardware service type, or -1. */
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

/*
 * Cache the attributes of a special QP that the driver needs later
 * (it builds special-QP headers itself), honoring attr_mask.
 */
static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

/*
 * Bring up an IB port via the INIT_IB firmware command, using the
 * device's capability limits.  Failures are logged but not returned.
 */
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

/*
 * Compute the hardware RRE/RAE/RWE access bits for a modify-QP, taking
 * each input from attr if present in attr_mask, else from the QP's
 * current cached value.  Result is returned big-endian, ready for the
 * context's params2 word.
 */
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	/*
	 * With no responder resources for RDMA read/atomic, keep only
	 * the remote-write permission bit.
	 */
	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

/*
 * Map a hardware QP state back to the IB state.  DRAINING reports as
 * SQD, matching how to_ib_qp_state is used by mthca_query_qp().
 */
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

/* Map the hardware path-migration encoding back to the IB enum, or -1. */
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

/* Translate hardware RRE/RWE/RAE bits into IB_ACCESS_* flags. */
static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

/*
 * Unpack a hardware address path into an ib_ah_attr.  Bails out early
 * (leaving the attr zeroed) if the port number decoded from port_pkey
 * is out of range for this device.
 */
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
				struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num 	  = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid     	  = be16_to_cpu(path->rlid);
	ib_ah_attr->sl       	  = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	/* Top bit of g_mylmc flags the presence of a GRH. */
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

/*
 * ib_qp query method: read the QP context from firmware with QUERY_QP
 * and translate it into ib_qp_attr.  A QP in RESET is answered without
 * touching firmware.
 *
 * NOTE(review): this function is truncated here — the remainder
 * (including the "done"/"out" labels jumped to below) is on a later
 * page of this file and is not visible in this chunk.
 */
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp_attr->qp_state 	     = to_ib_qp_state(mthca_state);
	qp_attr->path_mtu 	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state      =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey 		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn 	     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn 	     = be32_to_cpu(context->next_send_psn) & 0xffffff;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -