eq.c

来自「linux 内核源代码」· C语言 代码 · 共 657 行 · 第 1/2 页

C
657
字号
/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,	/* entries in the async event EQ */
	MLX4_NUM_SPARE_EQE	= 0x80,		/* CI-update headroom; see mlx4_eq_int() */
	MLX4_EQ_ENTRY_SIZE	= 0x20		/* bytes per EQ entry */
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
*/struct mlx4_eq_context {	__be32			flags;	u16			reserved1[3];	__be16			page_offset;	u8			log_eq_size;	u8			reserved2[4];	u8			eq_period;	u8			reserved3;	u8			eq_max_count;	u8			reserved4[3];	u8			intr;	u8			log_page_size;	u8			reserved5[2];	u8			mtt_base_addr_h;	__be32			mtt_base_addr_l;	u32			reserved6[2];	__be32			consumer_index;	__be32			producer_index;	u32			reserved7[4];};#define MLX4_EQ_STATUS_OK	   ( 0 << 28)#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)#define MLX4_EQ_OWNER_SW	   ( 0 << 24)#define MLX4_EQ_OWNER_HW	   ( 1 << 24)#define MLX4_EQ_FLAG_EC		   ( 1 << 18)#define MLX4_EQ_FLAG_OI		   ( 1 << 17)#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)#define MLX4_EQ_STATE_FIRED	   (10 <<  8)#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \			       (1ull << MLX4_EVENT_TYPE_CMD))struct mlx4_eqe {	u8			reserved1;	u8			type;	u8			reserved2;	u8			subtype;	union {		u32		raw[6];		struct {			__be32	cqn;		} __attribute__((packed)) comp;		struct {			u16	reserved1;			__be16	token;			u32	reserved2;			u8	reserved3[3];			u8	status;			__be64	out_param;		} __attribute__((packed)) cmd;		struct {			__be32	qpn;		} __attribute__((packed)) qp;		struct {			__be32	srqn;		} __attribute__((packed)) srq;		
struct {			__be32	cqn;			u32	reserved1;			u8	reserved2[3];			u8	syndrome;		} __attribute__((packed)) cq_err;		struct {			u32	reserved1[2];			__be32	port;		} __attribute__((packed)) port_change;	}			event;	u8			reserved3[3];	u8			owner;} __attribute__((packed));static void eq_set_ci(struct mlx4_eq *eq, int req_not){	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |					       req_not << 31),		     eq->doorbell);	/* We still want ordering, just not swabbing, so add a barrier */	mb();}static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry){	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;}static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq){	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;}static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq){	struct mlx4_eqe *eqe;	int cqn;	int eqes_found = 0;	int set_ci = 0;	while ((eqe = next_eqe_sw(eq))) {		/*		 * Make sure we read EQ entry contents after we've		 * checked the ownership bit.		 
*/		rmb();		switch (eqe->type) {		case MLX4_EVENT_TYPE_COMP:			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;			mlx4_cq_completion(dev, cqn);			break;		case MLX4_EVENT_TYPE_PATH_MIG:		case MLX4_EVENT_TYPE_COMM_EST:		case MLX4_EVENT_TYPE_SQ_DRAINED:		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,				      eqe->type);			break;		case MLX4_EVENT_TYPE_SRQ_LIMIT:		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,				      eqe->type);			break;		case MLX4_EVENT_TYPE_CMD:			mlx4_cmd_event(dev,				       be16_to_cpu(eqe->event.cmd.token),				       eqe->event.cmd.status,				       be64_to_cpu(eqe->event.cmd.out_param));			break;		case MLX4_EVENT_TYPE_PORT_CHANGE:			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,					    be32_to_cpu(eqe->event.port_change.port) >> 28);			break;		case MLX4_EVENT_TYPE_CQ_ERROR:			mlx4_warn(dev, "CQ %s on CQN %06x\n",				  eqe->event.cq_err.syndrome == 1 ?				  "overrun" : "access violation",				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),				      eqe->type);			break;		case MLX4_EVENT_TYPE_EQ_OVERFLOW:			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);			break;		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:		case MLX4_EVENT_TYPE_ECC_DETECT:		default:			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);			break;		};		++eq->cons_index;		eqes_found = 1;		++set_ci;		/*		 * The HCA will think the queue has overflowed if we		 * don't tell it we've been processing events.  We		 * create our EQs with MLX4_NUM_SPARE_EQE extra		 * entries, so we must update our consumer index at		 * least that often.		 
*/		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {			/*			 * Conditional on hca_type is OK here because			 * this is a rare case, not the fast path.			 */			eq_set_ci(eq, 0);			set_ci = 0;		}	}	eq_set_ci(eq, 1);	return eqes_found;}static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr){	struct mlx4_dev *dev = dev_ptr;	struct mlx4_priv *priv = mlx4_priv(dev);	int work = 0;	int i;	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);	for (i = 0; i < MLX4_NUM_EQ; ++i)		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);	return IRQ_RETVAL(work);}static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr){	struct mlx4_eq  *eq  = eq_ptr;	struct mlx4_dev *dev = eq->dev;	mlx4_eq_int(dev, eq);	/* MSI-X vectors always belong to us */	return IRQ_HANDLED;}static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,			int eq_num){	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);}static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,			 int eq_num){	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,			MLX4_CMD_TIME_CLASS_A);}static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,			 int eq_num){	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,			    MLX4_CMD_TIME_CLASS_A);}static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq){	struct mlx4_priv *priv = mlx4_priv(dev);	int index;	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;	if (!priv->eq_table.uar_map[index]) {		priv->eq_table.uar_map[index] =			ioremap(pci_resource_start(dev->pdev, 2) +				((eq->eqn / 4) << PAGE_SHIFT),				PAGE_SIZE);		if (!priv->eq_table.uar_map[index]) {			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",				 eq->eqn);			return NULL;		}	}	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);}static int mlx4_create_eq(struct mlx4_dev *dev, int nent,			  u8 intr, struct mlx4_eq *eq){	struct mlx4_priv *priv = 
mlx4_priv(dev);	struct mlx4_cmd_mailbox *mailbox;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?