
mthca_eq.c

linux-2.6.15.6
C
Page 1 of 2
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};
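/*
 * Note: mthca_eq_context below is the hardware-visible EQ context
 * (all multi-byte fields big-endian); the driver fills it in and
 * hands it to the HCA firmware when an EQ is created, so field
 * offsets and the reserved gaps must match the device layout exactly.
 */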
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __attribute__((packed));

#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
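/*
 * Note: each 0x20-byte EQE ends in an owner byte.  The driver may
 * only consume entries whose owner bit reads "software" (see
 * next_eqe_sw() below) and gives each entry back to the hardware
 * via set_eqe_hw() once its event has been dispatched.
 */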
static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
	doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
	doorbell[1] = 0;

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
		doorbell[1] = cpu_to_be32(cqn);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}
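/*
 * Note: mthca_eq_int() below is the core poll loop: it consumes
 * software-owned EQEs in order, dispatches each by type to the
 * CQ/QP/SRQ/command handlers, returns the entry to the hardware and
 * advances cons_index.  The consumer index is only written to the
 * device inside the loop for command events (which may queue more
 * commands); in all other cases the interrupt handlers ring the
 * doorbell once after the queue is drained.
 */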
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;

	while ((eqe = next_eqe_sw(eq))) {
		int set_ci = 0;

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			/*
			 * cmd_event() may add more commands.
			 * The card will think the queue has overflowed if
			 * we don't tell it we've been processing events.
			 */
			set_ci = 1;
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		};

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;

		if (unlikely(set_ci)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}
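/*
 * Note: interrupt entry points.  Tavor exposes an ECR register
 * telling which EQs fired, so its INTx handler reads and clears it
 * and polls only those EQs; mem-free (Arbel) mode has no ECR and
 * polls every EQ.  The MSI-X variants are bound to exactly one EQ
 * per vector, so they skip the "was it ours?" check and always
 * return IRQ_HANDLED.
 */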
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int __devinit mthca_create_eq(struct mthca_dev *dev,
				     int nent,
				     u8 intr,
				     struct mthca_eq *eq)
{
	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		/* ... truncated here; the listing continues on page 2 */
