/* * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses.  You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * *     Redistribution and use in source and binary forms, with or *     without modification, are permitted provided that the following *     conditions are met: * *      - Redistributions of source code must retain the above *        copyright notice, this list of conditions and the following *        disclaimer. * *      - Redistributions in binary form must reproduce the above *        copyright notice, this list of conditions and the following *        disclaimer in the documentation and/or other materials *        provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/#include <linux/pci.h>#include "ipath_kernel.h"#include "ips_common.h"#include "ipath_layer.h"#define E_SUM_PKTERRS \	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)#define E_SUM_ERRS \	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \	 INFINIPATH_E_INVALIDADDR)static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs){	unsigned long sbuf[4];	u64 ignore_this_time = 0;	u32 piobcnt;	/* if possible that sendbuffererror could be valid */	piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;	/* read these before writing errorclear */	sbuf[0] = ipath_read_kreg64(		dd, dd->ipath_kregs->kr_sendbuffererror);	sbuf[1] = ipath_read_kreg64(		dd, dd->ipath_kregs->kr_sendbuffererror + 1);	if (piobcnt > 128) {		sbuf[2] = ipath_read_kreg64(			dd, dd->ipath_kregs->kr_sendbuffererror + 2);		sbuf[3] = ipath_read_kreg64(			dd, dd->ipath_kregs->kr_sendbuffererror + 3);	}	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {		int i;		ipath_cdbg(PKT, "SendbufErrs %lx %lx ", sbuf[0], sbuf[1]);		if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)			printk("%lx %lx ", sbuf[2], sbuf[3]);		for (i = 0; i < piobcnt; i++) {			if (test_bit(i, sbuf)) {				u32 __iomem *piobuf;				if (i < dd->ipath_piobcnt2k)					piobuf = (u32 __iomem *)						(dd->ipath_pio2kbase +						 i * dd->ipath_palign);				else					piobuf = (u32 __iomem *)						(dd->ipath_pio4kbase +						 (i - dd->ipath_piobcnt2k) *						 dd->ipath_4kalign);				ipath_cdbg(PKT,					   "PIObuf[%u] @%p pbc is %x; ",					   i, piobuf, readl(piobuf));				ipath_disarm_piobufs(dd, i, 
1);			}		}		if (ipath_debug & __IPATH_PKTDBG)			printk("\n");	}	if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |		     INFINIPATH_E_SDROPPEDSMPPKT |		     INFINIPATH_E_SMINPKTLEN)) &&	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {		/*		 * This can happen when SMA is trying to bring the link		 * up, but the IB link changes state at the "wrong" time.		 * The IB logic then complains that the packet isn't		 * valid.  We don't want to confuse people, so we just		 * don't print them, except at debug		 */		ipath_dbg("Ignoring pktsend errors %llx, because not "			  "yet active\n", (unsigned long long) errs);		ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |			INFINIPATH_E_SDROPPEDSMPPKT |			INFINIPATH_E_SMINPKTLEN;	}	return ignore_this_time;}/* return the strings for the most common link states */static char *ib_linkstate(u32 linkstate){	char *ret;	switch (linkstate) {	case IPATH_IBSTATE_INIT:		ret = "Init";		break;	case IPATH_IBSTATE_ARM:		ret = "Arm";		break;	case IPATH_IBSTATE_ACTIVE:		ret = "Active";		break;	default:		ret = "Down";	}	return ret;}static void handle_e_ibstatuschanged(struct ipath_devdata *dd,				     ipath_err_t errs, int noprint){	u64 val;	u32 ltstate, lstate;	/*	 * even if diags are enabled, we want to notice LINKINIT, etc.	 * We just don't want to change the LED state, or	 * dd->ipath_kregs->kr_ibcctrl	 */	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);	lstate = val & IPATH_IBSTATE_MASK;	if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||	    lstate == IPATH_IBSTATE_ACTIVE) {		/*		 * only print at SMA if there is a change, debug if not		 * (sometimes we want to know that, usually not).		 
*/		if (lstate == ((unsigned) dd->ipath_lastibcstat			       & IPATH_IBSTATE_MASK)) {			ipath_dbg("Status change intr but no change (%s)\n",				  ib_linkstate(lstate));		}		else			ipath_cdbg(SMA, "Unit %u link state %s, last "				   "was %s\n", dd->ipath_unit,				   ib_linkstate(lstate),				   ib_linkstate((unsigned)						dd->ipath_lastibcstat						& IPATH_IBSTATE_MASK));	}	else {		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;		if (lstate == IPATH_IBSTATE_INIT ||		    lstate == IPATH_IBSTATE_ARM ||		    lstate == IPATH_IBSTATE_ACTIVE)			ipath_cdbg(SMA, "Unit %u link state down"				   " (state 0x%x), from %s\n",				   dd->ipath_unit,				   (u32)val & IPATH_IBSTATE_MASK,				   ib_linkstate(lstate));		else			ipath_cdbg(VERBOSE, "Unit %u link state changed "				   "to 0x%x from down (%x)\n",				   dd->ipath_unit, (u32) val, lstate);	}	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &		INFINIPATH_IBCS_LINKSTATE_MASK;	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {		u32 last_ltstate;		/*		 * Ignore cycling back and forth from Polling.Active		 * to Polling.Quiet while waiting for the other end of		 * the link to come up. We will cycle back and forth		 * between them if no cable is plugged in,		 * the other device is powered off or disabled, etc.		 
*/		last_ltstate = (dd->ipath_lastibcstat >>				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE		    || last_ltstate ==		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {			if (dd->ipath_ibpollcnt > 40) {				dd->ipath_flags |= IPATH_NOCABLE;				*dd->ipath_statusp |=					IPATH_STATUS_IB_NOCABLE;			} else				dd->ipath_ibpollcnt++;			goto skip_ibchange;		}	}	dd->ipath_ibpollcnt = 0;	/* some state other than 2 or 3 */	ipath_stats.sps_iblink++;	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {		dd->ipath_flags |= IPATH_LINKDOWN;		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT				     | IPATH_LINKACTIVE |				     IPATH_LINKARMED);		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;		if (!noprint) {			if (((dd->ipath_lastibcstat >>			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &			     INFINIPATH_IBCS_LINKSTATE_MASK)			    == INFINIPATH_IBCS_L_STATE_ACTIVE)				/* if from up to down be more vocal */				ipath_cdbg(SMA,					   "Unit %u link now down (%s)\n",					   dd->ipath_unit,					   ipath_ibcstatus_str[ltstate]);			else				ipath_cdbg(VERBOSE, "Unit %u link is "					   "down (%s)\n", dd->ipath_unit,					   ipath_ibcstatus_str[ltstate]);		}		dd->ipath_f_setextled(dd, lstate, ltstate);	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {		dd->ipath_flags |= IPATH_LINKACTIVE;		dd->ipath_flags &=			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |			  IPATH_LINKARMED | IPATH_NOCABLE);		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;		*dd->ipath_statusp |=			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;		dd->ipath_f_setextled(dd, lstate, ltstate);		__ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {		/*		 * set INIT and DOWN.  Down is checked by most of the other		 * code, but INIT is useful to know in a few places.		 
*/		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;		dd->ipath_flags &=			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED			  | IPATH_NOCABLE);		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE					| IPATH_STATUS_IB_READY);		dd->ipath_f_setextled(dd, lstate, ltstate);	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {		dd->ipath_flags |= IPATH_LINKARMED;		dd->ipath_flags &=			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |			  IPATH_LINKACTIVE | IPATH_NOCABLE);		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE					| IPATH_STATUS_IB_READY);		dd->ipath_f_setextled(dd, lstate, ltstate);	} else {		if (!noprint)			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",				  dd->ipath_unit,				  ipath_ibcstatus_str[ltstate], ltstate);	}skip_ibchange:	dd->ipath_lastibcstat = val;}static void handle_supp_msgs(struct ipath_devdata *dd,			     unsigned supp_msgs, char msg[512]){	/*	 * Print the message unless it's ibc status change only, which	 * happens so often we never want to count it.	 */	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {		ipath_decode_err(msg, sizeof msg, dd->ipath_lasterror &				 ~INFINIPATH_E_IBSTATUSCHANGED);		if (dd->ipath_lasterror &		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))			ipath_dev_err(dd, "Suppressed %u messages for "				      "fast-repeating errors (%s) (%llx)\n",				      supp_msgs, msg,				      (unsigned long long)				      dd->ipath_lasterror);		else {			/*			 * rcvegrfull and rcvhdrqfull are "normal", for some			 * types of processes (mostly benchmarks) that send			 * huge numbers of messages, while not processing			 * them. So only complain about these at debug			 * level.			 
*/			ipath_dbg("Suppressed %u messages for %s\n",				  supp_msgs, msg);		}	}}static unsigned handle_frequent_errors(struct ipath_devdata *dd,				       ipath_err_t errs, char msg[512],				       int *noprint){	unsigned long nc;	static unsigned long nextmsg_time;	static unsigned nmsgs, supp_msgs;	/*	 * Throttle back "fast" messages to no more than 10 per 5 seconds.	 * This isn't perfect, but it's a reasonable heuristic. If we get	 * more than 10, give a 6x longer delay.	 */	nc = jiffies;	if (nmsgs > 10) {		if (time_before(nc, nextmsg_time)) {			*noprint = 1;			if (!supp_msgs++)				nextmsg_time = nc + HZ * 3;		}		else if (supp_msgs) {			handle_supp_msgs(dd, supp_msgs, msg);			supp_msgs = 0;			nmsgs = 0;		}	}	else if (!nmsgs++ || time_after(nc, nextmsg_time))		nextmsg_time = nc + HZ / 2;	return supp_msgs;}static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs){	char msg[512];	u64 ignore_this_time = 0;	int i;	int chkerrpkts = 0, noprint = 0;	unsigned supp_msgs;	supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);	/*	 * don't report errors that are masked (includes those always	 * ignored)	 */	errs &= ~dd->ipath_maskederrs;	/* do these first, they are most important */	if (errs & INFINIPATH_E_HARDWARE) {		/* reuse same msg buf */		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);	}	if (!noprint && (errs & ~infinipath_e_bitsextant))		ipath_dev_err(dd, "error interrupt with unknown errors "			      "%llx set\n", (unsigned long long)			      (errs & ~infinipath_e_bitsextant));	if (errs & E_SUM_ERRS)		ignore_this_time = handle_e_sum_errs(dd, errs);	if (supp_msgs == 250000) {		/*		 * It's not entirely reasonable assuming that the errors set		 * in the last clear period are all responsible for the		 * problem, but the alternative is to assume it's the only		 * ones on this particular interrupt, which also isn't great		 */		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,				 ~dd->ipath_maskederrs);		
ipath_decode_err(msg, sizeof msg,				 (dd->ipath_maskederrs & ~dd->				  ipath_ignorederrs));		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &		    ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))			ipath_dev_err(dd, "Disabling error(s) %llx because "				      "occuring too frequently (%s)\n",				      (unsigned long long)				      (dd->ipath_maskederrs &				       ~dd->ipath_ignorederrs), msg);		else {			/*			 * rcvegrfull and rcvhdrqfull are "normal",			 * for some types of processes (mostly benchmarks)			 * that send huge numbers of messages, while not			 * processing them.  So only complain about			 * these at debug level.			 */			ipath_dbg("Disabling frequent queue full errors "				  "(%s)\n", msg);		}		/*		 * Re-enable the masked errors after around 3 minutes.  in		 * ipath_get_faststats().  If we have a series of fast		 * repeating but different errors, the interval will keep		 * stretching out, but that's OK, as that's pretty		 * catastrophic.		 */		dd->ipath_unmasktime = jiffies + HZ * 180;	}	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);	if (ignore_this_time)		errs &= ~ignore_this_time;
