ipath_intr.c

来自「linux 内核源代码」· C语言 代码 · 共 1,209 行 · 第 1/3 页

C
1,209
字号
			/* link is plainly down: only worth a verbose debug note */
			else
				ipath_cdbg(VERBOSE, "Unit %u link is "
					   "down (%s)\n", dd->ipath_unit,
					   ipath_ibcstatus_str[ltstate]);
		}
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
		/*
		 * Link came up all the way to ACTIVE: set ACTIVE, clear
		 * every other link-state flag, mark the shared status word
		 * ready, update the LEDs and tell the IB core.
		 */
		dd->ipath_flags |= IPATH_LINKACTIVE;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
			  IPATH_LINKARMED | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
		*dd->ipath_statusp |=
			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
		dd->ipath_f_setextled(dd, lstate, ltstate);
		signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
		/* falling out of ACTIVE into INIT is an error for ULPs */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			signal_ib_event(dd, IB_EVENT_PORT_ERR);
		/*
		 * set INIT and DOWN.  Down is checked by most of the other
		 * code, but INIT is useful to know in a few places.
		 */
		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
			  | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
		/* falling out of ACTIVE into ARMED is likewise a port error */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			signal_ib_event(dd, IB_EVENT_PORT_ERR);
		dd->ipath_flags |= IPATH_LINKARMED;
		dd->ipath_flags &=
			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
			  IPATH_LINKACTIVE | IPATH_NOCABLE);
		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
					| IPATH_STATUS_IB_READY);
		dd->ipath_f_setextled(dd, lstate, ltstate);
	} else {
		/* unrecognized IB state; only report when not throttled */
		if (!noprint)
			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
				  dd->ipath_unit,
				  ipath_ibcstatus_str[ltstate], ltstate);
	}
skip_ibchange:
	/* remember the raw ibcstatus so the next change can be diffed */
	dd->ipath_lastibcstat = val;
}

/*
 * handle_supp_msgs - report a summary for rate-limited error messages.
 * @dd: the infinipath device
 * @supp_msgs: number of messages that were suppressed
 * @msg: caller-supplied scratch buffer for the decoded error string
 * @msgsz: size of @msg in bytes
 *
 * Called when a suppression interval ends (see handle_frequent_errors())
 * to emit one consolidated message for everything that was dropped.
 */
static void handle_supp_msgs(struct ipath_devdata *dd,
			     unsigned supp_msgs, char *msg, int msgsz)
{
	/*
	 * Print the message unless it's ibc status change only, which
	 * happens so often we never want to count it.
	 */
	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
		int iserr;
		/* decode everything except the too-chatty IB status bit */
		iserr = ipath_decode_err(msg, msgsz,
					 dd->ipath_lasterror &
					 ~INFINIPATH_E_IBSTATUSCHANGED);
		if (dd->ipath_lasterror &
			~(INFINIPATH_E_RRCVEGRFULL |
			INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
			ipath_dev_err(dd, "Suppressed %u messages for "
				      "fast-repeating errors (%s) (%llx)\n",
				      supp_msgs, msg,
				      (unsigned long long)
				      dd->ipath_lasterror);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them. So only complain about these at debug
			 * level.
			 */
			if (iserr)
				ipath_dbg("Suppressed %u messages for %s\n",
					  supp_msgs, msg);
			else
				ipath_cdbg(ERRPKT,
					"Suppressed %u messages for %s\n",
					  supp_msgs, msg);
		}
	}
}

/*
 * handle_frequent_errors - rate-limit fast-repeating error reporting.
 * @dd: the infinipath device
 * @errs: the error bits from this interrupt
 * @msg: scratch buffer passed through to handle_supp_msgs()
 * @msgsz: size of @msg in bytes
 * @noprint: out-flag; set to 1 when the caller should suppress printing
 *
 * Uses function-local static state (nextmsg_time, nmsgs, supp_msgs), so
 * the throttle window is shared across all invocations.  Returns the
 * running count of suppressed messages.
 */
static unsigned handle_frequent_errors(struct ipath_devdata *dd,
				       ipath_err_t errs, char *msg,
				       int msgsz, int *noprint)
{
	unsigned long nc;
	static unsigned long nextmsg_time;
	static unsigned nmsgs, supp_msgs;

	/*
	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
	 * This isn't perfect, but it's a reasonable heuristic. If we get
	 * more than 10, give a 6x longer delay.
	 */
	nc = jiffies;
	if (nmsgs > 10) {
		/* still inside the back-off window: suppress this message */
		if (time_before(nc, nextmsg_time)) {
			*noprint = 1;
			/* on the first suppression, extend the window (HZ*3
			 * vs the normal HZ/2, i.e. the "6x longer delay") */
			if (!supp_msgs++)
				nextmsg_time = nc + HZ * 3;
		}
		else if (supp_msgs) {
			/* window expired: report the summary and reset */
			handle_supp_msgs(dd, supp_msgs, msg, msgsz);
			supp_msgs = 0;
			nmsgs = 0;
		}
	}
	else if (!nmsgs++ || time_after(nc, nextmsg_time))
		nextmsg_time = nc + HZ / 2;

	return supp_msgs;
}

/*
 * handle_errors - process the chip error status for an error interrupt.
 * @dd: the infinipath device
 * @errs: error bits read by the caller
 *
 * Returns nonzero (chkerrpkts) when the caller should go look for error
 * packets in the receive queue — presumably at the interrupt handler's
 * call site; confirm there.
 */
static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	char msg[128];
	u64 ignore_this_time = 0;
	int i, iserr = 0;
	int chkerrpkts = 0, noprint = 0;
	unsigned supp_msgs;
	int log_idx;

	/* rate-limit first; may set noprint for the rest of this call */
	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);

	/* don't report errors that are masked */
	errs &= ~dd->ipath_maskederrs;

	/* do these first, they are most important */
	if (errs & INFINIPATH_E_HARDWARE) {
		/* reuse same msg buf */
		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
	} else {
		u64 mask;
		/* count loggable errors into the EEPROM error log buckets */
		for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
			mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
			if (errs & mask)
				ipath_inc_eeprom_err(dd, log_idx, 1);
		}
	}

	/* bits outside the known-extant set indicate a driver/chip mismatch */
	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
		ipath_dev_err(dd, "error interrupt with unknown errors "
			      "%llx set\n", (unsigned long long)
			      (errs & ~dd->ipath_e_bitsextant));

	if (errs & E_SUM_ERRS)
		ignore_this_time = handle_e_sum_errs(dd, errs);
	else if ((errs & E_SUM_LINK_PKTERRS) &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.
		 * We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	/* 250000 suppressed messages means things are badly wrong; start
	 * masking the offending error bits entirely for a while */
	if (supp_msgs == 250000) {
		int s_iserr;
		/*
		 * It's not entirely reasonable assuming that the errors set
		 * in the last clear period are all responsible for the
		 * problem, but the alternative is to assume it's the only
		 * ones on this particular interrupt, which also isn't great
		 */
		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
		dd->ipath_errormask &= ~dd->ipath_maskederrs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			dd->ipath_errormask);
		s_iserr = ipath_decode_err(msg, sizeof msg,
			dd->ipath_maskederrs);

		if (dd->ipath_maskederrs &
			~(INFINIPATH_E_RRCVEGRFULL |
			INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
			ipath_dev_err(dd, "Temporarily disabling "
			    "error(s) %llx reporting; too frequent (%s)\n",
				(unsigned long long)dd->ipath_maskederrs,
				msg);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal",
			 * for some types of processes (mostly benchmarks)
			 * that send huge numbers of messages, while not
			 * processing them.  So only complain about
			 * these at debug level.
			 */
			if (s_iserr)
				ipath_dbg("Temporarily disabling reporting "
				    "too frequent queue full errors (%s)\n",
				    msg);
			else
				ipath_cdbg(ERRPKT,
				    "Temporarily disabling reporting too"
				    " frequent packet errors (%s)\n",
				    msg);
		}

		/*
		 * Re-enable the masked errors after around 3 minutes.  in
		 * ipath_get_faststats().  If we have a series of fast
		 * repeating but different errors, the interval will keep
		 * stretching out, but that's OK, as that's pretty
		 * catastrophic.
		 */
		dd->ipath_unmasktime = jiffies + HZ * 180;
	}

	/* acknowledge the errors to the chip */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
	if (ignore_this_time)
		errs &= ~ignore_this_time;
	if (errs & ~dd->ipath_lasterror) {
		errs &= ~dd->ipath_lasterror;
		/* never suppress duplicate hwerrors or ibstatuschange */
		dd->ipath_lasterror |= errs &
			~(INFINIPATH_E_HARDWARE |
			  INFINIPATH_E_IBSTATUSCHANGED);
	}

	/* likely due to cancel, so suppress */
	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
		dd->ipath_lastcancel > jiffies) {
		ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
	}

	/* nothing left to report or act on */
	if (!errs)
		return 0;

	if (!noprint)
		/*
		 * the ones we mask off are handled specially below or above
		 */
		ipath_decode_err(msg, sizeof msg,
				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
					  INFINIPATH_E_RRCVEGRFULL |
					  INFINIPATH_E_RRCVHDRFULL |
					  INFINIPATH_E_HARDWARE));
	else
		/* so we don't need if (!noprint) at strlcat's below */
		*msg = 0;

	if (errs & E_SUM_PKTERRS) {
		ipath_stats.sps_pkterrs++;
		chkerrpkts = 1;
	}
	if (errs & E_SUM_ERRS)
		ipath_stats.sps_errs++;

	if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
		ipath_stats.sps_crcerrs++;
		chkerrpkts = 1;
	}
	/* anything beyond plain packet errors counts as a "real" error */
	iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);

	/*
	 * We don't want to print these two as they happen, or we can make
	 * the situation even worse, because it takes so long to print
	 * messages to serial consoles.
	 * Kernel ports get printed from
	 * fast_stats, no more than every 5 seconds, user ports get printed
	 * on close
	 */
	if (errs & INFINIPATH_E_RRCVHDRFULL) {
		u32 hd, tl;
		ipath_stats.sps_hdrqfull++;
		/* find which port(s) have a full receive header queue */
		for (i = 0; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];
			if (i == 0) {
				/* port 0 (kernel) keeps head/tail in dd */
				hd = dd->ipath_port0head;
				tl = (u32) le64_to_cpu(
					*dd->ipath_hdrqtailptr);
			} else if (pd && pd->port_cnt &&
				   pd->port_rcvhdrtail_kvaddr) {
				/*
				 * don't report same point multiple times,
				 * except kernel
				 */
				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
				if (tl == dd->ipath_lastrcvhdrqtails[i])
					continue;
				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
						       i);
			} else
				continue;
			/* head one past tail (with wraparound) == queue full */
			if (hd == (tl + 1) ||
			    (!hd && tl == dd->ipath_hdrqlast)) {
				if (i == 0)
					chkerrpkts = 1;
				dd->ipath_lastrcvhdrqtails[i] = tl;
				pd->port_hdrqfull++;
				/* flush hdrqfull so that poll() sees it */
				wmb();
				wake_up_interruptible(&pd->port_wait);
			}
		}
	}
	if (errs & INFINIPATH_E_RRCVEGRFULL) {
		/*
		 * since this is of less importance and not likely to
		 * happen without also getting hdrfull, only count
		 * occurrences; don't check each port (or even the kernel
		 * vs user)
		 */
		ipath_stats.sps_etidfull++;
		if (dd->ipath_port0head !=
		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
			chkerrpkts = 1;
	}

	/*
	 * do this before IBSTATUSCHANGED, in case both bits set in a single
	 * interrupt; we want the STATUSCHANGE to "win", so we do our
	 * internal copy of state machine correctly
	 */
	if (errs & INFINIPATH_E_RIBLOSTLINK) {
		/*
		 * force through block below
		 */
		errs |= INFINIPATH_E_IBSTATUSCHANGED;
		ipath_stats.sps_iblink++;
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		if (!noprint) {
			/* re-read ibcstatus just for the debug message */
			u64 st = ipath_read_kreg64(
				dd, dd->ipath_kregs->kr_ibcstatus);
			ipath_dbg("Lost link, link now down (%s)\n",
				  ipath_ibcstatus_str[st & 0xf]);
		}
	}
	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
		handle_e_ibstatuschanged(dd, errs, noprint);

	if (errs & INFINIPATH_E_RESET) {
		if (!noprint)
			ipath_dev_err(dd, "Got reset, requires re-init "
				      "(unload and reload driver)\n");
		dd->ipath_flags &= ~IPATH_INITTED;	/* needs re-init */
		/* mark as having had error */
		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
	}

	if (!noprint && *msg) {
		if (iserr)
			ipath_dev_err(dd, "%s error\n", msg);
		else
			dev_info(&dd->pcidev->dev, "%s packet problems\n",
				msg);
	}

	/* wake anyone waiting on a particular link state transition */
	if (dd->ipath_state_wanted & dd->ipath_flags) {
		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
			   "waking\n", dd->ipath_state_wanted,
			   dd->ipath_flags);
		wake_up_interruptible(&ipath_state_wait);
	}

	return chkerrpkts;
}

/*
 * try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it for anything changing while in freeze mode
 * (we don't want to wait for the next pio buffer state change).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 */
void ipath_clear_freeze(struct ipath_devdata *dd)
{
	int i, im;
	__le64 val;

	/* disable error interrupts, to avoid confusion */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	/*
	 * clear all sends, because they may have been

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?