mal.c

From the Linux kernel source tree · C code · 727 lines · page 1 of 2

/*
 * drivers/net/ibm_newemac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>

#include "core.h"

static int mal_count;

int __devinit mal_register_commac(struct mal_instance	*mal,
				  struct mal_commac	*commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

void __devexit mal_unregister_commac(struct mal_instance	*mal,
				     struct mal_commac		*commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error;
			 * a TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * an incorrect physical address in a BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disables poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
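
The functions above form the channel-ownership API that a client driver (such as the EMAC MAC driver) consumes. The following is a minimal sketch of an attach path, not kernel code: the example_* names and the 1552-byte buffer size are invented here, and the struct mal_commac_ops layout is assumed from mal.h.

/* Hypothetical MAL client -- illustrative only, not from the kernel tree.
 * The example_* identifiers are invented; the mal_commac_ops layout is
 * assumed from mal.h. */

#include "core.h"

static void example_poll_tx(void *dev)
{
	/* reap completed TX descriptors for this device */
}

static int example_poll_rx(void *dev, int budget)
{
	/* pass up at most 'budget' received packets, refill RX ring */
	return 0;
}

static int example_peek_rx(void *dev)
{
	/* report whether RX work is pending */
	return 0;
}

static void example_rxde(void *dev)
{
	/* RX descriptor error: the channel was stopped, arrange recovery */
}

static struct mal_commac_ops example_ops = {
	.poll_tx = example_poll_tx,
	.poll_rx = example_poll_rx,
	.peek_rx = example_peek_rx,
	.rxde    = example_rxde,
};

static int example_attach(struct mal_instance *mal, struct mal_commac *commac,
			  int tx_chan, int rx_chan, void *dev)
{
	int err;

	commac->ops = &example_ops;
	commac->dev = dev;
	commac->tx_chan_mask = MAL_CHAN_MASK(tx_chan);
	commac->rx_chan_mask = MAL_CHAN_MASK(rx_chan);

	/* Fails with -EBUSY if another commac already owns a channel */
	err = mal_register_commac(mal, commac);
	if (err)
		return err;

	/* RX buffer size must be a multiple of 16 and <= MAL_MAX_RX_SIZE */
	err = mal_set_rcbs(mal, rx_chan, 1552);
	if (err) {
		mal_unregister_commac(mal, commac);
		return err;
	}

	/* Joins the poll list with MAL_COMMAC_POLL_DISABLED set */
	mal_poll_add(mal, commac);

	mal_enable_tx_channel(mal, tx_chan);
	mal_enable_rx_channel(mal, rx_chan);
	mal_poll_enable(mal, commac);

	return 0;
}

Note how registration, polling, and channel enable are deliberately separate steps: mal_poll_add() inserts the commac with MAL_COMMAC_POLL_DISABLED set, so nothing is polled until mal_poll_enable() clears the bit.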

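mal_poll_disable() and mal_poll_enable() give callers a quiesce protocol against the NAPI poller: the POLL_DISABLED bit acts as a lock and napi_synchronize() waits out any poll already in flight. A sketch of how that might be used to resize an RX channel safely (the function name is invented for illustration):

/* Hypothetical quiesce-and-reconfigure sequence -- illustrative only. */
static int example_resize_rx(struct mal_instance *mal,
			     struct mal_commac *commac,
			     int rx_chan, unsigned long new_size)
{
	int err;

	/* Take the poll "lock" and wait for any in-flight poll to finish */
	mal_poll_disable(mal, commac);
	mal_disable_rx_channel(mal, rx_chan);

	/* Channel is quiesced: safe to change the RX buffer size */
	err = mal_set_rcbs(mal, rx_chan, new_size);

	mal_enable_rx_channel(mal, rx_chan);
	mal_poll_enable(mal, commac);	/* also kicks a catch-up napi_schedule() */

	return err;
}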