ibm_emac_mal.c
/*
 * drivers/net/ibm_emac/ibm_emac_mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_mal.h"
#include "ibm_emac_debug.h"

int __init mal_register_commac(struct ibm_ocp_mal *mal,
			       struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		local_irq_restore(flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->def->index);
		return -EBUSY;
	}

	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	local_irq_restore(flags);
	return 0;
}

void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
				  struct mal_commac *commac)
{
	unsigned long flags;
	local_irq_save(flags);

	MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);

	local_irq_restore(flags);
}

int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG("%d: set_rcbs(%d, %lu)" NL, mal->def->index, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->def->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
	return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
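/*
 * Worked example (not part of the original file): the MAL keeps one
 * shared buffer-descriptor table, with all TX rings first and all RX
 * rings after them.  With 2 TX channels, NUM_TX_BUFF = 64 and
 * NUM_RX_BUFF = 64, RX channel 1 starts at descriptor
 * 2 * 64 + 1 * 64 = 192.  A consumer would locate its rings roughly
 * like the hypothetical helpers below; this assumes bd_virt is the
 * struct mal_descriptor * base of the table, as declared in
 * ibm_emac_mal.h.  Treat this as a sketch, not driver code.
 */
static inline struct mal_descriptor *emac_tx_ring(struct ibm_ocp_mal *mal,
						  int channel)
{
	/* bd_virt is the kernel-virtual base of the shared BD table */
	return mal->bd_virt + mal_tx_bd_offset(mal, channel);
}

static inline struct mal_descriptor *emac_rx_ring(struct ibm_ocp_mal *mal,
						  int channel)
{
	/* RX rings live after all TX rings in the same table */
	return mal->bd_virt + mal_rx_bd_offset(mal, channel);
}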
void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}

void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
}

void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	local_bh_disable();
	MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
	local_bh_enable();
}

void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
{
	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
	MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
}

void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
	list_add_tail(&commac->poll_list, &mal->poll_list);
	local_bh_enable();
}

void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	local_bh_disable();
	MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
	list_del(&commac->poll_list);
	local_bh_enable();
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
{
	MAL_DBG2("%d: enable_irq" NL, mal->def->index);
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
{
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
	MAL_DBG2("%d: disable_irq" NL, mal->def->index);
}

static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, PLB (ESR = 0x%08x)\n",
				       mal->def->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect EBC setup */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->def->index, esr);
	}
	return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
{
	if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
		MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
		mal_disable_eob_irq(mal);
		__netif_rx_schedule(&mal->poll_dev);
	} else
		MAL_DBG2("%d: already in poll" NL, mal->def->index);
}

static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
	MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);
	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
	MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);
	return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->def->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			mc->rx_stopped = 1;
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}
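/*
 * Sketch (assumption, not part of this excerpt): the five handlers
 * above are normally registered by mal_probe(), which is not shown
 * here, using the IRQ numbers carried in struct ocp_func_mal_data.
 * Something along these lines; the *_irq field names follow
 * <asm/ocp.h>, and error unwinding is omitted for brevity.
 */
static int __init mal_wire_irqs_sketch(struct ibm_ocp_mal *mal)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;
	int err;

	/* System error first, so SERRs during setup are caught */
	err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (!err)
		err = request_irq(maldata->txde_irq, mal_txde, 0,
				  "MAL TX DE", mal);
	if (!err)
		err = request_irq(maldata->txeob_irq, mal_txeob, 0,
				  "MAL TX EOB", mal);
	if (!err)
		err = request_irq(maldata->rxde_irq, mal_rxde, 0,
				  "MAL RX DE", mal);
	if (!err)
		err = request_irq(maldata->rxeob_irq, mal_rxeob, 0,
				  "MAL RX EOB", mal);
	return err;
}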
static int mal_poll(struct net_device *ndev, int *budget)
{
	struct ibm_ocp_mal *mal = ndev->priv;
	struct list_head *l;
	int rx_work_limit = min(ndev->quota, *budget), received = 0, done;

	MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
		 rx_work_limit);
      again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
		    list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* (the captured source ends here, part-way through mal_poll();
	 *  the RX half of the poll loop is missing from this excerpt) */
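/*
 * Usage sketch (assumption, not part of this file): how an EMAC
 * instance might claim a MAL channel pair using the API above.
 * "struct my_emac" and the emac_* callbacks are hypothetical
 * placeholders; the real consumer is ibm_emac_core.c.  The callback
 * roles follow their use above: poll_tx/poll_rx run from mal_poll(),
 * rxde runs from the RXDE interrupt.
 */
struct my_emac {
	struct mal_commac commac;
	/* ... device state ... */
};

static void emac_poll_tx(void *dev) { /* reap completed TX BDs */ }
static int emac_poll_rx(void *dev, int budget) { return 0; /* refill RX BDs */ }
static void emac_rxde(void *dev) { /* recover from RX descriptor error */ }

static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = emac_poll_tx,
	.poll_rx = emac_poll_rx,
	.rxde = emac_rxde,
};

static int emac_attach_to_mal(struct ibm_ocp_mal *mal, struct my_emac *emac)
{
	int err;

	emac->commac.ops = &emac_commac_ops;
	emac->commac.dev = emac;
	emac->commac.tx_chan_mask = MAL_CHAN_MASK(0);
	emac->commac.rx_chan_mask = MAL_CHAN_MASK(0);

	/* Fails with -EBUSY if another commac already owns channel 0 */
	err = mal_register_commac(mal, &emac->commac);
	if (err)
		return err;

	/* RX buffer size must be a 16-byte multiple, or -EINVAL */
	err = mal_set_rcbs(mal, 0, 1552);
	if (err)
		return err;

	mal_poll_add(mal, &emac->commac);
	mal_enable_tx_channel(mal, 0);
	mal_enable_rx_channel(mal, 0);
	return 0;
}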