core.c

来自「linux 内核源代码」· C语言 代码 · 共 2,457 行 · 第 1/5 页

C
2,457
字号
/*
 * drivers/net/ibm_newemac/core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>

#include "core.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision maybe
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(.                --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
 * probably require in that case to have explicit PHY IDs in the device-tree
 */
/* bitmap of PHY addresses already claimed by an EMAC instance */
static u32 busy_phy_map;
/* serializes allocation of entries in busy_phy_map */
static DEFINE_MUTEX(emac_phy_map_lock);

/* This is the wait queue used to wait on any event related to probe, that
 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
 */
static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell_index.
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */

#define EMAC_BOOT_LIST_SIZE	4
static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long should I wait for dependent devices ? */
#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
/* Rate-limited error logger: prints "<ifname>: <error>" at KERN_ERR,
 * dropping messages when net_ratelimit() says we are flooding.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);

/* True if the given PHY_MODE_* connection type can carry gigabit traffic */
static inline int emac_phy_supports_gige(int phy_mode)
{
	return  phy_mode == PHY_MODE_GMII ||
		phy_mode == PHY_MODE_RGMII ||
		phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

/* True if the given PHY_MODE_* uses the internal GPCS (TBI/RTBI) interface */
static inline int emac_phy_gpcs(int phy_mode)
{
	return  phy_mode == PHY_MODE_TBI ||
		phy_mode == PHY_MODE_RTBI;
}

/* Set MR0[TXE] to enable the transmitter, if not already enabled */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}

/* Clear MR0[TXE] and busy-wait (up to dev->stop_timeout us) for the
 * MR0[TXI] idle indication; logs a rate-limited error on timeout.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}

/* Set MR0[RXE] to enable the receiver, unless the MAL channel is marked
 * stopped. If a previous asynchronous disable (emac_rx_disable_async)
 * is still pending, first wait for MR0[RXI] to assert.
 */
static void emac_rx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
		goto out;

	DBG(dev, "rx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			/* NOTE(review): '=' binds loosest, so after this loop
			 * r holds only the RXI-masked value, not the full MR0
			 * contents; the out_be32 below then writes RXI|RXE.
			 * Looks deliberate-or-benign — confirm against the
			 * EMAC MR0 register spec before changing.
			 */
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	;
}

/* Clear MR0[RXE] and busy-wait (up to dev->stop_timeout us) for the
 * MR0[RXI] idle indication; logs a rate-limited error on timeout.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}

/* Quiesce the netdev side: block multicast updates (no_mcast), refresh
 * trans_start so the watchdog doesn't fire, stop MAL polling and the
 * TX queue.
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 1;
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}

/* Undo emac_netif_stop: re-allow multicast updates (applying any change
 * that arrived while stopped), wake the TX queue and re-enable MAL polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	dev->no_mcast = 0;
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 *  not terribly harmful)
	 */

	mal_poll_enable(dev->mal, &dev->commac);
}

/* Clear MR0[RXE] without waiting for the receiver to go idle; the
 * completion is picked up later by emac_rx_enable's RXI wait.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}

/* Soft-reset the EMAC core via MR0[SRST], polling up to 20 iterations
 * for the bit to self-clear. Tracks failure in dev->reset_failed so a
 * subsequent attempt skips the (possibly wedged) RX/TX disable step.
 * Returns 0 on success, -ETIMEDOUT if the reset bit never cleared.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

/* Program the four 16-bit group-address hash table registers (GAHT1-4)
 * from the netdev multicast list. Hash = top 6 bits of the reflected
 * CRC32 of the MAC address, mapped to one of 64 filter bits.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

/* Translate netdev->flags (IFF_PROMISC/IFF_ALLMULTI) and the multicast
 * count into an EMAC RX Mode Register (RMR) value. More than 32
 * multicast addresses falls back to promiscuous-multicast (PMME)
 * rather than hash filtering (MAE).
 */
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

/* Build the base Mode Register 1 (MR1) value for the classic (non-EMAC4)
 * core from the configured TX/RX FIFO sizes. Unknown sizes leave the
 * corresponding field at 0 and log a warning.
 */
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		/* NOTE(review): this is the Tx FIFO switch but the message
		 * says "Rx" — looks like a copy/paste slip; confirm before
		 * changing the log text.
		 */
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

/* Build the base MR1 value for the EMAC4 core variant; additionally
 * encodes the OPB bus clock (in MHz) into the OBCI field.
 */
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		/* NOTE(review): Tx FIFO switch, but message says "Rx" —
		 * same copy/paste slip as __emac_calc_base_mr1.
		 */
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}

/* Dispatch to the EMAC4 or classic MR1 builder based on the feature bit */
static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
		__emac4_calc_base_mr1(dev, tx_size, rx_size) :
		__emac_calc_base_mr1(dev, tx_size, rx_size);
}

/* Encode a TX request threshold (bytes) into the TRTR register value;
 * the hardware field is in 64-byte units minus one, at a variant-specific
 * bit position.
 */
static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
	else
		return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
}

/* Encode RX low/high water marks into the RWMR register value; field
 * widths and positions differ between the EMAC4 and classic variants.
 */
static inline u32 emac_calc_rwmr(struct emac_instance *dev,
				 unsigned int low, unsigned int high)
{
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return (low << 22) | ( (high & 0x3ff) << 6);
	else
		return (low << 23) | ( (high & 0x1ff) << 7);
}

/* Fully (re)program the EMAC registers for the current PHY link state.
 * Without a link, forces full-duplex internal loopback in MR1 instead
 * of resetting; with a link, performs a soft reset first (and a TAH
 * reset when that offload engine is present).
 * Returns 0 on success, -ETIMEDOUT if the reset fails.
 *
 * NOTE(review): this function is truncated here — the remainder is on a
 * later page of the original file.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?