/*
 * fcc_enet.c — Fast Communications Controller (FCC) Ethernet driver
 * excerpt (PowerQUICC II / MPC8260-class CPM2 hardware; see the CPM37
 * erratum workaround below).
 */
/* Tail of the transmit-start path (the function header lies outside this
 * chunk): advance cur_tx past the BD just queued, stop the queue when the
 * ring becomes full, then drop the lock.
 */
	/* Advance to the next BD, wrapping at the end of the ring. */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	/* Ring just became full: tell the stack to stop handing us packets. */
	if (!--cep->tx_free)
		netif_stop_queue(dev);

	cep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&cep->lock);

	return 0;
}

/* Transmit watchdog, called by the network core when a transmit has not
 * completed in time.  Logs the event, counts it as a TX error, dumps both
 * BD rings in debug builds, and re-wakes the queue if free TX descriptors
 * remain.
 */
static void
fcc_enet_timeout(struct net_device *dev)
{
	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;

	printk("%s: transmit timed out.\n", dev->name);
	cep->stats.tx_errors++;
#ifndef final_version
	{
		int i;
		cbd_t *bdp;

		printk(" Ring data dump: cur_tx %p tx_free %d cur_rx %p.\n",
		       cep->cur_tx, cep->tx_free, cep->cur_rx);
		bdp = cep->tx_bd_base;
		printk(" Tx @base %p :\n", bdp);
		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
		bdp = cep->rx_bd_base;
		printk(" Rx @base %p :\n", bdp);
		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
	}
#endif
	/* If descriptors are still free, let the queue keep running. */
	if (cep->tx_free)
		netif_wake_queue(dev);
}

/* The interrupt handler.
 *
 * Reads and acknowledges the FCC event register, dispatches receive work
 * to fcc_enet_rx(), reaps completed transmit BDs (updating error stats and
 * freeing skbs), restarts the transmitter after fatal TX errors, and counts
 * receive-busy (no free RX BD) events.
 */
static irqreturn_t
fcc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	volatile struct fcc_enet_private *cep;
	volatile cbd_t *bdp;
	ushort int_events;
	int must_restart;

	cep = (struct fcc_enet_private *)dev->priv;

	/* Get the interrupt events that caused us to be here. */
	int_events = cep->fccp->fcc_fcce;
	/* Acknowledge (write-to-clear) only the unmasked events we handle. */
	cep->fccp->fcc_fcce = (int_events & cep->fccp->fcc_fccm);
	must_restart = 0;

#ifdef PHY_INTERRUPT
	/* We have to be careful here to make sure that we aren't
	 * interrupted by a PHY interrupt.
	 */
	disable_irq_nosync(PHY_INTERRUPT);
#endif

	/* Handle receive event in its own function. */
	if (int_events & FCC_ENET_RXF)
		fcc_enet_rx(dev_id);

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so the debug code until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors. */
	if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) {
		spin_lock(&cep->lock);
		bdp = cep->dirty_tx;
		/* Reap every BD the controller has finished with
		 * (TX_READY cleared means the hardware gave it back).
		 */
		while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
			/* Ring fully reclaimed: nothing outstanding. */
			if (cep->tx_free == TX_RING_SIZE)
				break;

			if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
				cep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
				cep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
				cep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
				cep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
				cep->stats.tx_carrier_errors++;

			/* No heartbeat or Lost carrier are not really bad errors.
			 * The others require a restart transmit command.
			 */
			if (bdp->cbd_sc &
			    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				must_restart = 1;
				cep->stats.tx_errors++;
			}

			cep->stats.tx_packets++;

			/* Deferred means some collisions occurred during transmit,
			 * but we eventually sent the packet OK.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_DEF)
				cep->stats.collisions++;

			/* Free the sk buffer associated with this last transmit. */
			dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
			cep->tx_skbuff[cep->skb_dirty] = NULL;
			cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

			/* Update pointer to next buffer descriptor to be
			 * transmitted.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_WRAP)
				bdp = cep->tx_bd_base;
			else
				bdp++;

			/* I don't know if we can be held off from processing these
			 * interrupts for more than one frame time.  I really hope
			 * not.  In such a case, we would now want to check the
			 * currently available BD (cur_tx) and determine if any
			 * buffers between the dirty_tx and cur_tx have also been
			 * sent.  We would want to process anything in between that
			 * does not have BD_ENET_TX_READY set.
			 */

			/* Since we have freed up a buffer, the ring is no longer
			 * full.
			 */
			if (!cep->tx_free++) {
				if (netif_queue_stopped(dev)) {
					netif_wake_queue(dev);
				}
			}

			cep->dirty_tx = (cbd_t *)bdp;
		}

		if (must_restart) {
			volatile cpm_cpm2_t *cp;

			/* Some transmit errors cause the transmitter to shut
			 * down.  We now issue a restart transmit.  Since the
			 * errors close the BD and update the pointers, the restart
			 * _should_ pick up without having to reset any of our
			 * pointers either.  Also, To workaround 8260 device erratum
			 * CPM37, we must disable and then re-enable the transmitter
			 * following a Late Collision, Underrun, or Retry Limit error.
			 */
			cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT;
			udelay(10); /* wait a few microseconds just on principle */
			cep->fccp->fcc_gfmr |= FCC_GFMR_ENT;

			cp = cpmp;
			cp->cp_cpcr =
				mk_cr_cmd(cep->fip->fc_cpmpage,
					  cep->fip->fc_cpmblock,
					  0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
			/* Busy-wait until the CP acknowledges the command. */
			while (cp->cp_cpcr & CPM_CR_FLG);
		}
		spin_unlock(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.
	 */
	if (int_events & FCC_ENET_BSY) {
		cep->fccp->fcc_fcce = FCC_ENET_BSY;
		cep->stats.rx_dropped++;
	}

#ifdef PHY_INTERRUPT
	enable_irq(PHY_INTERRUPT);
#endif
	return IRQ_HANDLED;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fcc_enet_rx(struct net_device *dev)
{
	struct fcc_enet_private *cep;
	volatile cbd_t *bdp;
	struct sk_buff *skb;
	ushort pkt_len;

	cep = (struct fcc_enet_private *)dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;
	for (;;) {
		/* Controller still owns this BD: nothing more to collect. */
		if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
			break;

#ifndef final_version
		/* Since we have allocated space to hold a complete frame, both
		 * the first and last indicators should be set.
		 */
		if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		    (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			printk("CPM ENET: rcv is not first+last\n");
#endif

		/* Frame too long or too short. */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
			cep->stats.rx_length_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			cep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CL)	/* Late Collision */
			cep->stats.rx_frame_errors++;

		if (!(bdp->cbd_sc &
		      (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
		       BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_CL))) {
			/* Process the incoming frame. */
			cep->stats.rx_packets++;

			/* Remove the FCS from the packet length. */
			pkt_len = bdp->cbd_datlen - 4;
			cep->stats.rx_bytes += pkt_len;

			/* This does 16 byte alignment, much more than we need. */
			skb = dev_alloc_skb(pkt_len);

			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				cep->stats.rx_dropped++;
			} else {
				skb->dev = dev;
				skb_put(skb,pkt_len);	/* Make room */
				/* Copy out of the DMA buffer into the new skb. */
				eth_copy_and_sum(skb,
					(unsigned char *)__va(bdp->cbd_bufaddr),
					pkt_len, 0);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
		}

		/* Clear the status flags for this buffer. */
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty. */
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry. */
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = cep->rx_bd_base;
		else
			bdp++;
	}
	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}

/* Close the interface: stop the TX queue, halt the FCC, and (with MDIO
 * support compiled in) run the PHY's shutdown command list.
 */
static int
fcc_enet_close(struct net_device *dev)
{
#ifdef CONFIG_USE_MDIO
	struct fcc_enet_private *fep = dev->priv;
#endif

	netif_stop_queue(dev);
	fcc_stop(dev);

#ifdef CONFIG_USE_MDIO
	if (fep->phy)
		mii_do_cmd(dev, fep->phy->shutdown);
#endif

	return 0;
}

/* Return the accumulated interface statistics. */
static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev)
{
	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;

	return &cep->stats;
}

#ifdef CONFIG_USE_MDIO

/* NOTE: Most of the following comes from the FEC driver for 860. The
 * overall structure of MII code has been retained (as it's proved stable
 * and well-tested), but actual transfer requests are processed "at once"
 * instead of being queued (there's no interrupt-driven MII transfer
 * mechanism, one has to toggle the data/clock bits manually).
 */
static int
mii_queue(struct net_device *dev, int regval,
	  void (*func)(uint, struct net_device *))
{
	struct fcc_enet_private *fep;
	int retval, tmp;

	/* Add PHY address to register command. */
	fep = dev->priv;
	regval |= fep->phy_addr << 23;

	retval = 0;

	/* Perform the MII transfer synchronously, then hand the result
	 * to the caller's parse function (if any).
	 */
	tmp = mii_send_receive(fep->fip, regval);
	if (func)
		func(tmp, dev);

	return retval;
}

/* Run a NULL-safe, mk_mii_end-terminated table of MII commands. */
static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	int k;

	if(!c)
		return;

	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
}

/* Fold the PHY status register (BMSR) into fep->phy_status. */
static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	volatile struct fcc_enet_private *fep = dev->priv;
	uint s = fep->phy_status;

	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & BMSR_LSTATUS)
		s |= PHY_STAT_LINK;
	if (mii_reg & BMSR_RFAULT)
		s |= PHY_STAT_FAULT;
	if (mii_reg & BMSR_ANEGCOMPLETE)
		s |= PHY_STAT_ANC;

	fep->phy_status = s;
}

/* Fold the PHY control register (BMCR) into fep->phy_status. */
static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	volatile struct fcc_enet_private *fep = dev->priv;
	uint s = fep->phy_status;

	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & BMCR_ANENABLE)
		s |= PHY_CONF_ANE;
	if (mii_reg & BMCR_LOOPBACK)
		s |= PHY_CONF_LOOP;

	fep->phy_status = s;
}

/* Fold the auto-negotiation advertisement register (ANAR) into
 * fep->phy_status.
 */
static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	volatile struct fcc_enet_private *fep = dev->priv;
	uint s = fep->phy_status;

	s &= ~(PHY_CONF_SPMASK);

	if (mii_reg & ADVERTISE_10HALF)
		s |= PHY_CONF_10HDX;
	if (mii_reg & ADVERTISE_10FULL)
		s |= PHY_CONF_10FDX;
	if (mii_reg & ADVERTISE_100HALF)
		s |= PHY_CONF_100HDX;
	if (mii_reg & ADVERTISE_100FULL)
		s |= PHY_CONF_100FDX;

	fep->phy_status = s;
}

/* ------------------------------------------------------------------------- */
/* Generic PHY support.  Should work for all PHYs, but does not support link
 * change interrupts.
 */
#ifdef CONFIG_FCC_GENERIC_PHY

static phy_info_t phy_info_generic = {
	0x00000000, /* 0-->match any PHY */
	"GENERIC",

	(const phy_cmd_t []) {  /* config */
		/* advertise only half-duplex capabilities */
		{ mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF),
			mii_parse_anar },

		/* enable auto-negotiation */
		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup */
		/* restart auto-negotiation */
		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART),
			NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* ack_int */
		/* We don't actually use the ack_int table with a generic
		 * PHY, but putting a reference to mii_parse_sr here keeps
		 * us from getting a compiler warning about unused static
		 * functions in the case where we only compile in generic
		 * PHY support.
		 */
		{ mk_mii_read(MII_BMSR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};
#endif /* ifdef CONFIG_FCC_GENERIC_PHY */

/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards. */

#ifdef CONFIG_FCC_LXT970

#define MII_LXT970_MIRROR	16	/* Mirror register */
#define MII_LXT970_IER		17	/* Interrupt Enable Register */
#define MII_LXT970_ISR		18	/* Interrupt Status Register */
#define MII_LXT970_CONFIG	19	/* Configuration Register */
#define MII_LXT970_CSR		20	/* Chip Status Register */

/* Decode the LXT970 chip status register into speed/duplex status bits. */
static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	volatile struct fcc_enet_private *fep = dev->priv;
	uint s = fep->phy_status;

	s &= ~(PHY_STAT_SPMASK);

	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			s |= PHY_STAT_100FDX;
		else
			s |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			s |= PHY_STAT_10FDX;
		else
			s |= PHY_STAT_10HDX;
	}

	fep->phy_status = s;
}