xircom_tulip_cb.c
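/*
 * Ring bookkeeping used throughout this excerpt (a reading aid inferred
 * from the code below, not part of the original comments): cur_rx/cur_tx
 * count descriptors handed to the chip, dirty_rx/dirty_tx count
 * descriptors the driver has reclaimed.  Both counters run free and are
 * reduced modulo the ring size only when used as an index, e.g.:
 *
 *     entry   = tp->cur_tx % TX_RING_SIZE;    slot to fill next
 *     pending = tp->cur_tx - tp->dirty_tx;    entries still outstanding
 */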
    tp->tx_full = 0;
    tp->cur_rx = tp->cur_tx = 0;
    tp->dirty_rx = tp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        tp->rx_ring[i].status = 0;
        tp->rx_ring[i].length = PKT_BUF_SZ;
        tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
        tp->rx_skbuff[i] = NULL;
    }
    /* Mark the last entry as wrapping the ring. */
    tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
    tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

    for (i = 0; i < RX_RING_SIZE; i++) {
        /* Note the receive buffer must be longword aligned.
           dev_alloc_skb() provides 16 byte alignment.  But do *not*
           use skb_reserve() to align the IP header! */
        struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
        tp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;                       /* Mark as being used by this device. */
        tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
        tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
    }
    tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* The Tx buffer descriptor is filled in as needed, but we do need
       to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_skbuff[i] = NULL;
        tp->tx_ring[i].status = 0;
        tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
        if (tp->chip_id == X3201_3)
            tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
#endif /* CARDBUS */
    }
    tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}

static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry;
    u32 flag;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = tp->cur_tx % TX_RING_SIZE;

    tp->tx_skbuff[entry] = skb;
#ifdef CARDBUS
    if (tp->chip_id == X3201_3) {
        memcpy(tp->tx_aligned_skbuff[entry]->data, skb->data, skb->len);
        tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
    } else
#endif
        tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

    if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {   /* Typical path */
        flag = Tx1WholePkt; /* No interrupt */
    } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
        flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
    } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
        flag = Tx1WholePkt; /* No Tx-done intr. */
    } else {
        /* Leave room for set_rx_mode() to fill entries. */
        flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
        tp->tx_full = 1;
    }
    if (entry == TX_RING_SIZE - 1)
        flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

    tp->tx_ring[entry].length = skb->len | flag;
    tp->tx_ring[entry].status = Tx0DescOwned;   /* Pass ownership to the chip. */
    tp->cur_tx++;
    if (tp->tx_full)
        netif_stop_queue(dev);
    else
        netif_wake_queue(dev);

    /* Trigger an immediate transmit demand. */
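    /* On Tulip-class chips CSR1 is the transmit poll-demand register:
       the value written is ignored; the write itself prompts the chip
       to re-examine the Tx descriptor list. */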
    outl(0, dev->base_addr + CSR1);

    dev->trans_start = jiffies;

    return 0;
}

static void xircom_media_change(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    u16 reg0, reg1, reg4, reg5;
    u32 csr6 = inl(ioaddr + CSR6), newcsr6;

    /* reset status first */
    mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_read(dev, tp->phys[0], MII_BMSR);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);

    if (reg1 & BMSR_LSTATUS) {
        /* link is up */
        if (reg0 & BMCR_ANENABLE) {
            /* autonegotiation is enabled */
            reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
            reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
            if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
                tp->speed100 = 1;
                tp->full_duplex = 1;
            } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
                tp->speed100 = 1;
                tp->full_duplex = 0;
            } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
                tp->speed100 = 0;
                tp->full_duplex = 1;
            } else {
                tp->speed100 = 0;
                tp->full_duplex = 0;
            }
        } else {
            /* autonegotiation is disabled */
            if (reg0 & BMCR_SPEED100)
                tp->speed100 = 1;
            else
                tp->speed100 = 0;
            if (reg0 & BMCR_FULLDPLX)
                tp->full_duplex = 1;
            else
                tp->full_duplex = 0;
        }
        printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
        netif_carrier_on(dev);
        newcsr6 = csr6 & ~FullDuplexBit;
        if (tp->full_duplex)
            newcsr6 |= FullDuplexBit;
        if (newcsr6 != csr6)
            outl_CSR6(newcsr6, ioaddr + CSR6);
    } else {
        printk(KERN_DEBUG "%s: Link is down\n", dev->name);
        netif_carrier_off(dev);
    }
}

static void check_duplex(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 reg0;

    mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
    udelay(500);
    while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

    if (tp->autoneg) {
        reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
        reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
    } else {
        reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
        if (tp->speed100)
            reg0 |= BMCR_SPEED100;
        if (tp->full_duplex)
            reg0 |= BMCR_FULLDPLX;
        printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
    }
    mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = dev_instance;
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    int csr5, work_budget = max_interrupt_work;
    int handled = 0;

    spin_lock(&tp->lock);

    do {
        csr5 = inl(ioaddr + CSR5);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (xircom_debug > 4)
            printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, inl(dev->base_addr + CSR5));

        if (csr5 == 0xffffffff)
            break;  /* all bits set, assume PCMCIA card removed */

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
            break;

        handled = 1;

        if (csr5 & (RxIntr | RxNoBuf))
            work_budget -= xircom_rx(dev);

        if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
            unsigned int dirty_tx;

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = tp->tx_ring[entry].status;

                if (status < 0)
                    break;  /* It still hasn't been Txed */
                /* Check for Rx filter setup frames. */
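                /* A setup frame is queued on the Tx ring with no skb
                   attached, so a NULL tx_skbuff slot simply means there
                   is nothing to free for this descriptor. */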
                if (tp->tx_skbuff[entry] == NULL)
                    continue;

                if (status & Tx0DescError) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (xircom_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & Tx0ManyColl) {
                        tp->stats.tx_aborted_errors++;
                    }
                    if (status & Tx0NoCarrier)
                        tp->stats.tx_carrier_errors++;
                    if (status & Tx0LateColl)
                        tp->stats.tx_window_errors++;
                    if (status & Tx0Underflow)
                        tp->stats.tx_fifo_errors++;
                } else {
                    tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_skbuff[entry]);
                tp->tx_skbuff[entry] = NULL;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->tx_full && tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                /* The ring is no longer full */
                tp->tx_full = 0;

            if (tp->tx_full)
                netif_stop_queue(dev);
            else
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (xircom_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 & LinkChange)
                xircom_media_change(dev);
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & TxThreshMask) != TxThreshMask)
                    tp->csr6 += (1 << TxThreshShift);  /* Bump up the Tx threshold */
                else
                    tp->csr6 |= TxStoreForw;  /* Store-n-forward. */
                /* Restart the transmit process. */
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            if (csr5 & RxDied) {  /* Missed a Rx frame. */
                tp->stats.rx_errors++;
                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            /* Clear all error sources, including undocumented ones! */
            outl(0x0800f7ba, ioaddr + CSR5);
        }
        if (--work_budget < 0) {
            if (xircom_debug > 1)
                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                       "csr5=0x%8.8x.\n", dev->name, csr5);
            /* Acknowledge all interrupt sources. */
            outl(0x8001ffff, ioaddr + CSR5);
            break;
        }
    } while (1);

    if (xircom_debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
               dev->name, inl(ioaddr + CSR5));

    spin_unlock(&tp->lock);
    return IRQ_RETVAL(handled);
}

static int
xircom_rx(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int work_done = 0;

    if (xircom_debug > 4)
        printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n",
               entry, tp->rx_ring[entry].status);
    /* If we own the next entry, it's a new packet. Send it up. */
    while (tp->rx_ring[entry].status >= 0) {
        s32 status = tp->rx_ring[entry].status;

        if (xircom_debug > 5)
            printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n",
                   entry, tp->rx_ring[entry].status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (xircom_debug > 1)
                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                               "spanned multiple buffers, status %8.8x!\n",
                               dev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & Rx0DescError) {
                /* There was a fatal error. */
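                /* Rx0DescError is the error-summary bit; the specific
                   causes (runt, oversized frame, bad CRC) are counted
                   individually below. */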
                if (xircom_debug > 2)
                    printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, status);
                tp->stats.rx_errors++;  /* end of a packet. */
                if (status & (Rx0Runt | Rx0HugeFrame))
                    tp->stats.rx_length_errors++;
                if (status & Rx0CRCError)
                    tp->stats.rx_crc_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct sk_buff *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                       dev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);  /* 16 byte align the IP header */
#if ! defined(__alpha__)
                eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
                                 pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
                work_done++;
            } else {  /* Pass up the skb already on the Rx ring. */
                skb_put(skb = tp->rx_skbuff[entry], pkt_len);
                tp->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
            if (skb == NULL)
                break;
            skb->dev = dev;  /* Mark as being used by this device. */
            tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
            work_done++;
        }
        tp->rx_ring[entry].status = Rx0DescOwned;
    }

    return work_done;
}

static void
xircom_down(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct xircom_private *tp = netdev_priv(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    outl(0, ioaddr + CSR7);
    /* Stop the chip's Tx and Rx processes. */
    outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
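    /* Clearing the start bits in CSR6 requests a stop of the Tx and Rx
       DMA engines; on Tulip-compatible parts they halt once any frame
       already in progress has completed. */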