/* winbond-840.c */
	np->cur_tx++;

	/* The spinlock protects against 2 races:
	 * - tx_q_bytes is updated by this function and intr_handler
	 * - our hardware is extremely fast and finishes the packet between
	 *   our check for "queue full" and netif_stop_queue.
	 *   Thus setting DescOwn and netif_stop_queue must be atomic.
	 */
	spin_lock_irq(&np->lock);

	wmb();			/* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = cpu_to_le32(DescOwn);
	wmb();			/* flush status and kick the hardware */
	writel(0, dev->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN)
		np->tx_full = 1;
	else if ((np->drv_flags & HasBrokenTx)
		 && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)
		np->tx_full = 1;
	if (np->tx_full)
		netif_stop_queue(dev);
	dev->trans_start = jiffies;

	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, np->cur_tx, entry);
	}
	return 0;
}
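/* Illustrative sketch (not part of the driver, excluded from compilation):
 * the hand-off above is the classic two-barrier descriptor pattern.  The
 * hypothetical helper below merely restates it with the steps labelled;
 * the struct fields and register names are the ones this driver uses. */
#if 0
static void w840_give_desc_to_hw(struct netdev_private *np, int entry,
				 long ioaddr)
{
	/* Step 1: length, buffer1 and buffer2 were filled in earlier. */
	wmb();					/* make them visible first... */
	/* Step 2: ...then pass ownership of the descriptor to the chip. */
	np->tx_ring[entry].status = cpu_to_le32(DescOwn);
	wmb();					/* flush the status write... */
	/* Step 3: ...before telling the chip to poll the Tx ring. */
	writel(0, ioaddr + TxStartDemand);
}
#endif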
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int work_limit = max_interrupt_work;

	spin_lock(&np->lock);

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if ((intr_status & (NormalIntr | AbnormalIntr)) == 0)
			break;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int tx_status = le32_to_cpu(np->tx_ring[entry].status);

			if (tx_status < 0)
				break;
			if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
				if (debug > 1)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					       dev->name, tx_status);
#endif
				np->stats.tx_errors++;
				if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
				if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
				if (tx_status & 0x0200) np->stats.tx_window_errors++;
				if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
				if ((tx_status & 0x0080) && np->full_duplex == 0)
					np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & 0x0100) np->stats.collisions16++;
#endif
			} else {
#ifdef ETHER_STATS
				if (tx_status & 0x0001) np->stats.tx_deferred++;
#endif
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
				np->stats.collisions += (tx_status >> 3) & 15;
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			pci_unmap_single(np->pdev, np->tx_addr[entry],
					 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			np->tx_q_bytes -= np->tx_skbuff[entry]->len;
			dev_kfree_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = NULL;
		}
		if (np->tx_full &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4 &&
		    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
				   TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
			writel(10, ioaddr + GPTimer);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, (int)readl(ioaddr + IntrStatus));

	spin_unlock(&np->lock);
}
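/* Illustrative note (not part of the driver): the "tx_status < 0" test in
 * intr_handler() above works because DescOwn is bit 31 of the status word,
 * so a negative host-order value means the chip still owns the descriptor.
 * An equivalent, more explicit form of the same test would be: */
#if 0
	if (le32_to_cpu(np->tx_ring[entry].status) & DescOwn)
		break;		/* chip has not finished this descriptor yet */
#endif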
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
		       entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = le32_to_cpu(desc->status);

		if (debug > 4)
			printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
			       status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					       "multiple buffers, entry %#x status %4.4x!\n",
					       dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				np->stats.rx_errors++;	/* end of a packet. */
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(np->pdev, np->rx_addr[entry],
						    np->rx_skbuff[entry]->len,
						    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       np->rx_skbuff[entry]->tail, pkt_len);
#endif
			} else {
				pci_unmap_single(np->pdev, np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version	/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
				       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
				       "%d.%d.%d.%d.\n",
				       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
				       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
				       skb->data[8], skb->data[9], skb->data[10], skb->data[11],
				       skb->data[12], skb->data[13], skb->data[14], skb->data[15],
				       skb->data[16], skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pdev, skb->tail,
							    skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = cpu_to_le32(np->rx_addr[entry]);
		}
		wmb();
		np->rx_ring[entry].status = cpu_to_le32(DescOwn);
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = (struct netdev_private *)dev->priv;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
		       dev->name, intr_status);
	if (intr_status == 0xffffffff)
		return;
	if (intr_status & TxFIFOUnderflow) {
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts */
		np->csr6 += 0x4000;
#else
		int cur = (np->csr6 >> 14) & 0x7f;
		if (cur < 64)
			cur *= 2;
		else
			cur = 0;	/* load full packet before starting */
		np->csr6 &= ~(0x7F << 14);
		np->csr6 |= cur << 14;
#endif
		printk(KERN_DEBUG "%s: Tx underflow, increasing threshold to %8.8x.\n",
		       dev->name, np->csr6);
		writel(np->csr6, ioaddr + NetworkConfig);
	}
	if (intr_status & IntrRxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		writel(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
	writel(0, ioaddr + RxStartDemand);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = (struct netdev_private *)dev->priv;

	/* The chip only needs to report frames it silently dropped. */
	if (netif_running(dev))
		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;

	return &np->stats;
}

static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
	int crc = -1;

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				 ethernet_polynomial : 0);
		}
	}
	return crc;
}
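/* Illustrative sketch (not part of the driver, excluded from compilation):
 * how ether_crc() feeds the 64-bit multicast hash filter.  set_rx_mode()
 * below takes the top 6 bits of the CRC, complements them with "^ 0x3F",
 * and uses the result as a bit index into mc_filter[2]; "addr" here stands
 * for any ETH_ALEN-byte multicast address. */
#if 0
	u32 mc_filter[2] = { 0, 0 };
	int bit_nr = (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;	/* 0..63 */
	set_bit(bit_nr, mc_filter);	/* word 0 holds bits 0-31, word 1 bits 32-63 */
#endif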
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u32 mc_filter[2];		/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
			| AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
				mc_filter);
		}
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	writel(mc_filter[0], ioaddr + MulticastFilter0);
	writel(mc_filter[1], ioaddr + MulticastFilter1);
	np->csr6 &= ~0x00F8;
	np->csr6 |= rx_mode;
	writel(np->csr6, ioaddr + NetworkConfig);
}

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	u16 *data = (u16 *)&rq->ifr_data;

	switch (cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
		/* Fall Through */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
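/* Illustrative sketch (not part of the driver, excluded from compilation):
 * reading a MII register from userspace through the private ioctls that
 * mii_ioctl() above implements.  "skfd" (an open socket fd) and the choice
 * of register 1 (BMSR) are hypothetical; the u16 data[] overlay on
 * ifr_data mirrors the one used in mii_ioctl(). */
#if 0
	struct ifreq ifr;
	u16 *data = (u16 *)&ifr.ifr_data;

	strcpy(ifr.ifr_name, "eth0");		/* interface bound to this driver */
	data[1] = 1;				/* MII register 1 (BMSR) */
	ioctl(skfd, SIOCDEVPRIVATE, &ifr);	/* fills data[0] (PHY) and data[3] (value) */
	printf("PHY %d, register 1 = %4.4x\n", data[0], data[3]);
#endif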
static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
		       "Config %8.8x.\n", dev->name,
		       (int)readl(ioaddr + IntrStatus),
		       (int)readl(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(np->csr6 &= ~0x20FA, ioaddr + NetworkConfig);

	if (readl(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		printk("\n" KERN_DEBUG " Tx ring at %8.8x:\n", (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %4.4x %8.8x.\n", i,
			       np->tx_ring[i].length, np->tx_ring[i].status,
			       np->tx_ring[i].buffer1);
		printk("\n" KERN_DEBUG " Rx ring %8.8x:\n", (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", i,
			       np->rx_ring[i].length, np->rx_ring[i].status,
			       np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);
	del_timer_sync(&np->timer);
	free_rxtx_rings(np);

	return 0;
}

static void __devexit w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pdev->driver_data;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	if (dev) {
		struct netdev_private *np = (void *)(dev->priv);
		unregister_netdev(dev);
#ifdef USE_IO_OPS
		release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
#else
		release_mem_region(pci_resource_start(pdev, 1),
				   pci_id_tbl[np->chip_id].io_size);
		iounmap((char *)(dev->base_addr));
#endif
		kfree(dev);
	}

	pdev->driver_data = NULL;
}

static struct pci_driver w840_driver = {
	name:		"winbond-840",
	id_table:	w840_pci_tbl,
	probe:		w840_probe1,
	remove:		w840_remove1,
};

static int __init w840_init(void)
{
	return pci_module_init(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);