eepro100.c
		/* User interrupt, Command/Tx unit interrupt or CU not active. */
		if (status & 0xA400) {
			spin_lock(&sp->lock);
			speedo_tx_buffer_gc(dev);
			if (sp->tx_full
			    && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
				/* The ring is no longer full. */
				sp->tx_full = 0;
				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
			}
			spin_unlock(&sp->lock);
		}

		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
			       dev->name, status);
			/* Clear all interrupt sources. */
			/* Will change from 0xfc00 to 0xff00 when we start handling
			   FCP and ER interrupts --Dragan */
			outw(0xfc00, ioaddr + SCBStatus);
			break;
		}
	} while (1);

	if (speedo_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, inw(ioaddr + SCBStatus));

	clear_bit(0, (void*)&sp->in_interrupt);
	return;
}

static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
			       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	rxf->rx_buf_addr = 0xffffffff;
	pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
			    sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}

static inline void speedo_rx_link(struct net_device *dev, int entry,
				  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	rxf->status = cpu_to_le32(0xC0000001); 	/* '1' for driver use only. */
	rxf->link = 0;				/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single(sp->pdev, sp->last_rxf_dma,
			    sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}

static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
				       dev->name, force);
				speedo_show_state(dev);
				sp->rx_ring_state |= RrOOMReported;
			}
			if (!force)
				return -1;	/* Better luck next time! */
			/* Borrow an skb from one of next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}
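/* Illustrative sketch, not taken from the original driver: the refill code
   above (and speedo_refill_rx_buffers() below) treats cur_rx and dirty_rx as
   free-running unsigned counters that are reduced modulo RX_RING_SIZE only
   when indexing the ring, so the signed difference gives the number of
   consumed-but-not-yet-refilled descriptors even after the 32-bit counters
   wrap.  The helper name is hypothetical. */
#if 0
static int example_rx_refill_backlog(unsigned int cur_rx, unsigned int dirty_rx)
{
	/* e.g. cur_rx == 0x00000003, dirty_rx == 0xffffffff: the unsigned
	   subtraction yields 4, the true number of outstanding entries. */
	return (int)(cur_rx - dirty_rx);
}
#endif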
static void speedo_refill_rx_buffers(struct net_device *dev, int force)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
	       speedo_refill_rx_buf(dev, force) != -1);
}

static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;

	if (speedo_debug > 4)
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
				    sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring.  --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet.  It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (speedo_debug > 2)
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
				       dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (speedo_debug > 4)
			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n",
			       status, pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
				       "status %8.8x!\n", dev->name, status);
			else if (! (status & RxOK)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
				       "status %8.8x.\n", dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
						    sizeof(struct RxFD) + pkt_len,
						    PCI_DMA_FROMDEVICE);
#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
				       pkt_len);
#endif
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
					       dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
						 PKT_BUF_SZ + sizeof(struct RxFD),
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	sp->last_rx_time = jiffies;
	return 0;
}
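/* Illustrative sketch, not part of the original driver: the per-frame policy
   used in speedo_rx() above.  Frames shorter than rx_copybreak are copied
   into a freshly allocated skb; skb_reserve(skb, 2) plus the 14-byte Ethernet
   header totals 16 bytes, which is what the "Align IP on 16 byte boundaries"
   comment is aiming for.  Longer frames avoid the copy: the ring skb itself
   is handed to the stack and the descriptor slot is refilled later.  The
   helper name is hypothetical. */
#if 0
static int example_rx_should_copy(int pkt_len)
{
	return pkt_len < rx_copybreak;	/* nonzero: copy, zero: pass the ring skb up */
}
#endif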
static int
speedo_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (speedo_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	outl(PortPartialReset, ioaddr + SCBPort);

	free_irq(dev->irq, dev);

	/* Print a few items for debugging. */
	if (speedo_debug > 3)
		speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = 0;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = 0;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (speedo_debug > 0)
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, 2);

	MOD_DEC_USE_COUNT;

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   Oh, and incoming frames are dropped while executing dump-stats! */
static struct net_device_stats *
speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	/* Update only if the previous dump finished. */
	if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
		sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
		sp->lstats->done_marker = 0x0000;
		if (netif_running(dev)) {
			unsigned long flags;
			/* Take a spinlock to make wait_for_cmd_done and sending the
			   command atomic.  --SAW */
			spin_lock_irqsave(&sp->lock, flags);
			wait_for_cmd_done(ioaddr + SCBCmd);
			outb(CUDumpStats, ioaddr + SCBCmd);
			spin_unlock_irqrestore(&sp->lock, flags);
		}
	}
	return &sp->stats;
}

static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = sp->phy[0] & 0x1f;
	int saved_acpi;
	int t;

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		/* FIXME: these operations need to be serialized with MDIO
		   access from the timeout handler.
		   They are currently serialized only with MDIO access from the
		   timer routine.  2000/05/09 SAW */
		saved_acpi = pci_set_power_state(sp->pdev, 0);
		t = del_timer_sync(&sp->timer);
		data[3] = mdio_read(ioaddr, data[0], data[1]);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		saved_acpi = pci_set_power_state(sp->pdev, 0);
		t = del_timer_sync(&sp->timer);
		mdio_write(ioaddr, data[0], data[1], data[2]);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
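/* Usage sketch, not part of this file: how a user-space tool might drive the
   private MII ioctls handled by speedo_ioctl() above.  The layout mirrors
   what the handler expects -- four u16 words overlaid on ifr_data, with
   word 0 the PHY address, word 1 the register number, word 2 the value to
   write and word 3 receiving the value read.  The helper name is
   hypothetical, sock_fd is assumed to be an open AF_INET socket, and error
   handling is minimal. */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>

static int example_mii_read(int sock_fd, const char *ifname, int mii_reg)
{
	struct ifreq ifr;
	unsigned short *data = (unsigned short *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(sock_fd, SIOCDEVPRIVATE, &ifr) < 0)		/* data[0] = PHY address */
		return -1;
	data[1] = mii_reg;
	if (ioctl(sock_fd, SIOCDEVPRIVATE + 1, &ifr) < 0)	/* read the register */
		return -1;
	return data[3];
}
#endif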
/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we conve