📄 eepro100.c
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	long ioaddr, boguscnt = max_interrupt_work;
	unsigned short status;
	unsigned int handled = 0;

	ioaddr = dev->base_addr;
	sp = netdev_priv(dev);

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return IRQ_NONE;
	}
#endif

	do {
		status = inw(ioaddr + SCBStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		outw(status & 0xfc00, ioaddr + SCBStatus);

		if (netif_msg_intr(sp))
			printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;
		handled = 1;

		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
			/* Need to gather the postponed packet. */
			speedo_rx(dev);

		/* Always check if all rx buffers are allocated. --SAW */
		speedo_refill_rx_buffers(dev, 0);

		spin_lock(&sp->lock);
		/*
		 * The chip may have suspended reception for various reasons.
		 * Check for that, and re-prime it should this be the case.
		 */
		switch ((status >> 2) & 0xf) {
		case 0: /* Idle */
			break;
		case 1:	/* Suspended */
		case 2:	/* No resources (RxFDs) */
		case 9:	/* Suspended with no more RBDs */
		case 10: /* No resources due to no RBDs */
		case 12: /* Ready with no RBDs */
			speedo_rx_soft_reset(dev);
			break;
		case 3: case 5: case 6: case 7: case 8:
		case 11: case 13: case 14: case 15:
			/* these are all reserved values */
			break;
		}

		/* User interrupt, Command/Tx unit interrupt or CU not active. */
		if (status & 0xA400) {
			speedo_tx_buffer_gc(dev);
			if (sp->tx_full
				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
				/* The ring is no longer full. */
				sp->tx_full = 0;
				netif_wake_queue(dev);	/* Attention: under a spinlock. --SAW */
			}
		}

		spin_unlock(&sp->lock);

		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			/* Will change from 0xfc00 to 0xff00 when we start handling
			   FCP and ER interrupts --Dragan */
			outw(0xfc00, ioaddr + SCBStatus);
			break;
		}
	} while (1);

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	clear_bit(0, (void*)&sp->in_interrupt);
	return IRQ_RETVAL(handled);
}

static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	/* XXX: do we really want to call this before the NULL check? --hch */
	rx_align(skb);			/* Align IP on 16 byte boundary */
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
					   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	rxf->rx_buf_addr = 0xffffffff;
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}

static inline void speedo_rx_link(struct net_device *dev, int entry,
								  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = netdev_priv(dev);
	rxf->status = cpu_to_le32(0xC0000001);	/* '1' for driver use only. */
	rxf->link = 0;			/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}

static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
						dev->name, force);
				sp->rx_ring_state |= RrOOMReported;
			}
			speedo_show_state(dev);
			if (!force)
				return -1;	/* Better luck next time! */
			/* Borrow an skb from one of next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}

static void speedo_refill_rx_buffers(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
			speedo_refill_rx_buf(dev, force) != -1);
}

static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;
	int npkts = 0;

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
									sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring. --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet. It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (netif_msg_rx_err(sp))
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
					   dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
				   pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if (! (status & RxOK)) {
				/* There was a fatal error. This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
											sizeof(struct RxFD) + pkt_len,
											PCI_DMA_FROMDEVICE);

#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
											   sizeof(struct RxFD) + pkt_len,
											   PCI_DMA_FROMDEVICE);
				npkts++;
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				npkts++;
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
								 PKT_BUF_SZ + sizeof(struct RxFD),
								 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	if (npkts)
		sp->last_rx_time = jiffies;

	return 0;
}

static int
speedo_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = netdev_priv(dev);
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	outw(SCBMaskAll, ioaddr + SCBCmd);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	outl(PortPartialReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period. Wait here!
	 */
	udelay(10);

	free_irq(dev->irq, dev);
	speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = NULL;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = NULL;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, 2);

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip. It takes an unpredictable length of time
   for the dump-stats command to complete
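/*
 * Illustrative sketch only: the receive path above relies on four RxFD fields
 * (status, link, rx_buf_addr, count).  The real declaration appears near the
 * top of eepro100.c and is not part of this excerpt, so the exact types and
 * any alignment attribute here are assumptions reconstructed from those
 * accesses.
 */
struct RxFD {				/* Receive frame descriptor (sketch). */
	s32 status;			/* RxComplete/RxOK plus ownership and suspend bits. */
	u32 link;			/* Bus address of the next RxFD in the ring. */
	u32 rx_buf_addr;		/* Buffer pointer; 0xffffffff in simplified mode. */
	u32 count;			/* Buffer size (upper 16 bits) and received byte count. */
};
/*
 * With this layout in mind, speedo_rx_link() chains a freshly allocated
 * descriptor onto the ring by writing its bus address into the previous
 * last_rxf->link and clearing that descriptor's end-of-list/suspend bits
 * (the 0xC0000000 mask), so the chip keeps walking the ring.
 */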