tulip_core.c
            tp->csr6 = 0x00420000;
            outl(0x0001B078, ioaddr + 0xB8);
            outl(0x0201B078, ioaddr + 0xB8);
            next_tick = 1*HZ;
        }
    } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
               && !tp->medialock) {
        dev->if_port = 0;
        tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
        outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
    } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
        /* Provided by BOLO, Macronix - 12/10/1998. */
        dev->if_port = 0;
        tp->csr6 = 0x01a80200;
        outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
        outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
    } else if (tp->chip_id == DC21143 &&
               tulip_media_cap[dev->if_port] & MediaIsMII) {
        /* We must reset the media CSRs when we force-select MII mode. */
        outl(0x0000, ioaddr + CSR13);
        outl(0x0000, ioaddr + CSR14);
        outl(0x0008, ioaddr + CSR15);
    } else if (tp->chip_id == COMET) {
        dev->if_port = 0;
        tp->csr6 = 0x00040000;
    } else if (tp->chip_id == AX88140) {
        tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
    } else
        tulip_select_media(dev, 1);

    /* Start the chip's Tx to process setup frame. */
    tulip_outl_csr(tp, tp->csr6, CSR6);
    tulip_outl_csr(tp, tp->csr6 | csr6_st, CSR6);

    /* Enable interrupts by setting the interrupt mask. */
    outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
    outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
    tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
    outl(0, ioaddr + CSR2);        /* Rx poll demand */

    if (tulip_debug > 2) {
        printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
               dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
               inl(ioaddr + CSR6));
    }

    /* Set the timer to switch to check for link beat and perhaps switch
       to an alternate media type. */
    init_timer(&tp->timer);
    tp->timer.expires = RUN_AT(next_tick);
    tp->timer.data = (unsigned long)dev;
    tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
    add_timer(&tp->timer);
}

/* ifup entry point: grab the IRQ, set up the descriptor rings, then bring
   the chip up and start the transmit queue. */
static int
tulip_open(struct net_device *dev)
{
    int retval;

    MOD_INC_USE_COUNT;

    if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ,
                              dev->name, dev))) {
        MOD_DEC_USE_COUNT;
        return retval;
    }

    tulip_init_ring(dev);
    tulip_up(dev);
    netif_start_queue(dev);

    return 0;
}

/* Handle a transmit timeout reported by the networking core: log the chip
   state, optionally switch to another media type, then restart Tx/Rx. */
static void tulip_tx_timeout(struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    long ioaddr = dev->base_addr;
    unsigned long flags;

    DPRINTK("ENTER\n");

    spin_lock_irqsave(&tp->lock, flags);

    if (tulip_media_cap[dev->if_port] & MediaIsMII) {
        /* Do nothing -- the media monitor should handle this. */
        if (tulip_debug > 1)
            printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
                   dev->name);
    } else if (tp->chip_id == DC21040) {
        if (!tp->medialock && inl(ioaddr + CSR12) & 0x0002) {
            dev->if_port = (dev->if_port == 2 ? 0 : 2);
            printk(KERN_INFO "%s: 21040 transmit timed out, switching to "
                   "%s.\n", dev->name, medianame[dev->if_port]);
            tulip_select_media(dev, 0);
        }
        goto out;
    } else if (tp->chip_id == DC21041) {
        int csr12 = inl(ioaddr + CSR12);

        printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
               "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
               dev->name, inl(ioaddr + CSR5), csr12,
               inl(ioaddr + CSR13), inl(ioaddr + CSR14));
        tp->mediasense = 1;
        if (!tp->medialock) {
            if (dev->if_port == 1 || dev->if_port == 2)
                if (csr12 & 0x0004) {
                    dev->if_port = 2 - dev->if_port;
                } else
                    dev->if_port = 0;
            else
                dev->if_port = 1;
            tulip_select_media(dev, 0);
        }
    } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
               || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
               || tp->chip_id == DM910X) {
        printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
               "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
               dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12),
               inl(ioaddr + CSR13), inl(ioaddr + CSR14), inl(ioaddr + CSR15));
        if (!tp->medialock && tp->mtable) {
            do
                --tp->cur_index;
            while (tp->cur_index >= 0
                   && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
                       & MediaIsFD));
            if (tp->cur_index < 0) {
                /* We start again, but should instead look for default. */
                tp->cur_index = tp->mtable->leafcount - 1;
            }
            tulip_select_media(dev, 0);
            printk(KERN_WARNING "%s: transmit timed out, switching to %s "
                   "media.\n", dev->name, medianame[dev->if_port]);
        }
    } else {
        printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
               "%8.8x, resetting...\n",
               dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12));
        dev->if_port = 0;
    }

#if defined(way_too_many_messages)
    if (tulip_debug > 3) {
        int i;

        for (i = 0; i < RX_RING_SIZE; i++) {
            u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
            int j;

            printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
                   "%2.2x %2.2x %2.2x.\n",
                   i, (unsigned int)tp->rx_ring[i].status,
                   (unsigned int)tp->rx_ring[i].length,
                   (unsigned int)tp->rx_ring[i].buffer1,
                   (unsigned int)tp->rx_ring[i].buffer2,
                   buf[0], buf[1], buf[2]);
            for (j = 0; buf[j] != 0xee && j < 1600; j++)
                if (j < 100)
                    printk(" %2.2x", buf[j]);
            printk(" j=%d.\n", j);
        }
        printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
        printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
        printk("\n");
    }
#endif

    /* Stop and restart the chip's Tx processes. */
    tulip_restart_rxtx(tp, tp->csr6);
    /* Trigger an immediate transmit demand. */
    outl(0, ioaddr + CSR1);

    tp->stats.tx_errors++;

out:
    spin_unlock_irqrestore(&tp->lock, flags);
    dev->trans_start = jiffies;
    netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    int i;

    DPRINTK("ENTER\n");

    tp->cur_rx = tp->cur_tx = 0;
    tp->dirty_rx = tp->dirty_tx = 0;
    tp->susp_rx = 0;
    tp->ttimer = 0;
    tp->nir = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        tp->rx_ring[i].status = 0x00000000;
        tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
        tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma +
                                             sizeof(struct tulip_rx_desc) * (i + 1));
        tp->rx_buffers[i].skb = NULL;
        tp->rx_buffers[i].mapping = 0;
    }
    /* Mark the last entry as wrapping the ring. */
    tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
    tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

    for (i = 0; i < RX_RING_SIZE; i++) {
        dma_addr_t mapping;

        /* Note the receive buffer must be longword aligned.
           dev_alloc_skb() provides 16 byte alignment.  But do *not*
           use skb_reserve() to align the IP header! */
        struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
        tp->rx_buffers[i].skb = skb;
        if (skb == NULL)
            break;
        mapping = pci_map_single(tp->pdev, skb->tail,
                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
        tp->rx_buffers[i].mapping = mapping;
        skb->dev = dev;    /* Mark as being used by this device. */
        tp->rx_ring[i].status = cpu_to_le32(DescOwned);    /* Owned by Tulip chip */
        tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
    }
    tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* The Tx buffer descriptor is filled in as needed, but we
       do need to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_buffers[i].skb = NULL;
        tp->tx_buffers[i].mapping = 0;
        tp->tx_ring[i].status = 0x00000000;
        tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma +
                                             sizeof(struct tulip_tx_desc) * (i + 1));
    }
    tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}

/* Queue an skb for transmission: map it for DMA, fill in the next Tx
   descriptor, and hand ownership of the descriptor to the chip. */
static int
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    int entry;
    u32 flag;
    dma_addr_t mapping;

    spin_lock_irq(&tp->lock);

    /* Calculate the next Tx descriptor entry. */
    entry = tp->cur_tx % TX_RING_SIZE;

    tp->tx_buffers[entry].skb = skb;
    mapping = pci_map_single(tp->pdev, skb->data,
                             skb->len, PCI_DMA_TODEVICE);
    tp->tx_buffers[entry].mapping = mapping;
    tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

    if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {    /* Typical path */
        flag = 0x60000000;    /* No interrupt */
    } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
        flag = 0xe0000000;    /* Tx-done intr. */
    } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
        flag = 0x60000000;    /* No Tx-done intr. */
    } else {
        /* Leave room for set_rx_mode() to fill entries. */
        flag = 0xe0000000;    /* Tx-done intr. */
        netif_stop_queue(dev);
    }
    if (entry == TX_RING_SIZE-1)
        flag = 0xe0000000 | DESC_RING_WRAP;

    tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
    /* if we were using Transmit Automatic Polling, we would need a
     * wmb() here. */
    tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
    wmb();

    tp->cur_tx++;

    /* Trigger an immediate transmit demand. */
    outl(0, dev->base_addr + CSR1);

    spin_unlock_irq(&tp->lock);

    dev->trans_start = jiffies;

    return 0;
}

/* Quiesce the chip: kill the media timer, mask interrupts, and stop the
   Tx and Rx processes. */
static void tulip_down(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    unsigned long flags;

    del_timer_sync(&tp->timer);

    spin_lock_irqsave(&tp->lock, flags);

    /* Disable interrupts by clearing the interrupt mask. */
    outl(0x00000000, ioaddr + CSR7);

    /* Stop the Tx and Rx processes. */
    tulip_stop_rxtx(tp, inl(ioaddr + CSR6));

    /* 21040 -- Leave the card in 10baseT state. */
    if (tp->chip_id == DC21040)
        outl(0x00000004, ioaddr + CSR13);

    if (inl(ioaddr + CSR6) != 0xffffffff)
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

    spin_unlock_irqrestore(&tp->lock, flags);

    dev->if_port = tp->saved_if_port;

    /* Leave the driver in snooze, not sleep, mode. */
    tulip_set_power_state(tp, 0, 1);
}

/* ifdown entry point: stop the queue, shut the chip down, then free the
   IRQ and every buffer still attached to the Rx and Tx rings. */
static int tulip_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    int i;

    netif_stop_queue(dev);

    tulip_down(dev);

    if (tulip_debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inl(ioaddr + CSR5));

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = tp->rx_buffers[i].skb;
        dma_addr_t mapping = tp->rx_buffers[i].mapping;

        tp->rx_buffers[i].skb = NULL;
        tp->rx_buffers[i].mapping = 0;

        tp->rx_ring[i].status = 0;    /* Not owned by Tulip chip. */
        tp->rx_ring[i].length = 0;
        tp->rx_ring[i].buffer1 = 0xBADF00D0;    /* An invalid address. */
        if (skb) {
            pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
                             PCI_DMA_FROMDEVICE);
            dev_kfree_skb(skb);
        }
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        struct sk_buff *skb = tp->tx_buffers[i].skb;

        if (skb != NULL) {
            pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
                             skb->len, PCI_DMA_TODEVICE);
            dev_kfree_skb(skb);
        }
        tp->tx_buffers[i].skb = NULL;
        tp->tx_buffers[i].mapping = 0;
    }

    MOD_DEC_USE_COUNT;

    return 0;
}

static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (netif_running(dev)) {
        unsigned long flags;

        spin_lock_irqsave(&tp->lock, flags);
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
        spin_unlock_irqrestore(&tp->lock, flags);
    }

    return &tp->stats;
}

/* Provide ioctl() calls to examine the MII xcvr state. */
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct tulip_private *tp = (struct tulip_private *)dev->priv;
    long ioaddr = dev->base_addr;