tulip_core.c
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;

	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}


/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct dev_mc_list *mclist;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
	     i++, mclist = mclist->next) {
		int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct dev_mc_list *mclist;
	int i;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
	     i++, mclist = mclist->next) {
		eaddrs = (u16 *)mclist->dmi_addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15-i)*12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
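/*
 * Sketch of the setup-frame layout as implied by the two builders above
 * (a reading of this code, not chip documentation): tp->setup_frame is a
 * 192-byte buffer that the chip consumes as 32-bit words, of which only
 * the low-order 16 bits are significant -- hence every halfword is stored
 * twice.  The perfect-filter variant holds 16 six-byte entries: up to 14
 * multicast addresses, broadcast padding for unused slots, and our own
 * station address in the final slot.  The hashed variant instead stores a
 * 512-bit hash table followed by the station address.
 */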
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct dev_mc_list *mclist;
		int i;

		if (dev->mc_count > 64) {	/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};	/* Multicast hash filter */
			int filterbit;

			for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			     i++, mclist = mclist->next) {
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
				else
					filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2) {
					printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
					       "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
					       mclist->dmi_addr[0], mclist->dmi_addr[1],
					       mclist->dmi_addr[2], mclist->dmi_addr[3],
					       mclist->dmi_addr[4], mclist->dmi_addr[5],
					       ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
				}
			}
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + 0xAC);
				iowrite32(mc_filter[1], ioaddr + 0xB0);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (dev->mc_count > 14) { /* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;
			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;	/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}
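/*
 * Recap of how set_rx_mode() hands the setup frame to the chip (a reading
 * of the code above, offered as a sketch rather than as chip documentation):
 * the 192-byte frame is queued as an ordinary Tx descriptor whose tx_flags
 * carry the setup-packet bit (plus the hash-filtering bit in the hashed
 * case), prefixed by a dummy descriptor whenever it would not land in ring
 * slot 0 -- the errata workaround noted inline.  Ownership (DescOwned) is
 * handed over only after both descriptors are fully written, and the write
 * to CSR1 then demands an immediate Tx poll.
 */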
#ifdef CONFIG_TULIP_MWI
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
					struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM */
	csr0 |= MRM;

	/* ...and barring hardware bugs, MWI */
	if (!(tp->chip_id == DC21143 && tp->revision == 65))
		csr0 |= MWI;

	/* set or disable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	if (csr0 & MWI)
		pci_set_mwi(pdev);
	else
		pci_clear_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
		       pci_name(pdev), cache, csr0);
}
#endif

/*
 *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
 *	is the DM910X and the on-chip ULi devices.
 */

static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
		return 1;
	return 0;
}
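/*
 * Background on the csr0 tuning done above and in tulip_init_one() below
 * (a summary of how this driver uses the register, not a datasheet quote):
 * CSR0 is the chip's PCI bus-mode register.  MWI, MRL and MRM enable the
 * Memory-Write-Invalidate, Memory-Read-Line and Memory-Read-Multiple PCI
 * commands, the CALShift field selects the cache-line alignment used for
 * those commands, and the BurstLenShift field caps the DMA burst length.
 * Clone chips that mis-implement some of these bits are handled by masking
 * csr0 in tulip_init_one() (see the AX88140, PNIC and DM910X cases below).
 */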
static int __devinit tulip_init_one (struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
	static struct pci_device_id early_486_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
		{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
		{ },
	};
	static int last_irq;
	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
	u8 chip_rev;
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

#ifndef MODULE
	static int did_version;		/* Already printed version info. */
	if (tulip_debug > 0 && did_version++ == 0)
		printk (KERN_INFO "%s", version);
#endif

	board_idx++;

	/*
	 *	Lan media wire a tulip chip to a wan interface. Needs a very
	 *	different driver (lmc driver)
	 */

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		printk (KERN_ERR PFX "skipping LMC card.\n");
		return -ENODEV;
	}

	/*
	 *	Early DM9100's need software CRC and the DMFE driver
	 */

	if (pdev->vendor == 0x1282 && pdev->device == 0x9100) {
		u32 dev_rev;
		/* Read chip revision */
		pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
		if (dev_rev < 0x02000030) {
			printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}
	}

	/*
	 *	Looks for early PCI chipsets where people report hangs
	 *	without the workarounds being on.
	 */

	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
	      aligned.  Aries might need this too. The Saturn errata are not
	      pretty reading but thankfully it's an old 486 chipset.

	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
	      Saturn.
	*/

	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM... */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000;	/* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(__sparc__)
		csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}

	/*
	 *	And back to business
	 */

	i = pci_enable_device(pdev);
	if (i) {
		printk (KERN_ERR PFX
			"Cannot enable tulip board #%d, aborting\n",
			board_idx);
		return i;
	}

	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
	if (!dev) {
		printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
		return -ENOMEM;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
			"aborting\n", pci_name(pdev),
			pci_resource_len (pdev, 0),
			pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions (pdev, "tulip"))
		goto err_out_free_netdev;

#ifndef USE_IO_OPS
	ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
#else
	ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
#endif
	if (!ioaddr)
		goto err_out_free_res;

	pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);

	/*
	 * initialize private data structure 'tp'
	 * it is zeroed and aligned in alloc_etherdev
	 */
	tp = netdev_priv(dev);

	tp->rx_ring = pci_alloc_consistent(pdev,
					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					   &tp->rx_ring_dma);
	if (!tp->rx_ring)
		goto err_out_mtable;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = chip_rev;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->base_addr = (unsigned long)ioaddr;

#ifdef CONFIG_TULIP_MWI