tulip_core.c
			int csr12 = inl (ioaddr + CSR12);
			int csr14 = inl (ioaddr + CSR14);
			switch (regnum) {
			case 0:
				if (((csr14<<5) & 0x1000) ||
				    (dev->if_port == 5 && tp->nwayset))
					data->val_out = 0x1000;
				else
					data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
						| (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
				data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
				if (tp->chip_id != DC21041)
					data->val_out |= 0x6048;
				break;
			case 4:
				/* Advertised value, bogus 10baseTx-FD value from CSR6. */
				data->val_out =
					((inl(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
				if (tp->chip_id != DC21041)
					data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable (CAP_NET_ADMIN))
			return -EPERM;
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			switch (regnum) {
			case 0:	/* Check for autonegotiation on or reset. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2) {
						pnic2_start_nway (dev);
					} else {
						t21142_start_nway (dev);
					}
				}
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}


/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	u16 hash_table[32];
	struct dev_mc_list *mclist;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
	     i++, mclist = mclist->next) {
		int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	struct dev_mc_list *mclist;
	int i;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip.
	 */
	for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
	     i++, mclist = mclist->next) {
		eaddrs = (u16 *)mclist->dmi_addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15-i)*12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int csr6;

	csr6 = inl(ioaddr + CSR6) & ~0x00D5;
	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct dev_mc_list *mclist;
		int i;
		if (dev->mc_count > 64) {	/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};	/* Multicast hash filter */
			int filterbit;
			for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			     i++, mclist = mclist->next) {
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
				else
					filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
				if (tulip_debug > 2) {
					printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
					       "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
					       mclist->dmi_addr[0], mclist->dmi_addr[1],
					       mclist->dmi_addr[2], mclist->dmi_addr[3],
					       mclist->dmi_addr[4], mclist->dmi_addr[5],
					       ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
				}
			}
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;			/* No change. */
			else if (tp->flags & IS_ASIX) {
				outl(2, ioaddr + CSR13);
				outl(mc_filter[0], ioaddr + CSR14);
				outl(3, ioaddr + CSR13);
				outl(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				outl(mc_filter[0], ioaddr + 0xAC);
				outl(mc_filter[1], ioaddr + 0xB0);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (dev->mc_count > 14) {
			/* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry.
				 */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;
			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			outl(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	outl(csr6, ioaddr + CSR6);
}

#ifdef CONFIG_TULIP_MWI
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
					struct net_device *dev)
{
	struct tulip_private *tp = dev->priv;
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pdev->slot_name);

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM */
	csr0 |= MRM;

	/* ...and barring hardware bugs, MWI */
	if (!(tp->chip_id == DC21143 && tp->revision == 65))
		csr0 |= MWI;

	/* set or disable MWI in the standard PCI command bit.
	 * Check for the case where mwi is desired but not available
	 */
	if (csr0 & MWI)
		pci_set_mwi(pdev);
	else
		pci_clear_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
		       pdev->slot_name, cache, csr0);
}
#endif

static int __devinit tulip_init_one (struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
	static int last_irq;
	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
	u8 chip_rev;
	int i, irq;
	unsigned short sum;
	u8 ee_data[EEPROM_SIZE];
	struct net_device *dev;
	long ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	unsigned int t2104x_mode = 0;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

#ifndef MODULE
	static int did_version;		/* Already printed version info.
					 */
	if (tulip_debug > 0 && did_version++ == 0)
		printk (KERN_INFO "%s", version);
#endif

	board_idx++;

	/*
	 *	Lan media wire a tulip chip to a wan interface. Needs a very
	 *	different driver (lmc driver)
	 */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		printk (KERN_ERR PFX "skipping LMC card.\n");
		return -ENODEV;
	}

	/*
	 *	Early DM9100's need software CRC and the DMFE driver
	 */
	if (pdev->vendor == 0x1282 && pdev->device == 0x9100) {
		u32 dev_rev;
		/* Read Chip revision */
		pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
		if (dev_rev < 0x02000030) {
			printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}
	}

	/*
	 *	Looks for early PCI chipsets where people report hangs
	 *	without the workarounds being on.
	 */

	/* Intel Saturn. Switch to 8 long words burst, 8 long word cache aligned.
	   Aries might need this too. The Saturn errata are not pretty reading but
	   thankfully its an old 486 chipset.
	*/
	if (pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424, NULL)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* The dreaded SiS496 486 chipset. Same workaround as above. */
	if (pci_find_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, NULL)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM... */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
	if ((pdev->vendor == 0x1282 && pdev->device == 0x9102)
	    || (pdev->vendor == 0x10b9 && pdev->device == 0x5261))
		csr0 &= ~0x01f100ff;

#if defined(__sparc__)
	/* DM9102A needs 32-dword alignment/burst length on sparc - chip bug? */
	if ((pdev->vendor == 0x1282 && pdev->device == 0x9102)
	    || (pdev->vendor == 0x10b9 && pdev->device == 0x5261))
		csr0 = (csr0 & ~0xff00) | 0xe000;
#endif

	/*
	 *	And back to business
	 */

	i = pci_enable_device(pdev);
	if (i) {
		printk (KERN_ERR PFX
			"Cannot enable tulip board #%d, aborting\n",
			board_idx);
		return i;
	}

	ioaddr = pci_resource_start (pdev, 0);
	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
	if (!dev) {
		printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
		return -ENOMEM;
	}

	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
			"aborting\n", pdev->slot_name,
			pci_resource_len (pdev, 0),
			pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions (pdev, "tulip"))
		goto err_out_free_netdev;

#ifndef USE_IO_OPS
	ioaddr = (unsigned long) ioremap (pci_resource_start (pdev, 1),