📄 eth8186.c
字号:
/* --- tail of rtl8186_refill_rx() --------------------------------------
 * NOTE(review): the opening of this function (and its for-loop over the
 * RX ring index i) is outside this chunk; only the loop body onward is
 * visible here.  Allocates one skb per RX descriptor and hands the
 * descriptor to the NIC by setting DescOwn.
 */
            skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
            if (!skb)
                goto err_out;
            skb->dev = cp->dev;
            cp->rx_skb[i].skb = skb;
            cp->rx_skb[i].frag = 0;
            /* DMA engine presumably wants 4-byte-aligned buffers; warn if not */
            if ((u32)skb->data & 0x3)
                printk(KERN_DEBUG "skb->data unaligment %8x\n",(u32)skb->data);
            // set to noncache area: descriptor holds the uncached alias of the
            // buffer address so CPU cache and DMA never disagree
            cp->rx_ring[i].addr = (u32)skb->data|UNCACHE_MASK;
            /* Last descriptor carries RingEnd so the NIC wraps back to entry 0 */
            if (i == (RTL8186_RX_RING_SIZE - 1))
                cp->rx_ring[i].opts1 = (DescOwn | RingEnd | cp->rx_buf_sz);
            else
                cp->rx_ring[i].opts1 =(DescOwn | cp->rx_buf_sz);
            cp->rx_ring[i].opts2 = 0;
    }
    return 0;

err_out:
    /* Allocation failed part-way: release every skb attached so far */
    rtl8186_clean_rings(cp);
    return -ENOMEM;
}

/*
 * Watchdog callback (dev->tx_timeout): the TX queue stalled.  Reap every
 * completed descriptor between tail and head (stopping at the first one
 * the NIC still owns), free the skbs, then wake the queue so transmission
 * can resume.
 */
static void rtl8186_tx_timeout(struct net_device *dev)
{
    struct re_private *cp = dev->priv;
    unsigned tx_head = cp->tx_hqhead;
    unsigned tx_tail = cp->tx_hqtail;

    cp->cp_stats.tx_timeouts++;
    spin_lock_irq(&cp->lock);
    while (tx_tail != tx_head) {
        struct sk_buff *skb;
        u32 status;

        status = (cp->tx_hqring[tx_tail].opts1);
        if (status & DescOwn)
            break;          /* NIC still owns this one: stop reaping */
        skb = cp->tx_skb[tx_tail].skb;
        if (!skb)
            BUG();          /* ring/skb bookkeeping out of sync */
        cp->net_stats.tx_packets++;
        cp->net_stats.tx_bytes += skb->len;
        dev_kfree_skb(skb);
        cp->tx_skb[tx_tail].skb = NULL;
        tx_tail = NEXT_TX(tx_tail);
    }
    cp->tx_hqtail = tx_tail;
    spin_unlock_irq(&cp->lock);
    if (netif_queue_stopped(cp->dev))
        netif_wake_queue(cp->dev);
}

/*
 * Zero both descriptor rings, reset the ring indices, and populate the
 * RX ring with fresh buffers.  Returns 0 or -ENOMEM from the refill.
 */
static int rtl8186_init_rings(struct re_private *cp)
{
    memset(cp->tx_hqring, 0, sizeof(DMA_DESC) * RTL8186_TX_RING_SIZE);
    memset(cp->rx_ring, 0, sizeof(DMA_DESC) * RTL8186_RX_RING_SIZE);
    cp->rx_tail = 0;
    cp->tx_hqhead = cp->tx_hqtail = 0;
    return rtl8186_refill_rx(cp);
}

/*
 * Allocate the RX and TX descriptor rings.  The raw kmalloc pointer is
 * kept in cp->{rx,tx}desc_buf for kfree(); the working pointer is rounded
 * up to DESC_ALIGN and mapped through the uncached address alias.
 *
 * NOTE(review): kmalloc'ing RTL8186_{RX,TX}RING_BYTES and then aligning
 * the pointer upward only stays in bounds if those byte counts already
 * include DESC_ALIGN slack — confirm against their definitions.
 * NOTE(review): the ErrMem path kfree()s cp->rxdesc_buf/txdesc_buf
 * without clearing them, and rtl8186_free_rings() below also leaves the
 * stale pointers behind — a failed re-open after a close could
 * double-free.  Consider NULLing both fields after each kfree().
 */
static int rtl8186_alloc_rings(struct re_private *cp)
{
    void *pBuf;

    pBuf = kmalloc(RTL8186_RXRING_BYTES, GFP_KERNEL);
    if (!pBuf)
        goto ErrMem;
    cp->rxdesc_buf = pBuf;                       /* raw pointer, for kfree */
    memset(pBuf, 0, RTL8186_RXRING_BYTES);
    /* round up to descriptor alignment */
    pBuf = (void*)((u32)(pBuf + DESC_ALIGN-1) & ~(DESC_ALIGN -1));
    cp->rx_ring = (DMA_DESC*)((u32)(pBuf) | UNCACHE_MASK);

    pBuf= kmalloc(RTL8186_TXRING_BYTES, GFP_KERNEL);
    if (!pBuf)
        goto ErrMem;
    cp->txdesc_buf = pBuf;                       /* raw pointer, for kfree */
    memset(pBuf, 0, RTL8186_TXRING_BYTES);
    pBuf = (void*)((u32)(pBuf + DESC_ALIGN-1) & ~(DESC_ALIGN -1));
    cp->tx_hqring = (DMA_DESC*)((u32)(pBuf) | UNCACHE_MASK);

    return rtl8186_init_rings(cp);

ErrMem:
    if (cp->rxdesc_buf)
        kfree(cp->rxdesc_buf);
    if (cp->txdesc_buf)
        kfree(cp->txdesc_buf);
    return -ENOMEM;
}

/*
 * Free every skb still attached to either ring and zero the bookkeeping
 * arrays.  TX skbs freed here were never transmitted, so they count as
 * drops.  Descriptor memory itself is NOT released here.
 */
static void rtl8186_clean_rings(struct re_private *cp)
{
    unsigned i;

    for (i = 0; i < RTL8186_RX_RING_SIZE; i++) {
        if (cp->rx_skb[i].skb) {
            dev_kfree_skb(cp->rx_skb[i].skb);
        }
    }
    for (i = 0; i < RTL8186_TX_RING_SIZE; i++) {
        if (cp->tx_skb[i].skb) {
            struct sk_buff *skb = cp->tx_skb[i].skb;
            dev_kfree_skb(skb);
            cp->net_stats.tx_dropped++;
        }
    }
    memset(&cp->rx_skb, 0, sizeof(struct ring_info) * RTL8186_RX_RING_SIZE);
    memset(&cp->tx_skb, 0, sizeof(struct ring_info) * RTL8186_TX_RING_SIZE);
}

/*
 * Release skbs and the descriptor ring memory.
 * NOTE(review): cp->rxdesc_buf / cp->txdesc_buf are kfree()d but not set
 * to NULL — see the double-free note on rtl8186_alloc_rings().
 */
static void rtl8186_free_rings(struct re_private *cp)
{
    rtl8186_clean_rings(cp);
    kfree(cp->rxdesc_buf);
    kfree(cp->txdesc_buf);
    cp->rx_ring = NULL;
    cp->tx_hqring = NULL;
}

/*
 * dev->open: allocate rings, hook the interrupt, bring up the hardware,
 * then power-cycle the attached PHY port(s) through the MII access
 * register.  Returns 0 on success or a negative errno.
 */
static int rtl8186_open(struct net_device *dev)
{
    struct re_private *cp = dev->priv;
    int rc;

    if (netif_msg_ifup(cp))
        printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

    rtl8186_set_rxbufsize(cp);       /* set new rx buf size */
    rc = rtl8186_alloc_rings(cp);
    if (rc)
        return rc;
#ifdef DELAY_RX
    /* RX is deferred to a tasklet instead of running in the IRQ handler */
    tasklet_init(&cp->rx_tasklet, rtl8186_rx, (unsigned long)cp);
#endif
#ifdef CONFIG_RTL_IPSEC
    rc = request_irq(dev->irq, rtl8186_interrupt, SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
#else
    rc = request_irq(dev->irq, rtl8186_interrupt, SA_INTERRUPT, dev->name, dev);
#endif
    if (rc)
        goto err_out_hw;
    // remember to enable IRQ before enabling TX/RX
    rtl8186_init_hw(cp);
    netif_start_queue(dev);

    // power down and up port
    /* NOTE(review): the magic MIIAR values below (0x800 = power down,
     * 0x8000 = power up per PHY port, presumably) and the mdelay timing
     * come from vendor code — verify against the PHY datasheet before
     * changing anything here. */
    if((RTL_R32(MIIAR)&0x0000ffff) != 0x8201) {  // if PHY == 8305
        if(dev->base_addr == 0xbd200000){        // LAN interface
            unsigned long flags;

            save_flags(flags);cli();
            /* PHY for the LAN ports is reached via the other NIC's
             * register block; temporarily repoint cp->regs at it */
            cp->regs = (void *)0xbd300000;
            RTL_W32(MIIAR, BIT(31)|0x800|(0<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x800|(1<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x800|(2<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x800|(3<<26)); mdelay(400);
            RTL_W32(MIIAR, BIT(31)|0x8000|(0<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x8000|(1<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x8000|(2<<26)); mdelay(10);
            RTL_W32(MIIAR, BIT(31)|0x8000|(3<<26));
            cp->regs = (void *)dev->base_addr;   /* restore own registers */
            restore_flags(flags);
        } else {
            /* WAN side: single PHY port (index 4) */
            RTL_W32(MIIAR, BIT(31)|0x800|(4<<26));
            mdelay(400);
            RTL_W32(MIIAR, BIT(31)|0x8000|(4<<26));
        }
    } else
        RTL_W32(MIIAR, BIT(31)|BIT(26)|0x3300);
#ifdef VLAN_QOS
    memset(cp->qosnode_table, 0, sizeof(cp->qosnode_table));
    memset(cp->qosnode_index, 0, sizeof(cp->qosnode_index));
#endif
    return 0;

err_out_hw:
    rtl8186_stop_hw(dev, cp);
    rtl8186_free_rings(cp);
    return rc;
}

/*
 * dev->stop: tear down in the reverse order of open — stop the queue,
 * quiesce the hardware, release the IRQ, then free the rings.
 */
static int rtl8186_close(struct net_device *dev)
{
    struct re_private *cp = dev->priv;

    if (netif_msg_ifdown(cp))
        printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
#ifdef DELAY_RX
    tasklet_kill(&cp->rx_tasklet);
#endif
    netif_stop_queue(dev);
    rtl8186_stop_hw(dev, cp);
    free_irq(dev->irq, dev);
    rtl8186_free_rings(cp);
    return 0;
}

/* ---- disabled code: MTU change, MDIO access and ethtool support ----
 * Kept under #if 0 by the vendor; rtl8186_ioctl below also has the
 * ethtool call stubbed out. */
#if 0
static int rtl8186_change_mtu(struct net_device *dev, int new_mtu)
{
    struct re_private *cp = dev->priv;
    int rc;

    /* check for invalid MTU, according to hardware limits */
    if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
        return -EINVAL;

    /* if network interface not up, no need for complexity */
    if (!netif_running(dev)) {
        dev->mtu = new_mtu;
        rtl8186_set_rxbufsize(cp);   /* set new rx buf size */
        return 0;
    }

    spin_lock_irq(&cp->lock);
    rtl8186_stop_hw(dev, cp);        /* stop h/w and free rings */
    rtl8186_clean_rings(cp);
    dev->mtu = new_mtu;
    rtl8186_set_rxbufsize(cp);       /* set new rx buf size */
    rc = rtl8186_init_rings(cp);     /* realloc and restart h/w */
    rtl8186_start_hw(cp);
    spin_unlock_irq(&cp->lock);
    return rc;
}

/* Map of generic MII register numbers to 8139-style register offsets;
 * zero entries mean "not supported". */
static char mii_2_8139_map[8] = {
    BasicModeCtrl,
    BasicModeStatus,
    0,
    0,
    NWayAdvert,
    NWayLPAR,
    NWayExpansion,
    0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    struct re_private *cp = dev->priv;

    return location < 8 && mii_2_8139_map[location] ?
        readw(cp->regs + mii_2_8139_map[location]) : 0;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    struct re_private *cp = dev->priv;

    if (location == 0) {
        /* BasicModeCtrl is write-protected behind the EEPROM lock */
        cpw8(Cfg9346, Cfg9346_Unlock);
        cpw16(BasicModeCtrl, value);
        cpw8(Cfg9346, Cfg9346_Lock);
    } else if (location < 8 && mii_2_8139_map[location])
        cpw16(mii_2_8139_map[location], value);
}

/* ethtool sub-ioctl dispatcher (pre-ethtool_ops kernels). */
static int rtl8186_ethtool_ioctl (struct re_private *cp, void *useraddr)
{
    u32 ethcmd;

    /* dev_ioctl() in ../../net/core/dev.c has already checked
       capable(CAP_NET_ADMIN), so don't bother with that here. */
    if (get_user(ethcmd, (u32 *)useraddr))
        return -EFAULT;

    switch (ethcmd) {
    case ETHTOOL_GDRVINFO: {
        struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
        strcpy (info.driver, DRV_NAME);
        strcpy (info.version, DRV_VERSION);
        strcpy (info.bus_info, cp->pdev->slot_name);
        if (copy_to_user (useraddr, &info, sizeof (info)))
            return -EFAULT;
        return 0;
    }
    /* get settings */
    case ETHTOOL_GSET: {
        struct ethtool_cmd ecmd = { ETHTOOL_GSET };
        spin_lock_irq(&cp->lock);
        mii_ethtool_gset(&cp->mii_if, &ecmd);
        spin_unlock_irq(&cp->lock);
        if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
            return -EFAULT;
        return 0;
    }
    /* set settings */
    case ETHTOOL_SSET: {
        int r;
        struct ethtool_cmd ecmd;
        if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
            return -EFAULT;
        spin_lock_irq(&cp->lock);
        r = mii_ethtool_sset(&cp->mii_if, &ecmd);
        spin_unlock_irq(&cp->lock);
        return r;
    }
    /* restart autonegotiation */
    case ETHTOOL_NWAY_RST: {
        return mii_nway_restart(&cp->mii_if);
    }
    /* get link status */
    case ETHTOOL_GLINK: {
        struct ethtool_value edata = {ETHTOOL_GLINK};
        edata.data = mii_link_ok(&cp->mii_if);
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
            return -EFAULT;
        return 0;
    }
    /* get message-level */
    case ETHTOOL_GMSGLVL: {
        struct ethtool_value edata = {ETHTOOL_GMSGLVL};
        edata.data = cp->msg_enable;
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
            return -EFAULT;
        return 0;
    }
    /* set message-level */
    case ETHTOOL_SMSGLVL: {
        struct ethtool_value edata;
        if (copy_from_user(&edata, useraddr, sizeof(edata)))
            return -EFAULT;
        cp->msg_enable = edata.data;
        return 0;
    }
    default:
        break;
    }

    return -EOPNOTSUPP;
}
#endif

/*
 * dev->do_ioctl: only SIOCETHTOOL is recognized, and the real handler is
 * compiled out above — it currently returns 1 for SIOCETHTOOL.
 * NOTE(review): returning 1 (a positive value) from an ioctl handler is
 * unusual; callers normally expect 0 or a negative errno.
 */
static int rtl8186_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
//  struct re_private *cp = dev->priv;
    int rc = 0;

    if (!netif_running(dev))
        return -EINVAL;

    switch (cmd) {
    case SIOCETHTOOL:
#if 0
        return rtl8186_ethtool_ioctl(cp, (void *) rq->ifr_data);
#endif
        return 1;
    default:
        rc = -EOPNOTSUPP;
        break;
    }
    return rc;
}

#if CP_VLAN_TAG_USED
/* Register a VLAN group and enable hardware VLAN receive handling. */
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
    struct re_private *cp = dev->priv;

    spin_lock_irq(&cp->lock);
    cp->vlgrp = grp;
    cpw16(CpCmd, cpr16(CpCmd) | RxVlanOn);
    spin_unlock_irq(&cp->lock);
}

/* Remove one VLAN id.
 * NOTE(review): this clears RxVlanOn for the whole device when any
 * single vid is killed — presumably intentional in vendor code, but it
 * disables VLAN RX for all remaining vids; verify. */
static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
    struct re_private *cp = dev->priv;

    spin_lock_irq(&cp->lock);
    cpw16(CpCmd, cpr16(CpCmd) & ~RxVlanOn);
    if (cp->vlgrp)
        cp->vlgrp->vlan_devices[vid] = NULL;
    spin_unlock_irq(&cp->lock);
}
#endif

/*
 * Probe one of the two fixed on-chip NICs (ethno 0 = 0xbd200000/IRQ 4,
 * ethno 1 = 0xbd300000/IRQ 5): allocate the net_device, fill in the
 * netdev callbacks, assign a default MAC, and register it.
 *
 * NOTE(review): err_out_iomap calls iounmap() on a fixed KSEG address
 * that was never ioremap()ed, and kfree(dev) rather than the matching
 * free for alloc_etherdev — harmless on this platform perhaps, but
 * worth confirming.
 */
static int rtl8186_probe(int ethno)
{
#ifdef MODULE
    printk("%s", version);
#endif
    struct net_device *dev;
    struct re_private *cp;
    int rc;
    void *regs;
    unsigned i;
    struct proc_dir_entry *res;

    regs = (void *)((ethno)?0xbd300000:0xbd200000);
#ifndef MODULE
    static int version_printed;
    if (version_printed++ == 0)
        printk("%s", version);
#endif

    dev = alloc_etherdev(sizeof(struct re_private));
    if (!dev)
        return -ENOMEM;
    SET_MODULE_OWNER(dev);

    cp = dev->priv;
    reDev[ethno] = (struct re_private *)dev->priv;  /* global per-NIC handle */
    cp->dev = dev;
    spin_lock_init(&cp->lock);
#if 0
    cp->mii_if.dev = dev;
    cp->mii_if.mdio_read = mdio_read;
    cp->mii_if.mdio_write = mdio_write;
    cp->mii_if.phy_id = CP_INTERNAL_PHY;
#endif
    dev->base_addr = (unsigned long) regs;
    cp->regs = regs;
    rtl8186_stop_hw(dev, cp);

    /* Set Default MAC address (placeholder 00:01:02..., offset per NIC;
     * rtl8139_set_hwaddr below installs the real one later) */
    for (i = 0; i < 6; i++)
        ((u8 *)(dev->dev_addr))[i] = i+(ethno<<2);

    dev->open = rtl8186_open;
    dev->stop = rtl8186_close;
    dev->set_multicast_list = rtl8186_set_rx_mode;
    dev->hard_start_xmit = rtl8186_start_xmit;
    dev->get_stats = rtl8186_get_stats;
    dev->do_ioctl = rtl8186_ioctl;
    dev->set_mac_address = rtl8139_set_hwaddr;
    /*dev->change_mtu = rtl8186_change_mtu;*/
#if 1
    dev->tx_timeout = rtl8186_tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;
#endif
#ifdef CP_TX_CHECKSUM
    dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#if CP_VLAN_TAG_USED
    dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
    dev->vlan_rx_register = cp_vlan_rx_register;
    dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif
    dev->irq = (ethno)?5:4;

    rc = register_netdev(dev);
    if (rc)
        goto err_out_iomap;

    printk(KERN_INFO "%s: %s at 0x%lx, "
           "%02x:%02x:%02x:%02x:%02x:%02x, "
           "IRQ %d\n",
           dev->name, "RTL8186-NIC", dev->base_addr,
           dev->dev_addr[0], dev->dev_addr[1],
           dev->dev_addr[2], dev->dev_addr[3],
           dev->dev_addr[4], dev->dev_addr[5], dev->irq);

    /* /proc/eth_flag: write hook for driver flags */
    res = create_proc_entry("eth_flag", 0, NULL);
    if (res)
        res->write_proc = write_proc;
    /*
     * Looks like this is necessary to deal with on all architectures,
     * even this %$#%$# N440BX Intel based thing doesn't get it right.
     * Ie. having two NICs in the machine, one will have the cache
     * line set at boot time, the other will not.
     */
    return 0;

err_out_iomap:
    printk("in err_out_iomap\n");
    iounmap(regs);
    kfree(dev);
    return -1 ;
}

/* Module exit: intentionally empty — the devices are never unregistered,
 * so this driver effectively cannot be unloaded cleanly. */
static void __exit rtl8186_exit (void)
{
}

/*
 * Module init: probe both fixed NICs, then (optionally) poke the MII
 * register on NIC1 to configure bicolor LED behavior.
 * NOTE(review): the return values of rtl8186_probe() are ignored, so a
 * failed probe is silently accepted.
 */
static int __init rtl8186_init(void)
{
    rtl8186_probe(0);
    rtl8186_probe(1);
#ifdef BICOLOR_LED
    {
        unsigned int val = (19<<16);
        writel(val, NIC1BASE|MIIAR);
        val = readl(NIC1BASE|MIIAR);
        val &= ~0x3400;
        val |= 0x800023ff;
        writel(val, NIC1BASE|MIIAR);
    }
#endif /* BICOLOR_LED */
    return 0;
}

/*
 * dev->set_mac_address: copy the 6-byte MAC from the sockaddr into the
 * device and program it into the IDR0-IDR5 hardware registers, with
 * interrupts disabled around the whole update.
 * (Name says rtl8139 — presumably inherited from the 8139 driver this
 * code was derived from.)
 */
static int rtl8139_set_hwaddr(struct net_device *dev, void *addr)
{
    unsigned long flags;
    int i;
    struct re_private *cp = (struct re_private *) dev->priv;
    unsigned char *p;

    p = ((struct sockaddr *)addr)->sa_data;
    save_flags(flags);
    cli();
    for (i = 0; i < 6; ++i) {
        dev->dev_addr[i] = p[i];
        //printk("setup %s hw_address %d : %X\n", dev->name, i, p[i]);
    }
    RTL_W8(IDR0, dev->dev_addr[0]);
    RTL_W8(IDR1, dev->dev_addr[1]);
    RTL_W8(IDR2, dev->dev_addr[2]);
    RTL_W8(IDR3, dev->dev_addr[3]);
    RTL_W8(IDR4, dev->dev_addr[4]);
    RTL_W8(IDR5, dev->dev_addr[5]);
    restore_flags(flags);
    return 0;
}

module_init(rtl8186_init);
module_exit(rtl8186_exit);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -