cassini.c
来自「linux 内核源代码」· C语言 代码 · 共 2,335 行 · 第 1/5 页
C
2,335 行
/* NOTE(review): tail of a function that begins before this chunk; only
 * its final statement and closing brace are visible here.
 */
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	/* tear down the streaming DMA mapping created in cas_page_alloc()
	 * before handing the pages back to the page allocator
	 */
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0; /* unconditionally 0; visible callers ignore the result */
}

/* optional per-page usage accounting; compiles away to nothing unless
 * RX_COUNT_BUFFERS is defined
 */
#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 *
 * Allocates the cas_page_t descriptor, the page(s) behind it, and a
 * device->host (RX) DMA mapping covering the whole buffer.  Returns NULL
 * on any allocation failure.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0); /* no-op unless RX_COUNT_BUFFERS */
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	/* NOTE(review): the pci_map_page() result is not checked for a DMA
	 * mapping error — confirm this is acceptable on all platforms.
	 */
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	/* inuse and spare lists each have their own lock */
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers: splice onto a private list under the lock,
	 * then free outside the lock
	 */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_spare_list, &list);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	/* now drain the inuse list the same way */
	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/* page_count > 1 means someone (the stack) still holds a
		 * reference to the buffer, so it cannot be recycled yet
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			/* recycle the reclaimed page into the spare pool */
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			/* pool is full; drop the lock before freeing */
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	/* splice whatever we managed to allocate into the spare pool */
	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list.
 */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			if (netif_msg_rx_err(cp))
				printk(KERN_ERR "%s: no spare buffers "
				       "available.\n", cp->dev->name);
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	/* take the first spare and bump the replenish count */
	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
		/* every RX_SPARE_RECOVER_VAL dequeues (assumes a power of
		 * two), kick the reset task to refill the pool
		 */
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}

/* enable (enable != 0) or disable MIF hardware polling of the PHY.
 * When enabled, the MIF polls MII_BMSR on cp->phy_addr; the MIF
 * interrupt mask written below (continued on the next source line)
 * is opened accordingly.
 */
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg = readl(cp->regs + REG_MIF_CFG);
	/* preserve only the MDIO_0/MDIO_1 bits of the current config */
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ?
	       /* unmask link-status / aneg-complete when polling, mask all
	        * MIF interrupts otherwise
	        */
	       ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock.
 *
 * (Re)start link negotiation.  If @ep is non-NULL, first fold the
 * requested autoneg/speed/duplex settings into cp->link_cntl; if @ep is
 * NULL, just restart with the current settings.  Handles both the
 * SERDES/PCS and the MII PHY paths.
 */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;	/* changed stays 0; lcntl unused */
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		/* forced mode: encode speed/duplex in BMCR-style bits */
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		printk(KERN_INFO "%s: PCS link down.\n",
		       cp->dev->name);
	} else {
		if (changed) {
			printk(KERN_INFO "%s: link configuration changed\n",
			       cp->dev->name);
		}
	}
	/* negotiation starts from a link-down state */
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		/* SERDES: program the PCS MII control register directly */
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);
	} else {
		/* MII PHY: rewrite BMCR; MIF polling is suspended around
		 * the MDIO accesses
		 */
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	/* arm the link timer to supervise the negotiation */
	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock.
 *
 * Reset the MII PHY via BMCR and busy-wait (up to STOP_TRIES_PHY polls)
 * for the self-clearing reset bit to drop.  Returns nonzero on timeout.
 */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (limit--) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	/* NOTE(review): if the reset completes on the very last retry,
	 * limit == 0 here and this still reports failure — apparent
	 * off-by-one; confirm against upstream.
	 */
	return (limit <= 0);
}

/* Download the cas_saturn_patch firmware blob into the DP83065 PHY via
 * its expanded-memory MDIO registers, then enable it.
 */
static void cas_saturn_firmware_load(struct cas *cp)
{
	cas_saturn_patch_t *patch = cas_saturn_patch;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	/* only the first address is written; presumably the address
	 * auto-increments on each REGD write — TODO confirm vs. datasheet
	 */
	cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
	while (patch->addr) {	/* patch table is terminated by addr == 0 */
		cas_phy_write(cp, DP83065_MII_REGD, patch->val);
		patch++;
	}

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}

/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			/* deliberate double read of REG4 */
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue */
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		/* NOTE(review): cas_phy_init() continues beyond this chunk
		 * (page boundary of the source view).
		 */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?