core.c
来自「linux 内核源代码」· C语言 代码 · 共 2,457 行 · 第 1/5 页
C
2,457 行
/* Free every queued TX skb and reset all TX descriptors.  A descriptor
 * still flagged READY held a frame the hardware never sent; count it as
 * dropped. */
static void emac_clean_tx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			/* READY still set => frame was never transmitted */
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

/* Free all RX ring skbs and clear their descriptors, including any
 * partially reassembled scatter/gather frame. */
static void emac_clean_rx_ring(struct emac_instance *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	/* Drop an SG frame that was still being assembled */
	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

/* Allocate and DMA-map one RX skb for ring slot @slot.
 * Returns 0 on success, -ENOMEM if allocation failed.
 * The -2/+2 adjustments around the DMA mapping pair with the "+ 2" in
 * skb_reserve() so the buffer handed to hardware starts 2 bytes into the
 * reserved headroom (IP header alignment). */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before EMPTY is set */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

/* Log the current link state (speed, duplex, pause mode) via printk. */
static void emac_print_link_status(struct emac_instance *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ?
", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
/* ndo_open hook: bring the interface up.  Requests the EMAC error IRQ,
 * fills the RX ring, starts PHY link polling when a PHY is attached,
 * then configures the chip and enables the MAL TX/RX channels and the
 * EMAC itself.  Returns 0 on success or a negative errno. */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now. */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		/* link_polling must be visible before the work runs */
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);
	return -ENOMEM;
}

/* BHs disabled */
#if 0
/* Dead code (kept under #if 0): compare the link parameters currently
 * programmed in MR1 against the cached PHY state; non-zero = differs. */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ?
DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif

/* Periodic PHY link poll (delayed work).  On link-up it rereads the link
 * parameters and does a full TX reset; on link-down it reinitializes the
 * MAC.  Re-arms itself with an interval that depends on link state. */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of((struct delayed_work *)work,
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Interface may have been closed while this work was queued */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}

/* Force the link state to be re-evaluated: drop carrier, then (if polling
 * is active) cancel and immediately reschedule the link work.  The second
 * dev->link_polling check guards against a concurrent close. */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_rearming_delayed_work(&dev->link_work);
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}

/* Process ctx, rtnl_lock semaphore */
/* ndo_stop hook: stop link polling, quiesce the MAC and MAL channels,
 * release rings and the IRQ.  (Definition continues past this line.) */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_rearming_delayed_work(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
mutex_unlock(&dev->link_lock); emac_rx_disable(dev); emac_tx_disable(dev); mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); mal_poll_del(dev->mal, &dev->commac); emac_clean_tx_ring(dev); emac_clean_rx_ring(dev); free_irq(dev->emac_irq, dev); return 0;}static inline u16 emac_tx_csum(struct emac_instance *dev, struct sk_buff *skb){ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH && skb->ip_summed == CHECKSUM_PARTIAL)) { ++dev->stats.tx_packets_csum; return EMAC_TX_CTRL_TAH_CSUM; } return 0;}static inline int emac_xmit_finish(struct emac_instance *dev, int len){ struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; /* Send the packet out. If the if makes a significant perf * difference, then we can store the TMR0 value in "dev" * instead */ if (emac_has_feature(dev, EMAC_FTR_EMAC4)) out_be32(&p->tmr0, EMAC4_TMR0_XMIT); else out_be32(&p->tmr0, EMAC_TMR0_XMIT); if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) { netif_stop_queue(ndev); DBG2(dev, "stopped TX queue" NL); } ndev->trans_start = jiffies; ++dev->stats.tx_packets; dev->stats.tx_bytes += len; return 0;}/* Tx lock BH */static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev){ struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; int slot; u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb); slot = dev->tx_slot++; if (dev->tx_slot == NUM_TX_BUFF) { dev->tx_slot = 0; ctrl |= MAL_TX_CTRL_WRAP; } DBG2(dev, "xmit(%u) %d" NL, len, slot); dev->tx_skb[slot] = skb; dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE); dev->tx_desc[slot].data_len = (u16) len; wmb(); dev->tx_desc[slot].ctrl = ctrl; return emac_xmit_finish(dev, len);}#ifdef CONFIG_IBM_NEW_EMAC_TAHstatic inline int emac_xmit_split(struct emac_instance *dev, int slot, u32 pd, int len, int last, u16 base_ctrl){ while (1) { u16 ctrl = base_ctrl; int chunk = 
min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* No skb attached: only the final slot owns the skb */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* ndo_start_xmit for TAH-equipped EMACs: spreads the linear head and each
 * page fragment over one or more descriptors.  Falls back to
 * emac_start_xmit() on the common small, linear-frame path. */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);

	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page,
				  frag->page_offset, len, DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/*
Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All later descriptors must be visible before arming the first */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */

/* Tx lock BHs */
/* Translate a bad TX descriptor status word into per-cause error
 * counters. */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
{
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

/* Reclaim completed TX descriptors: free their skbs, record any reported
 * errors and advance the ack slot.  (Definition is truncated here — the
 * remainder lies beyond this excerpt.) */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?