⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 netdevice.c

📁 net device driver driver developer can enjoy it
💻 C
📖 第 1 页 / 共 3 页
字号:
        nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
        ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
        ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
        ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
        ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
        ns->collisions += nic->tx_collisions;
        ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
           le32_to_cpu(s->tx_lost_crs);
        ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
           nic->rx_over_length_errors;
        ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
        ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
        ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
        ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
        ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
        ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
           le32_to_cpu(s->rx_alignment_errors) +
           le32_to_cpu(s->rx_short_frame_errors) +
           le32_to_cpu(s->rx_cdt_errors);
        nic->tx_deferred += le32_to_cpu(s->tx_deferred);
        nic->tx_single_collisions +=
           le32_to_cpu(s->tx_single_collisions);
        nic->tx_multiple_collisions +=
           le32_to_cpu(s->tx_multiple_collisions);
        if(nic->mac >= mac_82558_D101_A4) {
           nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
           nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
           nic->rx_fc_unsupported +=
               le32_to_cpu(s->fc_rcv_unsupported);
           if(nic->mac >= mac_82559_D101M) {
               nic->tx_tco_frames +=
                   le16_to_cpu(s->xmt_tco_frames);
               nic->rx_tco_frames +=
                   le16_to_cpu(s->rcv_tco_frames);
           }
       }
    }
 
    
    if(e100_exec_cmd(nic, cuc_dump_reset, 0))
        DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
 
// Periodic watchdog timer callback: monitors NIC link state, refreshes
// statistics, and applies chip-specific workarounds, then re-arms itself.
static void e100_watchdog(unsigned long data)
{
    struct nic *nic = (struct nic *)data;
    struct ethtool_cmd cmd;

    DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

    /* mii library handles link maintenance tasks */

    mii_ethtool_gset(&nic->mii, &cmd);

    /* Log link transitions: PHY link state vs. netdev carrier state. */
    if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
        DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
           cmd.speed == SPEED_100 ? "100" : "10",
           cmd.duplex == DUPLEX_FULL ? "full" : "half");
    } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
        DPRINTK(LINK, INFO, "link down\n");
    }

    mii_check_link(&nic->mii);

    /* Software generated interrupt to recover from (rare) Rx
    * allocation failure.
    * Unfortunately have to use a spinlock to not re-enable interrupts
    * accidentally, due to hardware that shares a register between the
    * interrupt mask bit and the SW Interrupt generation bit */
    spin_lock_irq(&nic->cmd_lock);
    writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
    spin_unlock_irq(&nic->cmd_lock);
    e100_write_flush(nic);

    e100_update_stats(nic);
    e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

    if(nic->mac <= mac_82557_D100_C)
       /* Issue a multicast command to workaround a 557 lock up */
        e100_set_multicast_list(nic->netdev);

    if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
        nic->flags |= ich_10h_workaround;
    else
        nic->flags &= ~ich_10h_workaround;

    /* Re-arm for the next watchdog period. */
    mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
}
 
/* Fill one control block (CB) with a transmit command describing @skb.
 * Writes the TBD array pointer, threshold, and the single TBD entry
 * pointing at the DMA-mapped skb data.
 * NOTE(review): the pci_map_single() result is not checked for mapping
 * failure - the original "check for mapping failure?" question stands. */
static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
    struct sk_buff *skb)
{
    cb->command = nic->tx_command;
    /* interrupt every 16 packets regardless of delay */
    if((nic->cbs_avail & ~15) == nic->cbs_avail)
        cb->command |= cpu_to_le16(cb_i);
    cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
    cb->u.tcb.tcb_byte_count = 0;
    cb->u.tcb.threshold = nic->tx_threshold;
    cb->u.tcb.tbd_count = 1;
    /* Map the packet data for device reads and point the TBD at it. */
    cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
        skb->data, skb->len, PCI_DMA_TODEVICE));
    /* check for mapping failure? */
    cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
 
/* Transmit entry point: queue one skb on the Tx control-block ring.
 * Returns 0 when the skb was accepted, 1 when the driver is out of
 * Tx resources and the skb should be requeued by the stack. */
static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
    struct nic *nic = netdev_priv(netdev);
    int status;

    if(nic->flags & ich_10h_workaround) {
        /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
           Issue a NOP command followed by a 1us delay before
           issuing the Tx command. */
        if(e100_exec_cmd(nic, cuc_nop, 0))
            DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
        udelay(1);
    }

    status = e100_exec_cb(nic, skb, e100_xmit_prepare);

    if(status == -ENOSPC) {
        /* The skb was queued, but the ring is now full - stop the stack
         * from sending more until tx_clean frees entries. */
        DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
        netif_stop_queue(netdev);
    } else if(status == -ENOMEM) {
        /* Hard failure - the skb was not queued; hand it back. */
        DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
        netif_stop_queue(netdev);
        return 1;
    }

    netdev->trans_start = jiffies;
    return 0;
}
// Tx completion path.
// Reclaim completed control blocks from the Tx ring: unmap and free
// their skbs, account statistics, and wake the queue if it was stopped.
// Returns nonzero if at least one skb was cleaned.
static inline int e100_tx_clean(struct nic *nic)
{
    struct cb *cb;
    int tx_cleaned = 0;

    spin_lock(&nic->cb_lock);

    DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n",
        nic->cb_to_clean->status);

    /* Clean CBs marked complete */
    for(cb = nic->cb_to_clean;
        cb->status & cpu_to_le16(cb_complete);// hardware sets cb_complete (16-bit LE) when the CB is done
        cb = nic->cb_to_clean = cb->next) {
        if(likely(cb->skb != NULL)) {
           nic->net_stats.tx_packets++;
           nic->net_stats.tx_bytes += cb->skb->len;

           pci_unmap_single(nic->pdev,
               le32_to_cpu(cb->u.tcb.tbd.buf_addr),
               le16_to_cpu(cb->u.tcb.tbd.size),
               PCI_DMA_TODEVICE);// release the DMA mapping
           dev_kfree_skb_any(cb->skb);// free the skb
           cb->skb = NULL;
           tx_cleaned = 1;
       }
        cb->status = 0;
        nic->cbs_avail++;
    }

    spin_unlock(&nic->cb_lock);

    /* Recover from running out of Tx resources in xmit_frame */
    if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
        netif_wake_queue(nic->netdev);

    return tx_cleaned;
}
// Tear down the Tx control-block ring: unmap and free any skbs still
// attached, release the DMA-coherent ring memory, and reset the ring
// bookkeeping pointers.
static void e100_clean_cbs(struct nic *nic)
{
    struct cb *cb;

    if(nic->cbs) {
        /* Return every outstanding CB to the free pool. */
        for(; nic->cbs_avail != nic->params.cbs.count; nic->cbs_avail++) {
            cb = nic->cb_to_clean;
            if(cb->skb) {
                pci_unmap_single(nic->pdev,
                    le32_to_cpu(cb->u.tcb.tbd.buf_addr),
                    le16_to_cpu(cb->u.tcb.tbd.size),
                    PCI_DMA_TODEVICE);
                dev_kfree_skb(cb->skb);
            }
            nic->cb_to_clean = cb->next;
        }
        pci_free_consistent(nic->pdev,
            sizeof(struct cb) * nic->params.cbs.count,
            nic->cbs, nic->cbs_dma_addr);
        nic->cbs = NULL;
        nic->cbs_avail = 0;
    }
    /* Next CU command must be a start, and all ring pointers are reset. */
    nic->cuc_cmd = cuc_start;
    nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
}
 
/* Allocate and initialize the circular Tx control-block ring.
 * Returns 0 on success or -ENOMEM if the coherent allocation fails. */
static int e100_alloc_cbs(struct nic *nic)
{
    struct cb *cb;
    unsigned int idx;
    unsigned int count = nic->params.cbs.count;

    nic->cuc_cmd = cuc_start;
    nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
    nic->cbs_avail = 0;

    nic->cbs = pci_alloc_consistent(nic->pdev,
        sizeof(struct cb) * count, &nic->cbs_dma_addr);
    if(!nic->cbs)
        return -ENOMEM;

    /* Chain the CBs into a ring: the driver follows next/prev virtual
     * pointers, the hardware follows the physical 'link' field. */
    for(idx = 0; idx < count; idx++) {
        cb = nic->cbs + idx;
        cb->next = (idx + 1 < count) ? cb + 1 : nic->cbs;
        cb->prev = (idx == 0) ? nic->cbs + count - 1 : cb - 1;

        cb->dma_addr = nic->cbs_dma_addr + idx * sizeof(struct cb);
        cb->link = cpu_to_le32(nic->cbs_dma_addr +
            ((idx + 1) % count) * sizeof(struct cb));
        cb->skb = NULL;
    }

    nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
    nic->cbs_avail = count;

    return 0;
}
// Kick the Receive Unit (RU): (re)start it when it is suspended and an
// RFA (receive frame area) buffer exists.
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
    /* Nothing to do without an Rx ring, or while the RU is not suspended. */
    if(!nic->rxs || nic->ru_running != RU_SUSPENDED)
        return;

    /* Init-time callers pass NULL; start from the head of the ring. */
    if(!rx)
        rx = nic->rxs;

    /* (Re)start RU if suspended or idle and RFA is non-NULL */
    if(rx->skb) {
        e100_exec_cmd(nic, ruc_start, rx->dma_addr);
        nic->ru_running = RU_RUNNING;
    }
}
/*
 * Allocate an skb for the receive path. This is a key step: if the rx
 * entry has no skb a new one is allocated; otherwise the existing skb
 * is reused after syncing state, which improves efficiency. The
 * allocated skb is pci_map'ed, i.e. attached to the NIC's DMA channel;
 * when an interrupt fires, the buffer holds a received network frame.
 * Validation of the frame is performed later. */
 
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
    if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
        return -ENOMEM;
 
    /* Align, init, and map the RFD. */
    rx->skb->dev = nic->netdev;
    skb_reserve(rx->skb, NET_IP_ALIGN);//IP对齐
    memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
// 映射到DMA通道
    rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
        RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
    if(pci_dma_mapping_error(rx->dma_addr)) {
        dev_kfree_skb_any(rx->skb);
        rx->skb = 0;
        rx->dma_addr = 0;
        return -ENOMEM;
    }
 
    /* Link the RFD to end of RFA by linking previous RFD to
     * this one, and clearing EL bit of previous. */
    if(rx->prev->skb) {
        struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
        put_unaligned(cpu_to_le32(rx->dma_addr),
           (u32 *)&prev_rfd->link);
        wmb();
        prev_rfd->command &= ~cpu_to_le16(cb_el);
        pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
           sizeof(struct rfd), PCI_DMA_TODEVICE);
    }
 
    return 0;
}
/* The main packet-receive path follows. */
static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
    unsigned int *work_done, unsigned int work_to_do)
{
    struct sk_buff *skb = rx->skb;
    struct rfd *rfd = (struct rfd *)skb->data;
    u16 rfd_status, actual_size;
 
    if(unlikely(work_done && *work_done >= work_to_do))
        return -EAGAIN;
 
    /* Need to sync before taking a peek at cb_complete bit */
    pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
        sizeof(struct rfd), PCI_DMA_FROMDEVICE);
    rfd_status = le16_to_cpu(rfd->status);
 
    DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
 
    /* If data isn't ready, nothing to indicate */
    if(unlikely(!(rfd_status & cb_complete)))
        return -ENODATA;
 
    /* Get actual data size */
    actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
    if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
        actual_size = RFD_BUF_LEN - sizeof(struct rfd);
 
    /* Get data */
    pci_unmap_single(nic->pdev, rx->dma_addr,
        RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
 
    /* this allows for a fast restart without re-enabling interrupts */
    if(le16_to_cpu(rfd->command) & cb_el)
        nic->ru_running = RU_SUSPENDED;
 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -