atl1_main.c
	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock(&adapter->mb_lock);
}

static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		tpd->buffer_addr = 0;
		tpd->desc.data = 0;

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}

static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);

	return ((next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1);
}

static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tso_param *tso)
{
	/* We enter this function holding a spinlock. */
	u8 ipofst;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return err;
		}

		if (skb->protocol == ntohs(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					IPPROTO_TCP, 0);
			ipofst = skb_network_offset(skb);
			if (ipofst != ETH_HLEN)	/* 802.3 frame */
				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;

			tso->tsopl |= (iph->ihl & CSUM_PARAM_IPHL_MASK) <<
				CSUM_PARAM_IPHL_SHIFT;
			tso->tsopl |= (tcp_hdrlen(skb) &
				TSO_PARAM_TCPHDRLEN_MASK) <<
				TSO_PARAM_TCPHDRLEN_SHIFT;
			tso->tsopl |= (skb_shinfo(skb)->gso_size &
				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
			tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
			return true;
		}
	}
	return false;
}

static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct csum_param *csum)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cso = skb_transport_offset(skb);
		css = cso + skb->csum_offset;
		if (unlikely(cso & 0x1)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"payload offset not an even number\n");
			return -1;
		}
		csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
			CSUM_PARAM_PLOADOFFSET_SHIFT;
		csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
			CSUM_PARAM_XSUMOFFSET_SHIFT;
		csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
		return true;
	}

	return true;
}

static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	bool tcp_seg)
{
	/* We enter this function holding a spinlock. */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct page *page;
	int first_buf_len = skb->len;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	u16 tpd_next_to_use;
	u16 proto_hdr_len;
	u16 len12;

	first_buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
	if (unlikely(buffer_info->skb))
		BUG();
	buffer_info->skb = NULL;	/* put skb in last TPD */

	if (tcp_seg) {
		/* TSO/GSO */
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = proto_hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, proto_hdr_len, PCI_DMA_TODEVICE);

		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;

		if (first_buf_len > proto_hdr_len) {
			int i, m;

			len12 = first_buf_len - proto_hdr_len;
			m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < m; i++) {
				buffer_info =
					&tpd_ring->buffer_info[tpd_next_to_use];
				buffer_info->skb = NULL;
				/* clamp each chunk to ATL1_MAX_TX_BUF_LEN */
				buffer_info->length =
					(len12 > ATL1_MAX_TX_BUF_LEN) ?
					ATL1_MAX_TX_BUF_LEN : len12;
				len12 -= buffer_info->length;
				page = virt_to_page(skb->data +
					(proto_hdr_len +
					i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(proto_hdr_len +
					i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++tpd_next_to_use == tpd_ring->count)
					tpd_next_to_use = 0;
			}
		}
	} else {
		/* not TSO/GSO */
		buffer_info->length = first_buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, first_buf_len, PCI_DMA_TODEVICE);
		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 lenf, i, m;

		frag = &skb_shinfo(skb)->frags[f];
		lenf = frag->size;

		m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < m; i++) {
			buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
			if (unlikely(buffer_info->skb))
				BUG();
			buffer_info->skb = NULL;
			buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : lenf;
			lenf -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++tpd_next_to_use == tpd_ring->count)
				tpd_next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}

static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
	union tpd_descr *descr)
{
	/* We enter this function holding a spinlock. */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int j;
	u32 val;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
		tpd->desc.csum.csumpu = descr->csum.csumpu;
		tpd->desc.csum.csumpl = descr->csum.csumpl;
		tpd->desc.tso.tsopu = descr->tso.tsopu;
		tpd->desc.tso.tsopl = descr->tso.tsopl;
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->desc.data = descr->data;
		tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
			CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;

		val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
			TSO_PARAM_SEGMENT_MASK;
		if (val && !j)
			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;

		if (j == (count - 1))
			tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;

		if (++tpd_next_to_use == tpd_ring->count)
			tpd_next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
}

static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;
	int tso;
	int count = 1;
	int ret_val;
	u32 val;
	union tpd_descr param;
	u16 frag_size;
	u16 vlan_tag;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len -= skb->data_len;

	if (unlikely(skb->len == 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	param.data = 0;
	param.tso.tsopu = 0;
	param.tso.tsopl = 0;
	param.csum.csumpu = 0;
	param.csum.csumpl = 0;

	/* nr_frags will be nonzero if we're doing scatter/gather (SG) */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
				tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* need additional TPD ? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
		/* Can't get lock - tell upper layer to requeue */
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
		return NETDEV_TX_LOCKED;
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
		return NETDEV_TX_BUSY;
	}

	param.data = 0;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
		param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
			CSUM_PARAM_VALAN_SHIFT;
	}

	tso = atl1_tso(adapter, skb, &param.tso);
	if (tso < 0) {
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, &param.csum);
		if (ret_val < 0) {
			spin_unlock_irqrestore(&adapter->lock, flags);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
		CSUM_PARAM_SEGMENT_MASK;
	atl1_tx_map(adapter, skb, 1 == val);
	atl1_tx_queue(adapter, count, &param);
	netdev->trans_start = jiffies;
	spin_unlock_irqrestore(&adapter->lock, flags);
	atl1_update_mailbox(adapter);
	return NETDEV_TX_OK;
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	u8 update_rx;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	update_rx = 0;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atl1_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ? */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"pcie DMA r/w error (status = 0x%x)\n",
				status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
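
The free-descriptor check in atl1_tpd_avail() above is ordinary ring-buffer arithmetic: one slot is always left unused so that next_to_use == next_to_clean can only mean "empty". The standalone user-space sketch below reproduces that calculation for illustration; demo_ring and demo_ring_avail() are hypothetical names, not part of the driver, which reads the real indices with atomic_read().

/* Illustrative sketch only -- not driver code. */
#include <stdio.h>

struct demo_ring {
	unsigned short count;		/* total descriptors in the ring */
	unsigned short next_to_use;	/* producer index (software fills here) */
	unsigned short next_to_clean;	/* consumer index (hardware has finished) */
};

/* Free descriptors, keeping one slot back to tell "full" from "empty". */
static unsigned short demo_ring_avail(const struct demo_ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct demo_ring r = { .count = 256, .next_to_use = 250,
			       .next_to_clean = 10 };

	/* 256 + 10 - 250 - 1 = 15 descriptors still free */
	printf("free TPDs: %u\n", (unsigned)demo_ring_avail(&r));
	return 0;
}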