/*
 * qla3xxx.c: excerpt from the Linux kernel source
 * (C, 2,557 lines in total; this is part 1 of 5).
 */
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n",
		       mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	/* Unmap the linear part of the skb, then any paged fragments. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i], mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

/* Advance the small-buffer ring index, wrapping at the end. */
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

/* Hand out the next large receive buffer, wrapping at the end. */
static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}
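/*
 * Illustrative sketch, not part of the driver: ql_get_sbuf() and
 * ql_get_lbuf() above both advance a fixed-size ring index with
 * wrap-around while a release counter records how many buffers must
 * eventually be re-posted to the hardware.  The hypothetical helper
 * below restates that pattern generically.
 */
static inline u32 example_ring_advance(u32 *index, u32 ring_size,
				       u32 *release_cnt)
{
	u32 handed_out = *index;	/* entry consumed by this call */

	(*release_cnt)++;		/* one more buffer owed to the HW */
	if (++(*index) == ring_size)	/* wrap at the end of the ring */
		*index = 0;
	return handed_out;
}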
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;
	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);

		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE | IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				"TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   ((checksum & IB_IP_IOCB_RSP_3032_UDP) &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
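/*
 * Illustrative sketch, not part of the driver: the 3022 path above
 * works only because headroom was reserved in the second receive
 * buffer when it was posted, so the header bytes held in the first
 * buffer can be prepended in place with skb_push().  A hypothetical
 * receive path doing the same reassembly looks roughly like this.
 */
static void example_prepend_header(struct sk_buff *hdr_skb,
				   struct sk_buff *data_skb,
				   unsigned int hdr_len)
{
	/*
	 * skb_push() moves data_skb->data back by hdr_len and grows
	 * data_skb->len; it is safe only if at least hdr_len bytes of
	 * headroom were left by an earlier skb_reserve().
	 */
	memcpy(skb_push(data_skb, hdr_len), hdr_skb->data, hdr_len);
}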
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 4032 chip's undocumented "feature" where bit-8 is
		 * set if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;

		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *)net_rsp;

				printk(KERN_ERR PFX
				       "%s: Hit default case, not handled!\n"
				       "	dropping the packet, opcode = %x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}

static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	struct net_device *ndev = qdev->ndev;
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__netif_rx_complete(ndev, napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}

	return tx_cleaned + rx_cleaned;
}

static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var = ql_read_page0_reg_l(qdev,
						  &port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev, &qdev->napi)))
			__netif_rx_schedule(ndev, &qdev->napi);
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}
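/*
 * Illustrative outline, not part of the driver: ql3xxx_isr() and
 * ql_poll() above follow the standard NAPI handshake.  The ISR masks
 * the device's completion interrupt and schedules the poller; the
 * poller re-arms the interrupt only once it has drained the queue
 * within its budget.  In outline:
 *
 *	interrupt handler:
 *		disable device interrupts;
 *		if (netif_rx_schedule_prep(ndev, napi))
 *			__netif_rx_schedule(ndev, napi);
 *
 *	poll handler:
 *		work = process completions, at most budget entries;
 *		if (work < budget) {
 *			complete NAPI and re-enable device interrupts;
 *		}
 *		return work;
 */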
/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:	return 6;
	case 5:	return 7;
	case 6:	return 8;
	case 7:	return 10;
	case 8:	return 11;
	case 9:	return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}
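/*
 * Illustrative sketch, not part of the driver: the table above follows
 * a closed form.  The transmit IOCB itself carries three address/length
 * slots, the third of which becomes a continuation pointer once more
 * than three segments are needed, and each OAL adds five slots whose
 * fifth is consumed as a continuation pointer whenever more frags
 * follow.  For frags >= 3 the total is therefore the data segments
 * (frags plus one for skb->data) plus one continuation entry per
 * started group of four OAL data slots.  The hypothetical helper below
 * reproduces every row of the table, e.g. example_seg_count(7) == 10.
 */
static int example_seg_count(unsigned short frags)
{
	if (frags <= 2)		/* everything fits in the IOCB's own slots */
		return frags + 1;
	/* one continuation entry per started group of four OAL data slots */
	return frags + 1 + (frags - 2 + 3) / 4;
}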
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}

/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(map);
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs = 0;
		     completed_segs < frag_cnt; completed_segs++, seg++) {
			skb_frag_t *frag =
			    &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/*
				 * Continuation entry points to the next
				 * outbound address list.
				 */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR
					       "%s: PCI mapping outbound address list failed with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len = cpu_to_le32(sizeof(struct oal) |
							     OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map = pci_map_page(qdev->pdev, frag->page,
					   frag->page_offset, frag->size,
					   PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR
				       "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;