via-velocity.c
/**
 * velocity_free_rings - free PCI ring pointers
 * @vptr: Velocity to free from
 *
 * Clean up the PCI ring buffers allocated to this velocity.
 */
static void velocity_free_rings(struct velocity_info *vptr)
{
        int size;

        size = vptr->options.numrx * sizeof(struct rx_desc) +
               vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);

        size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;

        pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
}

static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
        struct mac_regs *regs = vptr->mac_regs;
        int avail, dirty, unusable;

        /*
         * RD number must be equal to 4X per hardware spec
         * (programming guide rev 1.20, p.13)
         */
        if (vptr->rd_filled < 4)
                return;

        wmb();

        unusable = vptr->rd_filled & 0x0003;
        dirty = vptr->rd_dirty - unusable;
        for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
                dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
                vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
        }

        writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
        vptr->rd_filled = unusable;
}

static int velocity_rx_refill(struct velocity_info *vptr)
{
        int dirty = vptr->rd_dirty, done = 0, ret = 0;

        do {
                struct rx_desc *rd = vptr->rd_ring + dirty;

                /* Fine for an all zero Rx desc at init time as well */
                if (rd->rdesc0.owner == OWNED_BY_NIC)
                        break;

                if (!vptr->rd_info[dirty].skb) {
                        ret = velocity_alloc_rx_buf(vptr, dirty);
                        if (ret < 0)
                                break;
                }
                done++;
                dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
        } while (dirty != vptr->rd_curr);

        if (done) {
                vptr->rd_dirty = dirty;
                vptr->rd_filled += done;
                velocity_give_many_rx_descs(vptr);
        }

        return ret;
}

/**
 * velocity_init_rd_ring - set up receive ring
 * @vptr: velocity to configure
 *
 * Allocate and set up the receive buffers for each ring slot and
 * assign them to the network adapter.
 */
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
        int ret = -ENOMEM;
        unsigned int rsize = sizeof(struct velocity_rd_info) *
                                        vptr->options.numrx;

        vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
        if (vptr->rd_info == NULL)
                goto out;
        memset(vptr->rd_info, 0, rsize);

        vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;

        ret = velocity_rx_refill(vptr);
        if (ret < 0) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
                        "%s: failed to allocate RX buffer.\n", vptr->dev->name);
                velocity_free_rd_ring(vptr);
        }
out:
        return ret;
}

/**
 * velocity_free_rd_ring - free receive ring
 * @vptr: velocity to clean up
 *
 * Free the receive buffers for each ring slot and any
 * attached socket buffers that need to go away.
 */
static void velocity_free_rd_ring(struct velocity_info *vptr)
{
        int i;

        if (vptr->rd_info == NULL)
                return;

        for (i = 0; i < vptr->options.numrx; i++) {
                struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);

                if (!rd_info->skb)
                        continue;
                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
                rd_info->skb_dma = (dma_addr_t) NULL;

                dev_kfree_skb(rd_info->skb);
                rd_info->skb = NULL;
        }

        kfree(vptr->rd_info);
        vptr->rd_info = NULL;
}

/**
 * velocity_init_td_ring - set up transmit ring
 * @vptr: velocity
 *
 * Set up the transmit ring and chain the ring pointers together.
 * Returns zero on success or a negative POSIX errno code for
 * failure.
 */
static int velocity_init_td_ring(struct velocity_info *vptr)
{
        int i, j;
        dma_addr_t curr;
        struct tx_desc *td;
        struct velocity_td_info *td_info;
        unsigned int tsize = sizeof(struct velocity_td_info) *
                                        vptr->options.numtx;

        /* Init the TD ring entries */
        for (j = 0; j < vptr->num_txq; j++) {
                curr = vptr->td_pool_dma[j];

                vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL);
                if (vptr->td_infos[j] == NULL) {
                        while (--j >= 0)
                                kfree(vptr->td_infos[j]);
                        return -ENOMEM;
                }
                memset(vptr->td_infos[j], 0, tsize);

                for (i = 0; i < vptr->options.numtx;
                     i++, curr += sizeof(struct tx_desc)) {
                        td = &(vptr->td_rings[j][i]);
                        td_info = &(vptr->td_infos[j][i]);
                        td_info->buf = vptr->tx_bufs +
                                (j * vptr->options.numtx + i) * PKT_BUF_SZ;
                        td_info->buf_dma = vptr->tx_bufs_dma +
                                (j * vptr->options.numtx + i) * PKT_BUF_SZ;
                }
                vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
        }
        return 0;
}

/*
 * FIXME: could we merge this with velocity_free_tx_buf ?
 */
static void velocity_free_td_ring_entry(struct velocity_info *vptr,
                                        int q, int n)
{
        struct velocity_td_info *td_info = &(vptr->td_infos[q][n]);
        int i;

        if (td_info == NULL)
                return;

        if (td_info->skb) {
                for (i = 0; i < td_info->nskb_dma; i++) {
                        if (td_info->skb_dma[i]) {
                                pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
                                                 td_info->skb->len,
                                                 PCI_DMA_TODEVICE);
                                td_info->skb_dma[i] = (dma_addr_t) NULL;
                        }
                }
                dev_kfree_skb(td_info->skb);
                td_info->skb = NULL;
        }
}

/**
 * velocity_free_td_ring - free td ring
 * @vptr: velocity
 *
 * Free up the transmit ring for this particular velocity adapter.
 * We free the ring contents but not the ring itself.
 */
static void velocity_free_td_ring(struct velocity_info *vptr)
{
        int i, j;

        for (j = 0; j < vptr->num_txq; j++) {
                if (vptr->td_infos[j] == NULL)
                        continue;
                for (i = 0; i < vptr->options.numtx; i++) {
                        velocity_free_td_ring_entry(vptr, j, i);
                }
                if (vptr->td_infos[j]) {
                        kfree(vptr->td_infos[j]);
                        vptr->td_infos[j] = NULL;
                }
        }
}

/**
 * velocity_rx_srv - service RX interrupt
 * @vptr: velocity
 * @status: adapter status (unused)
 *
 * Walk the receive ring of the velocity adapter and remove
 * any received packets from the receive queue. Hand the ring
 * slots back to the adapter for reuse.
 */
static int velocity_rx_srv(struct velocity_info *vptr, int status)
{
        struct net_device_stats *stats = &vptr->stats;
        int rd_curr = vptr->rd_curr;
        int works = 0;

        do {
                struct rx_desc *rd = vptr->rd_ring + rd_curr;

                if (!vptr->rd_info[rd_curr].skb)
                        break;

                if (rd->rdesc0.owner == OWNED_BY_NIC)
                        break;

                rmb();

                /*
                 * Don't drop CE or RL error frames although RXOK is off
                 */
                if ((rd->rdesc0.RSR & RSR_RXOK) ||
                    (!(rd->rdesc0.RSR & RSR_RXOK) &&
                     (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
                        if (velocity_receive_frame(vptr, rd_curr) < 0)
                                stats->rx_dropped++;
                } else {
                        if (rd->rdesc0.RSR & RSR_CRC)
                                stats->rx_crc_errors++;
                        if (rd->rdesc0.RSR & RSR_FAE)
                                stats->rx_frame_errors++;

                        stats->rx_dropped++;
                }

                rd->inten = 1;

                vptr->dev->last_rx = jiffies;

                rd_curr++;
                if (rd_curr >= vptr->options.numrx)
                        rd_curr = 0;
        } while (++works <= 15);

        vptr->rd_curr = rd_curr;

        if (works > 0 && velocity_rx_refill(vptr) < 0) {
                VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
                        "%s: rx buf allocation failure\n", vptr->dev->name);
        }

        VAR_USED(stats);
        return works;
}

/**
 * velocity_rx_csum - checksum process
 * @rd: receive packet descriptor
 * @skb: network layer packet buffer
 *
 * Process the status bits for the received packet and determine
 * if the checksum was computed and verified by the hardware.
 */
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        if (rd->rdesc1.CSM & CSM_IPKT) {
                if (rd->rdesc1.CSM & CSM_IPOK) {
                        if ((rd->rdesc1.CSM & CSM_TCPKT) ||
                            (rd->rdesc1.CSM & CSM_UDPKT)) {
                                if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
                                        return;
                                }
                        }
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
        }
}

/**
 * velocity_rx_copy - in place Rx copy for small packets
 * @rx_skb: network layer packet buffer candidate
 * @pkt_size: received data size
 * @vptr: velocity we are handling
 *
 * Replace the current skb that is scheduled for Rx processing by a
 * shorter, immediately allocated skb, if the received packet is small
 * enough. This function returns a negative value if the received
 * packet is too big or if memory is exhausted.
 */
static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
                                   struct velocity_info *vptr)
{
        int ret = -1;

        if (pkt_size < rx_copybreak) {
                struct sk_buff *new_skb;

                new_skb = dev_alloc_skb(pkt_size + 2);
                if (new_skb) {
                        new_skb->dev = vptr->dev;
                        new_skb->ip_summed = rx_skb[0]->ip_summed;

                        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
                                skb_reserve(new_skb, 2);
                        memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
                        *rx_skb = new_skb;
                        ret = 0;
                }
        }
        return ret;
}

/**
 * velocity_iph_realign - IP header alignment
 * @vptr: velocity we are handling
 * @skb: network layer packet buffer
 * @pkt_size: received data size
 *
 * Align IP header on a 2 bytes boundary. This behavior can be
 * configured by the user.
 */
static inline void velocity_iph_realign(struct velocity_info *vptr,
                                        struct sk_buff *skb, int pkt_size)
{
        /* FIXME - memmove ? */
        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
                int i;

                for (i = pkt_size; i >= 0; i--)
                        *(skb->data + i + 2) = *(skb->data + i);
                skb_reserve(skb, 2);
        }
}
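/*
 * Illustrative sketch only, prompted by the "FIXME - memmove ?" note in
 * velocity_iph_realign() above: the byte-by-byte loop there copies indices
 * 0..pkt_size (pkt_size + 1 bytes) upward by two bytes, which memmove() can
 * do in a single call because it handles overlapping regions. The helper
 * name below is hypothetical and not part of this driver; it assumes
 * memmove() is available via <linux/string.h>.
 */
static inline void velocity_iph_realign_memmove(struct velocity_info *vptr,
                                                struct sk_buff *skb,
                                                int pkt_size)
{
        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
                /* Shift the frame up by two bytes, mirroring the loop above */
                memmove(skb->data + 2, skb->data, pkt_size + 1);
                skb_reserve(skb, 2);
        }
}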
/**
 * velocity_receive_frame - received packet processor
 * @vptr: velocity we are handling
 * @idx: ring index
 *
 * A packet has arrived. We process the packet and if appropriate
 * pass the frame up the network stack.
 */
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
        void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
        struct net_device_stats *stats = &vptr->stats;
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        int pkt_len = rd->rdesc0.len;
        struct sk_buff *skb;

        if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
                VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR
                        " %s : the received frame spans multiple RDs.\n",
                        vptr->dev->name);
                stats->rx_length_errors++;
                return -EINVAL;
        }

        if (rd->rdesc0.RSR & RSR_MAR)
                vptr->stats.multicast++;

        skb = rd_info->skb;
        skb->dev = vptr->dev;

        pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
                                    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

        /*
         * Drop frames not meeting IEEE 802.3
         */
        if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
                if (rd->rdesc0.RSR & RSR_RL) {
                        stats->rx_length_errors++;
                        return -EINVAL;
                }
        }

        pci_action = pci_dma_sync_single_for_device;

        velocity_rx_csum(rd, skb);

        if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
                velocity_iph_realign(vptr, skb, pkt_len);
                pci_action = pci_unmap_single;
                rd_info->skb = NULL;
        }

        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
                   PCI_DMA_FROMDEVICE);

        skb_put(skb, pkt_len - 4);
        skb->protocol = eth_type_trans(skb, skb->dev);

        stats->rx_bytes += pkt_len;
        netif_rx(skb);

        return 0;
}

/**
 * velocity_alloc_rx_buf - allocate aligned receive buffer
 * @vptr: velocity
 * @idx: ring index
 *
 * Allocate a new full sized buffer for the reception of a frame and
 * map it into PCI space for the hardware to use. The hardware
 * requires *64* byte alignment of the buffer which makes life
 * less fun than would be ideal.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
        struct rx_desc *rd = &(vptr->rd_ring[idx]);
        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);

        rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
        if (rd_info->skb == NULL)
                return -ENOMEM;

        /*
         * Do the gymnastics to get the buffer head for data at
         * 64byte alignment.
         */
        skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
        rd_info->skb->dev = vptr->dev;
        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail,
                                          vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);

        /*
         * Fill in the descriptor to match
         */
        *((u32 *) &(rd->rdesc0)) = 0;
        rd->len = cpu_to_le32(vptr->rx_buf_sz);
        rd->inten = 1;
        rd->pa_low = cpu_to_le32(rd_info->skb_dma);
        rd->pa_high = 0;
        return 0;
}

/**
 * velocity_tx_srv - transmit interrupt service
 * @vptr: Velocity
 * @status:
 *