📄 lanstreamer.c
字号:
streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC)); streamer_priv->streamer_receive_options = 0x00; streamer_priv->streamer_copy_all_options = 0; /* setup rx ring */ /* enable rx channel */ writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM); /* setup rx descriptors */ streamer_priv->streamer_rx_ring= kmalloc( sizeof(struct streamer_rx_desc)* STREAMER_RX_RING_SIZE,GFP_KERNEL); if (!streamer_priv->streamer_rx_ring) { printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name); return -EIO; } for (i = 0; i < STREAMER_RX_RING_SIZE; i++) { struct sk_buff *skb; skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); if (skb == NULL) break; skb->dev = dev; streamer_priv->streamer_rx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1], sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); streamer_priv->streamer_rx_ring[i].status = 0; streamer_priv->streamer_rx_ring[i].buffer = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz; streamer_priv->rx_ring_skb[i] = skb; } streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)); if (i == 0) { printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. 
Adapter disabled\n", dev->name); free_irq(dev->irq, dev); return -EIO; } streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1; /* last processed rx status */ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0], sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), streamer_mmio + RXBDA); writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1], sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), streamer_mmio + RXLBDA); /* set bus master interrupt event mask */ writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); /* setup tx ring */ streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)* STREAMER_TX_RING_SIZE,GFP_KERNEL); if (!streamer_priv->streamer_tx_ring) { printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name); return -EIO; } writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM); /* Enables TX channel 2 */ for (i = 0; i < STREAMER_TX_RING_SIZE; i++) { streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[i + 1], sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)); streamer_priv->streamer_tx_ring[i].status = 0; streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0; streamer_priv->streamer_tx_ring[i].buffer = 0; streamer_priv->streamer_tx_ring[i].buflen = 0; streamer_priv->streamer_tx_ring[i].rsvd1 = 0; streamer_priv->streamer_tx_ring[i].rsvd2 = 0; streamer_priv->streamer_tx_ring[i].rsvd3 = 0; } streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0], sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)); streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE; streamer_priv->tx_ring_free = 0; /* next entry in tx ring to use */ streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1; /* set Busmaster interrupt event mask (handle 
receives on interrupt only */ writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK); /* set system event interrupt mask */ writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);#if STREAMER_DEBUG printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM)); printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));#endif#if STREAMER_NETWORK_MONITOR writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); printk("%s: Node Address: %04x:%04x:%04x\n", dev->name, ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC))); readw(streamer_mmio + LAPDINC); readw(streamer_mmio + LAPDINC); printk("%s: Functional Address: %04x:%04x\n", dev->name, ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC))); writew(streamer_priv->streamer_parms_addr + 4, streamer_mmio + LAPA); printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name, ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC)));#endif netif_start_queue(dev); netif_carrier_on(dev); return 0;}/* * When we enter the rx routine we do not know how many frames have been * queued on the rx channel. Therefore we start at the next rx status * position and travel around the receive ring until we have completed * all the frames. * * This means that we may process the frame before we receive the end * of frame interrupt. This is why we always test the status instead * of blindly processing the next frame. 
* */
/*
 * streamer_rx() - drain the adapter's receive descriptor ring.
 *
 * Starting at the entry after rx_ring_last_received, walk the ring while
 * the current descriptor's status has bit 0x01000000 set ("processed by
 * the adapter").  For each completed descriptor:
 *   - status & 0x7E830000 flags a receive error: it is logged (only when
 *     streamer_message_level is set) and the frame is not delivered;
 *   - status & 0x80000000 means the whole frame fits in one buffer: the
 *     ring skb is DMA-unmapped and handed straight to tr_type_trans() /
 *     netif_rx(), a freshly allocated skb is mapped into the descriptor in
 *     its place, and the recycled descriptor is written back to the
 *     adapter through RXLBDA;
 *   - otherwise the frame spans multiple buffers: each fragment is copied
 *     into one newly allocated skb of frame_length bytes, every consumed
 *     descriptor is recycled and re-queued via RXLBDA as the loop walks
 *     forward, and the assembled skb is passed to netif_rx().
 * On skb allocation failure rx_dropped is bumped; on delivery
 * rx_packets/rx_bytes are updated and dev->last_rx stamped with jiffies.
 *
 * NOTE(review): in the multi-buffer path the memcpy source is
 * "(void *)rx_desc->buffer" -- the little-endian DMA bus address cast to a
 * virtual pointer, not the buffer's kernel virtual address -- and the
 * pci_unmap_single()/memcpy() pair is joined by a comma operator, not a
 * semicolon.  Verify this path against the upstream driver before use.
 * NOTE(review): descriptors are re-mapped with pci_map_single() on every
 * recycle with no matching unmap visible in this chunk -- looks like a
 * DMA-mapping leak; confirm.
 * NOTE(review): the single-buffer path does skb_put(skb2, length) using
 * the buffer length while frame_length is computed but unused there --
 * confirm intended.
 * (Several string literals below are split across physical lines by the
 * extraction that produced this file; they are preserved byte-for-byte.)
 */
static void streamer_rx(struct net_device *dev){ struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; struct streamer_rx_desc *rx_desc; int rx_ring_last_received, length, frame_length, buffer_cnt = 0; struct sk_buff *skb, *skb2; /* setup the next rx descriptor to be received */ rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; rx_ring_last_received = streamer_priv->rx_ring_last_received; while (rx_desc->status & 0x01000000) { /* While processed descriptors are available */ if (rx_ring_last_received != streamer_priv->rx_ring_last_received) { printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n", rx_ring_last_received, streamer_priv->rx_ring_last_received); } streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1); rx_ring_last_received = streamer_priv->rx_ring_last_received; length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff; if (rx_desc->status & 0x7E830000) { /* errors */ if (streamer_priv->streamer_message_level) { printk(KERN_WARNING "%s: Rx Error %x \n", dev->name, rx_desc->status); } } else { /* received without errors */ if (rx_desc->status & 0x80000000) { /* frame complete */ buffer_cnt = 1; skb = dev_alloc_skb(streamer_priv->pkt_buf_sz); } else { skb = dev_alloc_skb(frame_length); } if (skb == NULL) { printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. 
\n", dev->name); streamer_priv->streamer_stats.rx_dropped++; } else { /* we allocated an skb OK */ skb->dev = dev; if (buffer_cnt == 1) { /* release the DMA mapping */ pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer), streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE); skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];#if STREAMER_DEBUG_PACKETS { int i; printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head); for (i = 0; i < frame_length; i++) { printk("%x:", skb2->data[i]); if (((i + 1) % 16) == 0) printk("\n"); } printk("\n"); }#endif skb_put(skb2, length); skb2->protocol = tr_type_trans(skb2, dev); /* recycle this descriptor */ streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)); streamer_priv->rx_ring_skb[rx_ring_last_received] = skb; /* place recycled descriptor back on the adapter */ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[rx_ring_last_received], sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)), streamer_mmio + RXLBDA); /* pass the received skb up to the protocol */ netif_rx(skb2); } else { do { /* Walk the buffers */ pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE), memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length); /* copy this fragment */ streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0; streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz; /* give descriptor back to the adapter */ writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[rx_ring_last_received], 
length, PCI_DMA_FROMDEVICE)), streamer_mmio + RXLBDA); if (rx_desc->status & 0x80000000) break; /* this descriptor completes the frame */ /* else get the next pending descriptor */ if (rx_ring_last_received!= streamer_priv->rx_ring_last_received) { printk("RX Error rx_ring_last_received not the same %x %x\n", rx_ring_last_received, streamer_priv->rx_ring_last_received); } rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)]; length = rx_desc->framelen_buflen & 0xffff; /* buffer length */ streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1); rx_ring_last_received = streamer_priv->rx_ring_last_received; } while (1); skb->protocol = tr_type_trans(skb, dev); /* send up to the protocol */ netif_rx(skb); } dev->last_rx = jiffies; streamer_priv->streamer_stats.rx_packets++; streamer_priv->streamer_stats.rx_bytes += length; } /* if skb == null */ } /* end received without errors */ /* try the next one */ rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)]; } /* end for all completed rx descriptors */}
/*
 * streamer_interrupt() - SISR/MISR interrupt service routine.  (The body
 * continues past the end of this source chunk; only its head is visible
 * here.)  Under streamer_lock it reads SISR and, for at most MAX_INTR
 * iterations, acknowledges PCI parity / system errors through SISR_RUM
 * (each writew followed by a readw to flush the posted write), and -- on
 * the lines that follow -- reaps completed tx descriptors, calls
 * streamer_rx() on MISR_RX_EOF, and dispatches SRB/ARB/TRB/ASB replies
 * and adapter-check conditions.
 */
static irqreturn_t streamer_interrupt(int irq, void *dev_id, struct pt_regs *regs){ struct net_device *dev = (struct net_device *) dev_id; struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u16 sisr; __u16 misr; u8 max_intr = MAX_INTR; spin_lock(&streamer_priv->streamer_lock); sisr = readw(streamer_mmio + SISR); while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) && (max_intr > 0)) { if(sisr & SISR_PAR_ERR) { writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } else if(sisr & SISR_SERR_ERR) { writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } else if(sisr & 
SISR_MI) { misr = readw(streamer_mmio + MISR_RUM); if (misr & MISR_TX2_EOF) { while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) { streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1); streamer_priv->free_tx_ring_entries++; streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len; streamer_priv->streamer_stats.tx_packets++; dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]); streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0; streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0; } netif_wake_queue(dev); } if (misr & MISR_RX_EOF) { streamer_rx(dev); } /* MISR_RX_EOF */ if (misr & MISR_RX_NOBUF) { /* According to the documentation, we don't have to do anything, * but trapping it keeps it out of /var/log/messages. 
*/ } /* SISR_RX_NOBUF */ writew(~misr, streamer_mmio + MISR_RUM); (void)readw(streamer_mmio + MISR_RUM); } else if (sisr & SISR_SRB_REPLY) { if (streamer_priv->srb_queued == 1) { wake_up_interruptible(&streamer_priv->srb_wait); } else if (streamer_priv->srb_queued == 2) { streamer_srb_bh(dev); } streamer_priv->srb_queued = 0; writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } else if (sisr & SISR_ADAPTER_CHECK) { printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name); writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA); printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n", dev->name, readw(streamer_mmio + LAPDINC), ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC)), ntohs(readw(streamer_mmio + LAPDINC))); netif_stop_queue(dev); netif_carrier_off(dev); printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name); } /* SISR_ADAPTER_CHECK */ else if (sisr & SISR_ASB_FREE) { /* Wake up anything that is waiting for the asb response */ if (streamer_priv->asb_queued) { streamer_asb_bh(dev); } writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } /* SISR_ASB_FREE */ else if (sisr & SISR_ARB_CMD) { streamer_arb_cmd(dev); writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } /* SISR_ARB_CMD */ else if (sisr & SISR_TRB_REPLY) { /* Wake up anything that is waiting for the trb response */ if (streamer_priv->trb_queued) { wake_up_interruptible(&streamer_priv-> trb_wait); } streamer_priv->trb_queued = 0; writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM); (void)readw(streamer_mmio + SISR_RUM); } /* SISR_TRB_REPLY */ sisr = readw(streamer_mmio + SISR); max_intr--; } /* while() */ spin_unlock(&streamer_priv->streamer_lock) ;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -