📄 olympic.c
字号:
/*
 * NOTE(review): this excerpt begins part-way through the adapter open
 * routine (its opening lines are outside the visible chunk).  The code
 * below finishes interrupt-mask set-up, optionally dumps the adapter's
 * address tables, and returns from the open path.
 */
	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
	writel(0,olympic_mmio+EISR) ;
	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif

	/* If network monitoring is enabled, read the node / functional / NAUN
	 * addresses straight out of the adapter's shared-memory tables and log
	 * them.  oat/opt point into the mapped LAP area at the table offsets. */
	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat ;
		u8 __iomem *opt ;
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
		printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
		printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
	}

	netif_start_queue(dev);
	return 0;

out:
	/* Error path: the IRQ was already requested by the (unseen) part of
	 * this function, so release it before failing the open. */
	free_irq(dev->irq, dev);
	return -EIO;
}

/*
 *	When we enter the rx routine we do not know how many frames have been
 *	queued on the rx channel.  Therefore we start at the next rx status
 *	position and travel around the receive ring until we have completed
 *	all the frames.
 *
 *	This means that we may process the frame before we receive the end
 *	of frame interrupt. This is why we always test the status instead
 *	of blindly processing the next frame.
 *
 *	We also remove the last 4 bytes from the packet as well, these are
 *	just token ring trailer info and upset protocols that don't check
 *	their own length, i.e. SNA.
 *
 *	Runs in interrupt context (called from olympic_interrupt); all skb
 *	frees elsewhere in this file use the *_irq variants accordingly.
 */
static void olympic_rx(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc ;
	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	/* Start at the status entry after the last one we completed; the
	 * ring size is a power of two, hence the & masking for wraparound. */
	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;

	while (rx_status->status_buffercnt) {
		u32 l_status_buffercnt;

		olympic_priv->rx_status_last_received++ ;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		/* Low 16 bits: total frame length / buffer count.  High 16 bits
		 * of fragmentcnt_framelen: length of the final fragment. */
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
		l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		if(l_status_buffercnt & 0xC0000000) {	/* presumably "frame complete" bits — see adapter docs */
			if (l_status_buffercnt & 0x3B000000) {	/* any of the error bits decoded below */
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
					if (l_status_buffercnt & (1<<28)) /* Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
				}
				/* Skip past the buffers this bad frame occupied. */
				olympic_priv->rx_ring_last_received += i ;
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				olympic_priv->olympic_stats.rx_errors++;
			} else {
				/* Single-buffer frames may be swapped rather than copied,
				 * so allocate a full replacement ring buffer for them. */
				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
				} else {
					skb = dev_alloc_skb(length) ;
				}

				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
					olympic_priv->olympic_stats.rx_dropped++ ;
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i ;
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
				} else {
					skb->dev = dev ;

					/* Optimise based upon number of buffers used.
					   If only one buffer is used we can simply swap the
					   buffers around. If more than one then we must use
					   the new buffer and copy the information first.
					   Ideally all frames would be in a single buffer, this
					   can be tuned by altering the buffer size. If the
					   length of the packet is less than 1500 bytes we're
					   going to copy it over anyway to stop packets getting
					   dropped from sockets with buffers smaller than our
					   pkt_buf_sz. */

					if (buffer_cnt==1) {
						olympic_priv->rx_ring_last_received++ ;
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
						if (length > 1500) {
							/* Swap path: hand the full ring skb up the
							 * stack and install the fresh skb in its place. */
							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb_put(skb2,length-4); /* drop 4-byte TR trailer */
							skb2->protocol = tr_type_trans(skb2,dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
									olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
								cpu_to_le32(olympic_priv->pkt_buf_sz);
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
							netif_rx(skb2) ;
						} else {
							/* Copy path: sync the DMA buffer for CPU access,
							 * copy out, then hand it back to the device. */
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							skb->protocol = tr_type_trans(skb,dev) ;
							netif_rx(skb) ;
						}
					} else {
						do { /* Walk the buffers */
							olympic_priv->rx_ring_last_received++ ;
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received ;
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							/* Last fragment carries frag_len bytes; the
							 * others are full descriptors. */
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
							memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
						} while (--i) ;
						skb_trim(skb,skb->len-4) ; /* drop 4-byte TR trailer */
						skb->protocol = tr_type_trans(skb,dev);
						netif_rx(skb) ;
					}
					dev->last_rx = jiffies ;
					olympic_priv->olympic_stats.rx_packets++ ;
					olympic_priv->olympic_stats.rx_bytes += length ;
				} /* if skb == null */
			} /* If status & 0x3b */
		} else { /* if buffercnt & 0xC */
			/* Frame not complete (or status not understood): just
			 * advance the ring index past its buffers. */
			olympic_priv->rx_ring_last_received += i ;
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
		}

		/* Clear the status entry so the while-condition terminates once
		 * we catch up with the adapter, then advance to the next entry. */
		rx_status->fragmentcnt_framelen = 0 ;
		rx_status->status_buffercnt = 0 ;
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);

		/* Return buffer_cnt buffers to the adapter; bit 15 of RXENQ is
		 * toggled on each enqueue (presumably an ownership/parity bit —
		 * see adapter docs). */
		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
	} /* while */

}

/*
 * Free the receive skbs and unmap all DMA mappings for the rx/tx rings.
 *
 * NOTE(review): the rx ring is walked using rx_status_last_received as
 * the index into rx_ring_skb/olympic_rx_ring — presumably the two ring
 * indices are in lock-step by the time this runs; confirm against the
 * caller (close path).  The 0xdeadbeef compare appears to be a sentinel
 * for "no buffer mapped" (the tx completion path below stores it too).
 */
static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	int i;

	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
			pci_unmap_single(olympic_priv->pdev,
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return ;
}

/*
 * Main interrupt handler.  Decodes the SISR summary register and
 * dispatches to the rx path, tx completion reaping, and the various
 * command/response wakeups.  Everything after the SISR_MI check runs
 * under olympic_lock.
 */
static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1: a task is sleeping on srb_wait;
			 * srb_queued==2: completion handled in the bottom half. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but you never know,
		   hence the loop to ensure we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				olympic_priv->olympic_stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* Mark the slot unmapped (same sentinel checked in
				 * olympic_freemem) and clear the status so the loop
				 * condition terminates. */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			/* Fatal adapter self-check: stop the queue, dump the 8-byte
			 * check area from the LAP window, and bail out. */
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
				readb(adapter_check_area+0), readb(adapter_check_area+1),
				readb(adapter_check_area+2), readb(adapter_check_area+3),
				readb(adapter_check_area+4), readb(adapter_check_area+5),
				readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything,
			   but trapping it keeps it out of /var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */

	/* Re-enable the master interrupt bit on the way out. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
	/* NOTE(review): the function continues past the end of this excerpt —
	 * its final return statement and closing brace are not visible here. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -