lib82596.c
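/*
 * Excerpt from lib82596.c, the shared core of the Intel 82596
 * Ethernet drivers.  The listing picks up inside init_rx_bufs(), in
 * the loop that attaches a DMA-mapped skb to each receive buffer
 * descriptor (RBD).  Descriptor fields are stored in the chip's byte
 * order, so every store goes through SWAP16()/SWAP32() and is pushed
 * out of the CPU cache with DMA_WBACK()/DMA_WBACK_INV().  As a rough
 * sketch only (the real macros are supplied by the platform wrapper
 * drivers, e.g. lasi_82596.c, and may differ):
 *
 *	#define SWAP32(x)	cpu_to_le32((u32)(x))
 *	#define SWAP16(x)	cpu_to_le16((u16)(x))
 */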
		if (skb == NULL)
			return -1;
		skb_reserve(skb, 2);
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}

static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}
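/*
 * init_i596_mem() below follows the 82596 bring-up sequence: reset
 * the chip through its PORT pin, hand it the bus address of the SCP
 * (which chains to the ISCP and then the SCB), assert channel
 * attention with ca(), and wait for the chip to clear the ISCP busy
 * flag.  wait_istat() is assumed to poll roughly like this
 * (simplified sketch, not necessarily the driver's exact helper):
 *
 *	DMA_INV(dev, &dma->iscp, sizeof(dma->iscp));
 *	while (--delcnt && dma->iscp.stat) {
 *		udelay(10);
 *		DMA_INV(dev, &dma->iscp, sizeof(dma->iscp));
 *	}
 */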
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore (&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}
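/*
 * i596_rx() below drains completed receive frames.  It implements the
 * usual copybreak strategy: frames longer than rx_copybreak are passed
 * up in the ring's own skb and the RBD is refilled with a fresh
 * buffer, while short frames are copied into a tightly sized skb
 * (pkt_len + 2 bytes, so the IP header stays 16-byte aligned after the
 * 2-byte reserve) and the full-size ring buffer is reused in place.
 */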
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else
				skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_ERR
				       "%s: i596_rx Memory squeeze, dropping packet.\n",
				       dev->name);
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}

static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}

static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}
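/*
 * i596_add_cmd() below appends an action command to the command unit
 * queue.  Each command is flagged CMD_EOL | CMD_INTR, so the chip
 * stops and interrupts after executing it; if the queue was empty the
 * SCB is pointed at the new command and CUC_START is issued, otherwise
 * the command is linked behind the current tail (the interrupt
 * handler, not shown in this excerpt, is expected to restart the CU as
 * completed commands are retired).  A backlog of more than
 * max_cmd_backlog commands that has not drained within ticks_limit
 * jiffies is treated as a wedged command unit and triggers a full
 * i596_reset().
 */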
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);