📄 lp486e.c
Font size:
/*
 * NOTE(review): the lines below are the tail of init_rx_bufs(); the opening
 * of that function lies before this chunk and is not visible here, so the
 * fragment is reproduced verbatim.
 */
			rfd->rbd = rbd;
		} else {
			printk("Could not kmalloc rbd\n");
		}
	}
	lp->rbd_tail->next = rfd->rbd;
#endif
	return (i);
}

/*
 * Tear down the receive ring: terminate the list at rx_tail, then walk the
 * RFD chain from scb.pa_rfd, kfree()ing each descriptor until the tail is
 * reached.  Leaves lp->rx_tail cleared.
 */
static inline void
remove_rx_bufs(struct net_device *dev) {
	struct i596_private *lp;
	struct i596_rfd *rfd;

	lp = (struct i596_private *) dev->priv;
	lp->rx_tail->pa_next = I596_NULL;

	do {
		rfd = pa_to_va(lp->scb.pa_rfd);
		lp->scb.pa_rfd = rfd->pa_next;
		kfree(rfd);
	} while (rfd != lp->rx_tail);
	lp->rx_tail = 0;

#if 0
	/* NOTE(review): dead, syntactically invalid placeholder — never compiled */
	for (lp->rbd_list) {
	}
#endif
}

/* i82596 PORT-command opcodes (low 4 bits of the first 16-bit write) */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_DUMP		0x03	/* dump */

#define IOADDR	0xcb0		/* real constant */
#define IRQ	10		/* default IRQ - can be changed by ECU */

/*
 * The 82596 requires two 16-bit write cycles for a port command:
 * low word (address | opcode) first, then the high word of the address.
 * The address must be 16-byte aligned; misalignment is only reported,
 * not rejected.
 */
static inline void
PORT(phys_addr a, unsigned int cmd) {
	if (a & 0xf)
		printk("lp486e.c: PORT: address not aligned\n");
	outw(((a & 0xffff) | cmd), IOADDR);
	outw(((a >> 16) & 0xffff), IOADDR + 2);
}

/*
 * Channel Attention: tells the 82596 to examine the SCB.  The udelay gives
 * the chip time to latch the signal — presumably board-specific; confirm
 * against the hardware manual.
 */
static inline void
CA(void) {
	outb(0, IOADDR + 4);
	udelay(8);
}

/* Acknowledge/clear the board interrupt latch. */
static inline void
CLEAR_INT(void) {
	outb(0, IOADDR + 8);
}

#define SIZE(x)	(sizeof(x)/sizeof((x)[0]))

#if 0
/* selftest or dump */
static void
i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
	volatile struct i596_private *lp = dev->priv;
	volatile u16 *outp;
	int i, m;

	memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
	outp = &(lp->dump.dump[0]);

	PORT(va_to_pa(outp), portcmd);
	mdelay(30);	/* random, unmotivated */

	printk("lp486e i82596 %s result:\n", cmdname);
	/* trim trailing zero words, then print the rest, 8 per line */
	for (m = SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
		;
	for (i = 0; i < m; i++) {
		printk(" %04x", lp->dump.dump[i]);
		if (i % 8 == 7)
			printk("\n");
	}
	printk("\n");
}
#endif

/*
 * Initialise the 82596's System Configuration Pointer / ISCP / SCB chain,
 * reset the chip, point it at our SCP, and wait for it to acknowledge by
 * clearing iscp.busy.  Returns 0 on success, 1 if the chip never responds
 * (no i82596 present).
 */
static int
i596_scp_setup(struct net_device *dev) {
	volatile struct i596_private *lp = dev->priv;
	int boguscnt;

	/* Setup SCP, ISCP, SCB */
	/*
	 * sysbus bits:
	 *  only a single byte is significant - here 0x44
	 *  0x80: big endian mode (details depend on stepping)
	 *  0x40: 1
	 *  0x20: interrupt pin is active low
	 *  0x10: lock function disabled
	 *  0x08: external triggering of bus throttle timers
	 *  0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
	 *  0x01: unused
	 */
	lp->scp.sysbus = 0x00440000;		/* linear mode */
	lp->scp.pad = 0;			/* must be zero */
	lp->scp.pa_iscp = va_to_pa(&(lp->iscp));

	/*
	 * The CPU sets the ISCP to 1 before it gives the first CA()
	 */
	lp->iscp.busy = 0x0001;
	lp->iscp.pa_scb = va_to_pa(&(lp->scb));

	lp->scb.command = 0;
	lp->scb.status = 0;
	lp->scb.pa_cmd = I596_NULL;
	/* lp->scb.pa_rfd has been initialised already */

	lp->last_cmd = jiffies;
	lp->cmd_backlog = 0;
	lp->cmd_head = NULL;

	/*
	 * Reset the 82596.
	 * We need to wait 10 systemclock cycles, and
	 * 5 serial clock cycles.
	 */
	PORT(0, PORT_RESET);	/* address part ignored */
	udelay(100);

	/*
	 * Before the CA signal is asserted, the default SCP address
	 * (0x00fffff4) can be changed to a 16-byte aligned value
	 */
	PORT(va_to_pa(&lp->scp), PORT_ALTSCP);	/* change the scp address */

	/*
	 * The initialization procedure begins when a
	 * Channel Attention signal is asserted after a reset.
	 */
	CA();

	/*
	 * The ISCP busy is cleared by the 82596 after the SCB address is read.
	 */
	boguscnt = 100;
	while (lp->iscp.busy) {
		if (--boguscnt == 0) {
			/* No i82596 present? */
			printk("%s: i82596 initialization timed out\n",
			       dev->name);
			return 1;
		}
		udelay(5);
	}
	/* I find here boguscnt==100, so no delay was required. */

	return 0;
}

/*
 * Full chip bring-up: SCP setup, then queue the Configure / IA-Setup / TDR
 * action commands and start the receive unit.  Returns 0 on success,
 * 1 on any timeout.
 */
static int
init_i596(struct net_device *dev) {
	volatile struct i596_private *lp;

	if (i596_scp_setup(dev))
		return 1;

	lp = (struct i596_private *) dev->priv;
	lp->scb.command = 0;

	/* 14 configuration bytes — see init_setup (defined elsewhere in file) */
	memcpy ((void *)lp->i596_config, init_setup, 14);
	lp->set_conf.command = CmdConfigure;
	i596_add_cmd(dev, (void *)&lp->set_conf);

	/* program the individual (MAC) address */
	memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
	lp->set_add.command = CmdIASetup;
	i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);

	/* time-domain-reflectometry cable test */
	lp->tdr.command = CmdTDR;
	i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);

	if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
		return 1;

	lp->scb.command = RX_START;
	CA();

	if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
		return 1;

	return 0;
}

/*
 * Receive a single frame.
 *
 * On a good frame: allocate an skb, copy the payload out of the RFD and
 * hand it to the stack via netif_rx().  On error: bump the matching
 * statistics counters.  Either way the RFD's stat/count are cleared for
 * reuse.  Returns 1 only on skb allocation failure (caller stops the rx
 * loop), 0 otherwise.
 */
static inline int
i596_rx_one(struct net_device *dev, volatile struct i596_private *lp,
	    struct i596_rfd *rfd, int *frames) {

	if (rfd->stat & RFD_STAT_OK) {
		/* a good frame */
		int pkt_len = (rfd->count & 0x3fff);	/* low 14 bits = actual count */
		struct sk_buff *skb = dev_alloc_skb(pkt_len);

		(*frames)++;

		if (rfd->cmd & CMD_EOL)
			printk("Received on EOL\n");

		if (skb == NULL) {
			printk ("%s: i596_rx Memory squeeze, "
				"dropping packet.\n", dev->name);
			lp->stats.rx_dropped++;
			return 1;
		}

		skb->dev = dev;
		memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);

		skb->protocol = eth_type_trans(skb,dev);
		netif_rx(skb);
		lp->stats.rx_packets++;
	} else {
#if 0
		printk("Frame reception error status %04x\n",
		       rfd->stat);
#endif
		lp->stats.rx_errors++;
		if (rfd->stat & RFD_COLLISION)
			lp->stats.collisions++;
		if (rfd->stat & RFD_SHORT_FRAME_ERR)
			lp->stats.rx_length_errors++;
		if (rfd->stat & RFD_DMA_ERR)
			lp->stats.rx_over_errors++;
		if (rfd->stat & RFD_NOBUFS_ERR)
			lp->stats.rx_fifo_errors++;
		if (rfd->stat & RFD_ALIGN_ERR)
			lp->stats.rx_frame_errors++;
		if (rfd->stat & RFD_CRC_ERR)
			lp->stats.rx_crc_errors++;
		if (rfd->stat & RFD_LENGTH_ERR)
			lp->stats.rx_length_errors++;
	}
	/* recycle the descriptor */
	rfd->stat = rfd->count = 0;
	return 0;
}

/*
 * Drain all completed receive frame descriptors.  Each consumed RFD is
 * re-marked as the new end-of-list and rotated to the ring tail.  Returns
 * the number of frames delivered to the stack.
 */
static int
i596_rx(struct net_device *dev) {
	volatile struct i596_private *lp = (struct i596_private *) dev->priv;
	struct i596_rfd *rfd;
	int frames = 0;

	while (1) {
		rfd = pa_to_va(lp->scb.pa_rfd);
		if (!rfd) {
			printk("i596_rx: NULL rfd?\n");
			return 0;
		}
#if 1
		/* debug: a nonzero status without Complete/Busy is unexpected */
		if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
			printk("SF:%p-%04x\n", rfd, rfd->stat);
#endif
		if (!(rfd->stat & RFD_STAT_C))
			break;		/* next one not ready */
		if (i596_rx_one(dev, lp, rfd, &frames))
			break;		/* out of memory */
		/* rotate: consumed RFD becomes the new list tail */
		rfd->cmd = CMD_EOL;
		lp->rx_tail->cmd = 0;
		lp->rx_tail = rfd;
		lp->scb.pa_rfd = rfd->pa_next;
	}

	return frames;
}

/*
 * Abort and free every queued action command.  Tx commands release their
 * skb and count as aborted transmissions; multicast-list commands are just
 * freed; other command types (the static config/IA/TDR blocks embedded in
 * lp) are unlinked but not freed.
 */
static void
i596_cleanup_cmd(struct net_device *dev) {
	volatile struct i596_private *lp;
	struct i596_cmd *cmd;

	lp = (struct i596_private *) dev->priv;
	while (lp->cmd_head) {
		cmd = (struct i596_cmd *)lp->cmd_head;

		lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
		lp->cmd_backlog--;

		switch ((cmd->command) & 0x7) {
			case CmdTx: {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
				struct i596_tbd * tx_cmd_tbd;
				tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);

				dev_kfree_skb_any(tx_cmd_tbd->skb);

				lp->stats.tx_errors++;
				lp->stats.tx_aborted_errors++;

				cmd->pa_next = I596_NULL;
				/* tbd was allocated together with tx_cmd; one kfree frees both */
				kfree((unsigned char *)tx_cmd);
				netif_wake_queue(dev);
				break;
			}
			case CmdMulticastList: {
				// unsigned short count = *((unsigned short *) (ptr + 1));

				cmd->pa_next = I596_NULL;
				kfree((unsigned char *)cmd);
				break;
			}
			default: {
				cmd->pa_next = I596_NULL;
				break;
			}
		}
	}

	/* wait for any SCB command still in flight; result deliberately ignored */
	if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
		;
	lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
}

/*
 * Hard restart of the chip: abort command and receive units, flush the
 * command queue, drain pending rx frames, then re-run full initialisation.
 */
static inline void
i596_reset(struct net_device *dev, volatile struct i596_private *lp,
	   int ioaddr) {

	if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
		;
	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA();

	/* wait for shutdown */
	if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
		;

	i596_cleanup_cmd(dev);
	i596_rx(dev);

	netif_start_queue(dev);
	/*dev_kfree_skb(skb, FREE_WRITE);*/
	init_i596(dev);
}

/*
 * Append an action command to the command unit's queue (interrupts
 * disabled around the list manipulation).  If the queue was empty the
 * command unit is (re)started immediately.  A backlog of more than 16
 * commands that has been stuck for >= 25 jiffies triggers a full reset.
 */
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
	volatile struct i596_private *lp = dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);	/* new tail: end-of-list, interrupt on completion */
	cmd->pa_next = I596_NULL;

	save_flags(flags);
	cli();

	if (lp->cmd_head) {
		lp->cmd_tail->pa_next = va_to_pa(cmd);
	} else {
		lp->cmd_head = cmd;
		if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
			;
		lp->scb.pa_cmd = va_to_pa(cmd);
		lp->scb.command = CUC_START;
		CA();
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	/* resync head with what the chip actually sees */
	lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
	restore_flags(flags);

	if (lp->cmd_backlog > 16) {
		int tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < 25)
			return;

		printk("%s: command unit timed out, status resetting.\n",
		       dev->name);
		i596_reset(dev, lp, ioaddr);
	}
}

/*
 * net_device open(): grab the (shared) IRQ, allocate the rx ring, and
 * bring the chip up.  Fewer than 4 rx buffers is treated as failure.
 * NOTE(review): on the -EAGAIN path the buffers that WERE allocated are
 * not released (the "// release buffers" comment marks the known gap),
 * and init_i596()'s return value is ignored.
 */
static int
i596_open(struct net_device *dev) {
	int i;

	i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ, dev->name, dev);
	if (i) {
		printk("%s: IRQ %d not free\n", dev->name, dev->irq);
		return i;
	}

	if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
		printk("%s: only able to allocate %d receive buffers\n",
		       dev->name, i);

	if (i < 4) {
// release buffers
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}
	netif_start_queue(dev);
	init_i596(dev);
	return 0;	/* Always succeed */
}

/*
 * hard_start_xmit: build a tx command + buffer descriptor (allocated as
 * one kmalloc block) for the skb and queue it to the command unit.
 * NOTE(review): this function continues past the end of this chunk; the
 * visible head is documented, the remainder is elsewhere.
 */
static int
i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
	volatile struct i596_private *lp = dev->priv;
	struct tx_cmd *tx_cmd;
	short length;

	/* If some higher level thinks we've missed a tx-done interrupt
	   we are passed NULL. n.b. dev_tint handles the cli()/sti() itself. */
	if (skb == NULL) {
		printk ("What about dev_tint\n");
		/* dev_tint(dev); */
		return 0;
	}

	/* shouldn't happen */
	if (skb->len <= 0)
		return 0;

	/* pad short frames up to the Ethernet minimum */
	length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

	dev->trans_start = jiffies;

	/* tx_cmd and its tbd live in a single atomic allocation */
	tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
	if (tx_cmd == NULL) {
		printk ("%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
		lp->stats.tx_dropped++;
		dev_kfree_skb (skb);
	} else {
		struct i596_tbd *tx_cmd_tbd;
		tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
		tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
		tx_cmd_tbd->pa_next = I596_NULL;

		tx_cmd->cmd.command = (CMD_FLEX | CmdTx);

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -