📄 lmc_main.c
        if (stat & 0x00000004) {            /* Catch a CRC error by the Xilinx */
            sc->stats.rx_errors++;
            sc->stats.rx_crc_errors++;
            goto skip_packet;
        }

        if (len > LMC_PKT_BUF_SZ) {
            sc->stats.rx_length_errors++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (len < sc->lmc_crcSize + 2) {
            sc->stats.rx_length_errors++;
            sc->stats.rx_SmallPktCnt++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n",
                   dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point:
         * just allocate an skb buffer and continue.
         */
        if (skb == 0x0) {
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                LMC_SKB_FREE(nsb, 1);
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(nsb->tail);
            }
            sc->failed_recv_alloc = 1;
            goto skip_packet;
        }

        dev->last_rx = jiffies;
        sc->stats.rx_packets++;
        sc->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this:
         * packets could be arriving at a constant
         * 44.210 Mbit/s and we're going to copy
         * them into a new buffer??
         */
        if (len > (LMC_MTU - (LMC_MTU >> 2))) {     /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet, don't copy it, just hand it up.
             */
        give_it_anyways:

            sc->lmc_rxq[i] = 0x0;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb_put(skb, len);
            skb->protocol = lmc_proto_type(sc, skb);
            skb->protocol = htons(ETH_P_WAN_PPP);
            skb->mac.raw = skb->data;
//            skb->nh.raw = skb->data;
            skb->dev = dev;
            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one.
             */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                LMC_SKB_FREE(nsb, 1);
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(nsb->tail);
                /* Transferred to the 21140 below */
            } else {
                /*
                 * We've run out of memory; stop trying to allocate
                 * memory and exit the interrupt handler.
                 *
                 * The chip may run out of receivers and stop,
                 * in which case we'll try to allocate the buffer
                 * again (once a second).
                 */
                sc->stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
            }
        } else {
            nsb = dev_alloc_skb(len);
            if (!nsb) {
                goto give_it_anyways;
            }
            memcpy(skb_put(nsb, len), skb->data, len);

            nsb->protocol = lmc_proto_type(sc, skb);
            nsb->mac.raw = nsb->data;
//            nsb->nh.raw = nsb->data;
            nsb->dev = dev;
            lmc_proto_netif(sc, nsb);
        }

    skip_packet:
        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        sc->lmc_next_rx++;
        i = sc->lmc_next_rx % LMC_RXDESCS;
        rx_work_limit--;
        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     *
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, sc->stats.rx_BadPktSurgeCnt);
    }
    */

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
        sc->stats.rxIntLoopCnt = rxIntLoopCnt;      /* debug -baz */
    }

#ifdef DEBUG
    if (rxIntLoopCnt == 0) {
        for (i = 0; i < LMC_RXDESCS; i++) {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) {
                rxIntLoopCnt++;
            }
        }
        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
    }
#endif

    lmc_led_off(sc, LMC_DS3_LED3);

skip_out_of_mem:

    lmc_trace(dev, "lmc_rx out");

    return 0;
}
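/*
 * Illustrative sketch, added by the editor and not part of the original
 * driver: the skip_packet path above returns each serviced descriptor to the
 * DC21140 by writing DESC_OWNED_BY_DC21X4 into its status word, and the DEBUG
 * block re-counts descriptors whose ownership bit (LMC_RDES_OWN_BIT) shows the
 * host still holds them.  A hypothetical helper expressing that same test:
 */
#if 0
static int lmc_rx_desc_host_owned(lmc_softc_t * const sc, int i)
{
    /* non-zero when the host (not the 21140) owns receive descriptor i */
    return (sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4;
}
#endif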
static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc;
    LMC_SPIN_FLAGS;

    lmc_trace(dev, "lmc_get_stats in");

    sc = dev->priv;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    lmc_trace(dev, "lmc_get_stats out");

    return (struct net_device_stats *) &sc->stats;
}

#ifdef MODULE

int init_module (void) /*fold00*/
{
    printk("lmc: module loaded\n");

    /* Have lmc_probe search for all the cards, and allocate devices */
    if (lmc_probe(NULL) < 0)
        return -EIO;

    return 0;
}

void cleanup_module (void) /*fold00*/
{
    struct net_device *dev, *next;
    lmc_softc_t *sc;

    /* we have no pointer to our devices, since they are all dynamically
     * allocated.  So, here we loop through all the network devices
     * looking for ours.  When found, dispose of them properly.
     */
    for (dev = Lmc_root_dev; dev != NULL; dev = next) {
        next = ((lmc_softc_t *) dev->priv)->next_module; /* get it now before we deallocate it */

        printk("%s: removing...\n", dev->name);

        /* close the syncppp stuff, and release irq. Close is run on unreg net */
        lmc_close(dev);
        sc = dev->priv;
        if (sc != NULL)
            lmc_proto_detach(sc);

        /* Remove the device from the linked list */
        unregister_netdev(dev);

        /* Let go of the io region */
        release_region(dev->base_addr, LMC_REG_RANGE);

        /* free our allocated structures. */
        kfree(dev->priv);
        dev->priv = NULL;

        kfree((struct ppp_device *) dev);
        dev = NULL;
    }

    Lmc_root_dev = NULL;
    printk("lmc module unloaded\n");
}
#endif
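/*
 * Editor's sketch, not part of the original file: on later kernels the bare
 * init_module()/cleanup_module() pair above is normally expressed through the
 * module_init()/module_exit() macros.  The names lmc_init_module and
 * lmc_cleanup_module below are hypothetical renames of the functions above.
 */
#if 0
static int __init lmc_init_module(void)
{
    /* same probe entry point the driver already uses */
    return (lmc_probe(NULL) < 0) ? -EIO : 0;
}

module_init(lmc_init_module);
module_exit(lmc_cleanup_module);
#endif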
unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int i;
    int command = (0xf6 << 10) | (devaddr << 5) | regno;
    int retval = 0;

    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

    LMC_MII_SYNC(sc);

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

    /* Clock the read command out on the MII management interface (csr_9) */
    for (i = 15; i >= 0; i--) {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE(sc, csr_9, dataval);
        lmc_delay();            /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, dataval | 0x10000);
        lmc_delay();            /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

    /* Clock the result back in, one bit per clock edge */
    for (i = 19; i > 0; i--) {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();            /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ(sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE(sc, csr_9, 0x40000 | 0x10000);
        lmc_delay();            /* __SLOW_DOWN_IO; */
    }

    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

    return (retval >> 1) & 0xffff;
}

void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int i = 32;
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

    LMC_MII_SYNC(sc);

    /* Clock the write command and data out, MSB first */
    i = 31;
    while (i >= 0) {
        int datav;

        if (command & (1 << i))
            datav = 0x20000;
        else
            datav = 0x00000;

        LMC_CSR_WRITE(sc, csr_9, datav);
        lmc_delay();            /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, (datav | 0x10000));
        lmc_delay();            /* __SLOW_DOWN_IO; */
        i--;
    }

    i = 2;
    while (i > 0) {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();            /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, 0x50000);
        lmc_delay();            /* __SLOW_DOWN_IO; */
        i--;
    }

    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
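/*
 * Illustrative sketch, added by the editor and not part of the original
 * driver: lmc_mii_readreg()/lmc_mii_writereg() bit-bang the MII management
 * frame through csr_9, so updating a register is a plain read-modify-write.
 * The device address 0 and register 16 below are only an example (the driver
 * keeps a shadow of that register in sc->lmc_miireg16); adjust both for the
 * register actually being changed.
 */
#if 0
static void lmc_mii_set_bits(lmc_softc_t * const sc, unsigned bits)
{
    unsigned val = lmc_mii_readreg(sc, 0, 16);   /* current register contents */

    lmc_mii_writereg(sc, 0, 16, val | bits);     /* write it back with bits set */
}
#endif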
in"); lmc_trace(dev, "lmc_set_config out"); return -EOPNOTSUPP;}void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/{ lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); sc->lmc_gpio_io &= ~bits; LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");}void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/{ lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); sc->lmc_gpio_io |= bits; LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");}void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/{ lmc_trace(sc->lmc_device, "lmc_led_on in"); if((~sc->lmc_miireg16) & led){ /* Already on! */ lmc_trace(sc->lmc_device, "lmc_led_on aon out"); return; } sc->lmc_miireg16 &= ~led; lmc_mii