📄 acacia.c
                                    memcpy(eth->h_source, odev->dev_addr, 6);
                                    memcpy(eth->h_dest, rt->u.dst.neighbour->ha, 6);
                                    skb->dev = odev;
                                    if (odev->hard_start_xmit(skb, odev) != 0) {
                                        panic("I am dead");
                                    }
                                    netdev_rx_stat[CPU_ID].fastroute_success++;
                                } else {
                                    /* Semi Fast Route Path */
                                    skb->pkt_type = PACKET_FASTROUTE;
                                    skb->nh.raw = skb->data + ETH_HLEN;
                                    skb->protocol = __constant_htons(ETH_P_IP);
                                    netdev_rx_stat[CPU_ID].fastroute_defer++;
                                    netif_rx(skb);
                                }
                            }
                        }
                    }
// bigpacket:
                    if (fast_routed == 0)
#endif /* #ifdef CONFIG_NET_FASTROUTE */
                    {
                        skb->protocol = eth_type_trans(skb, dev);
                        //printk("pkt rcvd. len = %d type = 0x%x\n", pktuncrc_len, skb->protocol);
                        /* pass the packet to upper layers */
                        netif_rx(skb);
                    }
                    dev->last_rx = jiffies;
                    lp->stats.rx_packets++;
                    lp->stats.rx_bytes += pktuncrc_len;
                    if (IS_RCV_MP(devcs))
                        lp->stats.multicast++;
                    /* 16 bit align */
                    skb_reserve(skb_new, 2);
                    skb_new->dev = dev;
                    lp->rx_skb[lp->rx_next_done] = skb_new;
                } else {
                    ERR("no memory, dropping rx packet.\n");
                    lp->stats.rx_errors++;
                    lp->stats.rx_dropped++;
                }
            }
        } else {
            /* This should only happen if we enable accepting broken packets */
            lp->stats.rx_errors++;
            lp->stats.rx_dropped++;
            /* add statistics counters */
            if (IS_RCV_CRC_ERR(devcs)) {
                DBG(2, "RX CRC error\n");
                lp->stats.rx_crc_errors++;
            } else if (IS_RCV_LOR_ERR(devcs)) {
                DBG(2, "RX LOR error\n");
                lp->stats.rx_length_errors++;
            } else if (IS_RCV_LE_ERR(devcs)) {
                DBG(2, "RX LE error\n");
                lp->stats.rx_length_errors++;
            } else if (IS_RCV_OVR_ERR(devcs)) {
                /*
                 * The overflow errors are handled through
                 * an interrupt handler.
                 */
                lp->stats.rx_over_errors++;
            } else if (IS_RCV_CV_ERR(devcs)) {
                /* code violation */
                DBG(2, "RX CV error\n");
                lp->stats.rx_frame_errors++;
            } else if (IS_RCV_CES_ERR(devcs)) {
                DBG(2, "RX Preamble error\n");
            }
        }
        rd->devcs = 0;
        /* restore descriptor's curr_addr */
        if (skb_new)
            rd->ca = virt_to_phys(skb_new->data);
        else
            rd->ca = virt_to_phys(skb->data);
        rd->control = DMA_COUNT(ACACIA_RBSIZE) | DMAD_cod_m | DMAD_iod_m;
        lp->rd_ring[(lp->rx_next_done - 1) & ACACIA_RDS_MASK].control &= ~(DMAD_cod_m);
        lp->rx_next_done = (lp->rx_next_done + 1) & ACACIA_RDS_MASK;
        rd = KSEG1ADDR(&lp->rd_ring[lp->rx_next_done]);
        local_writel(~DMAS_d_m, &lp->rx_dma_regs->dmas);
    }
    dmas = local_readl(&lp->rx_dma_regs->dmas);
    if (dmas & DMAS_h_m) {
#ifdef ACACIA_PROC_DEBUG
        lp->dma_halt_cnt++;
#endif
        rd->devcs = 0;
        skb = lp->rx_skb[lp->rx_next_done];
        rd->ca = virt_to_phys(skb->data);
        acacia_chain_rx(lp, rd);
    }
    local_writel(~(DMAS_h_m | DMAS_e_m), &lp->rx_dma_regs->dmas);
    /* Enable D H E bit in Rx DMA */
    local_writel(local_readl(&lp->rx_dma_regs->dmasm) &
                 ~(DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
    spin_unlock_irqrestore(&lp->lock, flags);
}

/* Ethernet Tx DMA interrupt */
static void acacia_tx_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_id;
    struct acacia_local *lp;
    volatile u32 dmas;

    ASSERT(dev != NULL);
    lp = (struct acacia_local *)dev->priv;

    spin_lock(&lp->lock);
    /* Mask F E bit in Tx DMA */
    local_writel(local_readl(&lp->tx_dma_regs->dmasm) |
                 (DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
    dmas = local_readl(&lp->tx_dma_regs->dmas);
    if (dmas & (DMAS_f_m | DMAS_e_m)) {
        tasklet_hi_schedule(lp->tx_tasklet);
        if (dmas & DMAS_e_m)
            ERR(__FUNCTION__ ": DMA error\n");
    }
    local_writel(~dmas, &lp->tx_dma_regs->dmas);
    if (lp->tx_chain_status == filled &&
        (local_readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
        local_writel(virt_to_phys(&lp->td_ring[lp->tx_chain_head]),
                     &(lp->tx_dma_regs->dmandptr));
        lp->tx_chain_status = empty;
        lp->tx_chain_head = lp->tx_chain_tail;
    }
    spin_unlock(&lp->lock);
}
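/*
 * Tx completion is handled in two stages: the interrupt handler above
 * masks the finished/error (F, E) interrupt sources and schedules
 * acacia_tx_tasklet(), which reclaims completed descriptors outside of
 * interrupt context and unmasks F and E again once the ring is drained.
 */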
static void acacia_tx_tasklet(unsigned long tx_data_dev)
{
    struct net_device *dev = (struct net_device *)tx_data_dev;
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    volatile DMAD_t td = KSEG1ADDR(&lp->td_ring[lp->tx_next_done]);
    u32 devcs;
    unsigned long flags;

    spin_lock_irqsave(&lp->lock, flags);
    /* process all desc that are done */
    while (IS_DMA_FINISHED(td->control)) {
        if (lp->tx_full == 1) {
            netif_wake_queue(dev);
            lp->tx_full = 0;
        }
        devcs = lp->td_ring[lp->tx_next_done].devcs;
        if ((devcs & (ETHTX_fd_m | ETHTX_ld_m)) !=
            (ETHTX_fd_m | ETHTX_ld_m)) {
            lp->stats.tx_errors++;
            lp->stats.tx_dropped++;
            /* should never happen */
            DBG(1, __FUNCTION__ ": split tx ignored\n");
        } else if (IS_TX_TOK(devcs)) {
            /* transmit OK */
            lp->stats.tx_packets++;
            lp->stats.tx_bytes += lp->tx_skb[lp->tx_next_done]->len;
        } else {
            lp->stats.tx_errors++;
            lp->stats.tx_dropped++;
            /* underflow */
            if (IS_TX_UND_ERR(devcs))
                lp->stats.tx_fifo_errors++;
            /* oversized frame */
            if (IS_TX_OF_ERR(devcs))
                lp->stats.tx_aborted_errors++;
            /* excessive deferrals */
            if (IS_TX_ED_ERR(devcs))
                lp->stats.tx_carrier_errors++;
            /* collisions: medium busy */
            if (IS_TX_EC_ERR(devcs))
                lp->stats.collisions++;
            /* late collision */
            if (IS_TX_LC_ERR(devcs))
                lp->stats.tx_window_errors++;
        }
        /* We must always free the original skb */
        if (lp->tx_skb[lp->tx_next_done] != NULL) {
            dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
            lp->tx_skb[lp->tx_next_done] = NULL;
        }
        lp->td_ring[lp->tx_next_done].control = DMAD_iof_m;
        lp->td_ring[lp->tx_next_done].devcs = ETHTX_fd_m | ETHTX_ld_m;
        lp->td_ring[lp->tx_next_done].link = 0;
        lp->td_ring[lp->tx_next_done].ca = 0;
        lp->tx_count--;
        /* go on to next transmission */
        lp->tx_next_done = (lp->tx_next_done + 1) & ACACIA_TDS_MASK;
        td = KSEG1ADDR(&lp->td_ring[lp->tx_next_done]);
    }
/*
    dmas = local_readl(&lp->tx_dma_regs->dmas);
    if (dmas & DMAS_e_m) {
        ERR(__FUNCTION__ ": DMAS_e_m\n");
        lp->stats.tx_errors++;
    }
    local_writel(~dmas, &lp->tx_dma_regs->dmas);
*/
    /* Enable F E bit in Tx DMA */
    local_writel(local_readl(&lp->tx_dma_regs->dmasm) &
                 ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
    spin_unlock_irqrestore(&lp->lock, flags);
}

/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *acacia_get_stats(struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    return &lp->stats;
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void acacia_multicast_list(struct net_device *dev)
{
    /* always listen to broadcasts and treat the IFF bits independently */
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    unsigned long flags;
    u32 recognise = ETHARC_ab_m;    /* always accept broadcasts */

    if (dev->flags & IFF_PROMISC)   /* set promiscuous mode */
        recognise |= ETHARC_pro_m;
    if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15))
        recognise |= ETHARC_am_m;   /* all multicast & bcast */
    else if (dev->mc_count > 0) {
        DBG(2, __FUNCTION__ ": mc_count %d\n", dev->mc_count);
        recognise |= ETHARC_am_m;   /* for the time being */
    }
    spin_lock_irqsave(&lp->lock, flags);
    local_writel(recognise, &lp->eth_regs->etharc);
    spin_unlock_irqrestore(&lp->lock, flags);
}

static void acacia_tx_timeout(struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&lp->lock, flags);
    acacia_restart(dev);
    spin_unlock_irqrestore(&lp->lock, flags);
}
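/*
 * Note: descriptor indices throughout this driver advance with
 * (index + 1) & ACACIA_TDS_MASK (or ACACIA_RDS_MASK), which wraps
 * correctly only when ACACIA_NUM_TDS and ACACIA_NUM_RDS are powers
 * of two.
 */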
/*
 * Initialize the ACACIA ethernet controller.
 */
static int acacia_init(struct net_device *dev)
{
    struct acacia_local *lp = (struct acacia_local *)dev->priv;
    int i, j;

    /* Disable DMA */
    acacia_abort_tx(dev);
    acacia_abort_rx(dev);

    /* reset ethernet logic */
    local_writel(0, &lp->eth_regs->ethintfc);
    while ((local_readl(&lp->eth_regs->ethintfc) & ETHINTFC_rip_m))
        dev->trans_start = jiffies;

    /* Enable Ethernet Interface */
    local_writel(ETHINTFC_en_m, &lp->eth_regs->ethintfc);

    tasklet_disable(lp->rx_tasklet);
    tasklet_disable(lp->tx_tasklet);

    /* Initialize the transmit Descriptors */
    for (i = 0; i < ACACIA_NUM_TDS; i++) {
        lp->td_ring[i].control = DMAD_iof_m;
        lp->td_ring[i].devcs = ETHTX_fd_m | ETHTX_ld_m;
        lp->td_ring[i].ca = 0;
        lp->td_ring[i].link = 0;
        if (lp->tx_skb[i] != NULL) {
            /* free dangling skb */
            dev_kfree_skb_any(lp->tx_skb[i]);
            lp->tx_skb[i] = NULL;
        }
    }
    lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
        lp->tx_full = lp->tx_count = 0;
    lp->tx_chain_status = empty;

    /*
     * Initialize the receive descriptors so that they
     * become a circular linked list, ie. let the last
     * descriptor point to the first again.
     */
    for (i = 0; i < ACACIA_NUM_RDS; i++) {
        struct sk_buff *skb = lp->rx_skb[i];

        if (lp->rx_skb[i] == NULL) {
            skb = dev_alloc_skb(ACACIA_RBSIZE + 2);
            if (skb == NULL) {
                ERR("No memory in the system\n");
                for (j = 0; j < ACACIA_NUM_RDS; j++)
                    if (lp->rx_skb[j] != NULL)
                        dev_kfree_skb_any(lp->rx_skb[j]);
                return 1;
            } else {
                skb->dev = dev;
                skb_reserve(skb, 2);
                lp->rx_skb[i] = skb;
                lp->rd_ring[i].ca = virt_to_phys(skb->data);
            }
        }
        lp->rd_ring[i].control = DMAD_iod_m | DMA_COUNT(ACACIA_RBSIZE);
        lp->rd_ring[i].devcs = 0;
        lp->rd_ring[i].ca = virt_to_phys(skb->data);
        lp->rd_ring[i].link = virt_to_phys(&lp->rd_ring[i + 1]);
    }
    /* loop back */
    lp->rd_ring[ACACIA_NUM_RDS - 1].link = virt_to_phys(&lp->rd_ring[0]);
    lp->rx_next_done = 0;
    lp->rd_ring[ACACIA_NUM_RDS - 1].control |= DMAD_cod_m;
    lp->rx_chain_head = 0;
    lp->rx_chain_tail = 0;
    lp->rx_chain_status = empty;

    local_writel(0, &lp->rx_dma_regs->dmas);
    /* Start Rx DMA */
    acacia_start_rx(lp, &lp->rd_ring[0]);

    /* Enable F E bit in Tx DMA */
    local_writel(local_readl(&lp->tx_dma_regs->dmasm) &
                 ~(DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
    /* Enable D H E bit in Rx DMA */
    local_writel(local_readl(&lp->rx_dma_regs->dmasm) &
                 ~(DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);

    /* Accept only packets destined for this Ethernet device address */
    local_writel(ETHARC_ab_m, &lp->eth_regs->etharc);

    /* Set all Ether station address registers to their initial values */
    local_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
    local_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
    local_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
    local_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
    local_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
    local_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
    local_writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
    local_writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);

    /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
    local_writel(ETHMAC2_pe_m | ETHMAC2_cen_m | ETHMAC2_fd_m,
                 &lp->eth_regs->ethmac2);
    //ETHMAC2_flc_m ETHMAC2_fd_m lp->duplex_mode

    /* Back to back inter-packet-gap */
    local_writel(0x15, &lp->eth_regs->ethipgt);
    /* Non - Back to back inter-packet-gap */
    local_writel(0x12, &lp->eth_regs->ethipgr);
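    /*
     * The management-clock prescaler written below is derived from the
     * CPU clock: (idt_cpu_freq / MII_CLOCK + 1) & ~1 picks an even
     * divisor close to idt_cpu_freq / MII_CLOCK, so the resulting MDC
     * rate stays near MII_CLOCK whatever the CPU frequency is (hence
     * the "clock independent setting" remark).
     */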
    /* Management Clock Prescaler Divisor */
    /* Clock independent setting */
    local_writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
                 &lp->eth_regs->ethmcp);

    /* don't transmit until fifo contains 48b */
    local_writel(48, &lp->eth_regs->ethfifott);

    local_writel(ETHMAC1_re_m, &lp->eth_regs->ethmac1);

    tasklet_enable(lp->rx_tasklet);
    tasklet_enable(lp->tx_tasklet);

    netif_start_queue(dev);

    return 0;
}

static void acacia_cleanup_module(void)
{
    int i;

    for (i = 0; acacia_iflist[i].iobase; i++) {
        struct acacia_if_t *bif = &acacia_iflist[i];
        if (bif->dev != NULL) {
            struct acacia_local *lp =
                (struct acacia_local *)bif->dev->priv;
            if (lp != NULL) {
                if (lp->eth_regs)
                    iounmap((void *)lp->eth_regs);
                if (lp->rx_dma_regs)
                    iounmap((void *)lp->rx_dma_regs);
                if (lp->tx_dma_regs)
                    iounmap((void *)lp->tx_dma_regs);
                if (lp->td_ring)
                    kfree((void *)KSEG0ADDR(lp->td_ring));
#ifdef ACACIA_PROC_DEBUG
                if (lp->ps)
                    remove_proc_entry("net/rc32438", NULL);
#endif
                kfree(lp);
            }
            unregister_netdev(bif->dev);
            kfree(bif->dev);
            release_region(bif->iobase, 0x24C);
        }
    }
}

#ifndef MODULE
static int __init acacia_setup(char *options)
{
    /* no options yet */
    return 1;
}

static int __init acacia_setup_ethaddr0(char *options)
{
    memcpy(mac0, options, 6);
    mac0[6] = '\0';
    return 1;
}

static int __init acacia_setup_ethaddr1(char *options)
{
    memcpy(mac1, options, 6);
    mac1[6] = '\0';
    return 1;
}

__setup("rc32438eth=", acacia_setup);
__setup("ethaddr0=", acacia_setup_ethaddr0);
__setup("ethaddr1=", acacia_setup_ethaddr1);
#endif /* MODULE */

module_init(acacia_init_module);
module_exit(acacia_cleanup_module);