meth.c
来自「Linux Kernel 2.6.9 · drivers/net/meth.c（SGI O2 / IP32 MACE 以太网驱动）」· C语言 代码 · 共 859 行 · 第 1/2 页
C
859 行
} else {
	/* Frame received without errors: try to allocate a fresh
	 * replacement buffer so the full one can be handed up the
	 * stack (GFP_DMA: the chip DMAs straight into this memory). */
	skb=alloc_skb(METH_RX_BUFF_SIZE,GFP_ATOMIC|GFP_DMA);
	if(!skb){
		/* Ouch! No memory! Drop packet on the floor */
		DPRINTK("No mem: dropping packet\n");
		priv->stats.rx_dropped++;
		/* reuse the just-filled buffer for this ring slot;
		 * its payload is lost */
		skb=priv->rx_skbs[priv->rx_write];
	} else {
		struct sk_buff *skb_c=priv->rx_skbs[priv->rx_write];
		/* 8byte status vector+3quad padding + 2byte padding,
		 * to put data on 64bit aligned boundary */
		skb_reserve(skb,METH_RX_HEAD);
		/* Write metadata, and then pass to the receive level */
		skb_put(skb_c,len);
		priv->rx_skbs[priv->rx_write]=skb;
		skb_c->dev = dev;
		skb_c->protocol = eth_type_trans(skb_c, dev);
		dev->last_rx = jiffies;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes+=len;
		netif_rx(skb_c);
	}
}
} else {
	/* Hardware flagged a receive error: account for it and
	 * recycle the buffer already sitting in this ring slot. */
	priv->stats.rx_errors++;
	skb=priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG>0
	printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status);
	if(status&METH_RX_ST_RCV_CODE_VIOLATION) printk(KERN_WARNING "Receive Code Violation\n");
	if(status&METH_RX_ST_CRC_ERR) printk(KERN_WARNING "CRC error\n");
	if(status&METH_RX_ST_INV_PREAMBLE_CTX) printk(KERN_WARNING "Invalid Preamble Context\n");
	if(status&METH_RX_ST_LONG_EVT_SEEN) printk(KERN_WARNING "Long Event Seen...\n");
	if(status&METH_RX_ST_BAD_PACKET) printk(KERN_WARNING "Bad Packet\n");
	if(status&METH_RX_ST_CARRIER_EVT_SEEN) printk(KERN_WARNING "Carrier Event Seen\n");
#endif
}
/* Re-arm this ring slot: point it at the (possibly new) skb's data
 * area, clear the status word, map it for device DMA and hand the
 * DMA address to the chip's Rx FIFO. */
priv->rx_ring[priv->rx_write]=(rx_packet*)skb->head;
priv->rx_ring[priv->rx_write]->status.raw=0;
priv->rx_ring_dmas[priv->rx_write]=dma_map_single(NULL,priv->rx_ring[priv->rx_write], METH_RX_BUFF_SIZE,DMA_FROM_DEVICE);
mace_eth_write(priv->rx_ring_dmas[priv->rx_write], rx_fifo);
ADVANCE_RX_PTR(priv->rx_write);
}
spin_lock(&priv->meth_lock);
/* In case there was underflow, and Rx DMA was disabled */
priv->dma_ctrl|=METH_DMA_RX_INT_EN|METH_DMA_RX_EN;
mace_eth_write(priv->dma_ctrl, dma_ctrl);
/* acknowledge the Rx-threshold interrupt we just serviced */
mace_eth_write(METH_INT_RX_THRESHOLD, int_stat);
spin_unlock(&priv->meth_lock);
}

/* Returns nonzero when the Tx ring cannot accept another packet
 * (one slot is kept unused so full can be told apart from empty). */
static int meth_tx_full(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *)
dev->priv;
	return(priv->tx_count >= TX_RING_ENTRIES-1);
}

/* Reclaim transmitted packets: walk the Tx ring from the software
 * read index up to the hardware read pointer (taken from the
 * interrupt status word), update statistics, free the skbs and
 * release the ring slots. Runs in interrupt context. */
static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
{
	struct meth_private *priv = dev->priv;
	u64 status;
	struct sk_buff *skb;
	/* hardware's current Tx read pointer, bits 16+ of int_status */
	unsigned long rptr=(int_status&TX_INFO_RPTR)>>16;

	spin_lock(&priv->meth_lock);

	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace_eth_write(priv->dma_ctrl, dma_ctrl);

	while(priv->tx_read != rptr){
		skb = priv->tx_skbs[priv->tx_read];
		status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG>=1
		if(priv->tx_read==priv->tx_write)
			DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n",priv->tx_read,priv->tx_write,rptr);
#endif
		if(status & METH_TX_ST_DONE) {
			if(status & METH_TX_ST_SUCCESS){
				priv->stats.tx_packets++;
				priv->stats.tx_bytes += skb->len;
			} else {
				priv->stats.tx_errors++;
#if MFE_DEBUG>=1
				DPRINTK("TX error: status=%016lx <",status);
				if(status & METH_TX_ST_SUCCESS) printk(" SUCCESS");
				if(status & METH_TX_ST_TOOLONG) printk(" TOOLONG");
				if(status & METH_TX_ST_UNDERRUN) printk(" UNDERRUN");
				if(status & METH_TX_ST_EXCCOLL) printk(" EXCCOLL");
				if(status & METH_TX_ST_DEFER) printk(" DEFER");
				if(status & METH_TX_ST_LATECOLL) printk(" LATECOLL");
				printk(" >\n");
#endif
			}
		} else {
			/* hardware claims to have passed this slot, but the
			 * descriptor is not marked done - stop reclaiming */
			DPRINTK("RPTR points us here, but packet not done?\n");
			break;
		}
		dev_kfree_skb_irq(skb);
		priv->tx_skbs[priv->tx_read] = NULL;
		priv->tx_ring[priv->tx_read].header.raw = 0;
		priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
		priv->tx_count --;
	}

	/* wake up queue if it was stopped */
	if (netif_queue_stopped(dev) && ! meth_tx_full(dev)) {
		netif_wake_queue(dev);
	}

	/* acknowledge the Tx interrupts we just handled */
	mace_eth_write(METH_INT_TX_EMPTY | METH_INT_TX_PKT, int_stat);
	spin_unlock(&priv->meth_lock);
}

/* Handle the error bits of the interrupt status word: log them and,
 * for Rx underflow, disable Rx DMA to break out of the interrupt
 * storm (meth_rx re-enables it once the ring is refilled). */
static void meth_error(struct net_device* dev, u32 status)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;

	printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
	/* check for errors too...
 */
	if (status & (METH_INT_TX_LINK_FAIL))
		printk(KERN_WARNING "meth: link failure\n");
	/* Should I do full reset in this case? */
	if (status & (METH_INT_MEM_ERROR))
		printk(KERN_WARNING "meth: memory error\n");
	if (status & (METH_INT_TX_ABORT))
		printk(KERN_WARNING "meth: aborted\n");
	if (status & (METH_INT_RX_OVERFLOW))
		printk(KERN_WARNING "meth: Rx overflow\n");
	if (status & (METH_INT_RX_UNDERFLOW)) {
		printk(KERN_WARNING "meth: Rx underflow\n");
		spin_lock(&priv->meth_lock);
		mace_eth_write(METH_INT_RX_UNDERFLOW, int_stat);
		/* more underflow interrupts will be delivered,
		 * effectively throwing us into an infinite loop.
		 * Thus I stop processing Rx in this case. */
		priv->dma_ctrl&=~METH_DMA_RX_EN;
		mace_eth_write(priv->dma_ctrl, dma_ctrl);
		DPRINTK("Disabled meth Rx DMA temporarily\n");
		spin_unlock(&priv->meth_lock);
	}
	/* acknowledge all error interrupt bits at once */
	mace_eth_write(METH_INT_ERROR, int_stat);
}

/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id, struct pt_regs *pregs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long status;

	status = mace_eth_read(int_stat);
	/* keep dispatching until no interrupt-cause bits remain set */
	while (status & 0xFF) {
		/* First handle errors - if we get Rx underflow,
		   Rx DMA will be disabled, and Rx handler will reenable it.
		   I don't think it's possible to get Rx underflow,
		   without getting Rx interrupt */
		if (status & METH_INT_ERROR) {
			meth_error(dev, status);
		}
		if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
			/* a transmission is over: free the skb */
			meth_tx_cleanup(dev, status);
		}
		if (status & METH_INT_RX_THRESHOLD) {
			/* Rx DMA interrupts were turned off by the underflow
			 * handler above; defer Rx work to a later interrupt */
			if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
				break;
			/* send it to meth_rx for handling */
			meth_rx(dev, status);
		}
		status = mace_eth_read(int_stat);
	}
	return IRQ_HANDLED;
}

/*
 * Transmits packets that fit into TX descriptor (are <=120B)
 */
static void meth_tx_short_prepare(struct meth_private *priv, struct sk_buff *skb)
{
	tx_packet *desc=&priv->tx_ring[priv->tx_write];
	/* pad undersized frames up to the minimum Ethernet length */
	int len = (skb->len<ETH_ZLEN)?ETH_ZLEN:skb->len;

	/* interrupt on completion; payload sits at the tail of the
	 * 128-byte descriptor, hence the (128-len) offset field */
	desc->header.raw=METH_TX_CMD_INT_EN|(len-1)|((128-len)<<16);
	/* maybe I should set whole thing to 0 first... */
	memcpy(desc->data.dt+(120-len),skb->data,skb->len);
	if(skb->len < len)
		memset(desc->data.dt+120-len+skb->len,0,len-skb->len);
}

#define TX_CATBUF1 BIT(25)
/* Build a Tx descriptor for a packet contained in one page: bytes
 * before the first 8-byte boundary are copied into the descriptor
 * itself; the aligned remainder is DMA-mapped as catbuf 1. */
static void meth_tx_1page_prepare(struct meth_private *priv, struct sk_buff *skb)
{
	tx_packet *desc=&priv->tx_ring[priv->tx_write];
	/* round skb->data up to an 8-byte boundary */
	void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
	int buffer_len = skb->len - unaligned_len;
	dma_addr_t catbuf;

	desc->header.raw=METH_TX_CMD_INT_EN|TX_CATBUF1|(skb->len-1);

	/* unaligned part */
	if(unaligned_len){
		memcpy(desc->data.dt+(120-unaligned_len), skb->data, unaligned_len);
		desc->header.raw |= (128-unaligned_len) << 16;
	}

	/* first page */
	catbuf = dma_map_single(NULL, buffer_data, buffer_len, DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
	desc->data.cat_buf[0].form.len = buffer_len-1;
}

#define TX_CATBUF2 BIT(26)
/* Build a Tx descriptor for a packet crossing a page boundary:
 * unaligned head copied into the descriptor, then one DMA-mapped
 * buffer per page (catbuf 1 and catbuf 2). */
static void meth_tx_2page_prepare(struct meth_private *priv, struct sk_buff *skb)
{
	tx_packet *desc=&priv->tx_ring[priv->tx_write];
	void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	void *buffer2_data = (void
*)PAGE_ALIGN((unsigned long)skb->data);
	int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
	int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
	int buffer2_len = skb->len - buffer1_len - unaligned_len;
	dma_addr_t catbuf1, catbuf2;

	desc->header.raw=METH_TX_CMD_INT_EN|TX_CATBUF1|TX_CATBUF2|(skb->len-1);

	/* unaligned part */
	if(unaligned_len){
		memcpy(desc->data.dt+(120-unaligned_len), skb->data, unaligned_len);
		desc->header.raw |= (128-unaligned_len) << 16;
	}

	/* first page */
	catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len, DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
	desc->data.cat_buf[0].form.len = buffer1_len-1;

	/* second page */
	catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len, DMA_TO_DEVICE);
	desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
	desc->data.cat_buf[1].form.len = buffer2_len-1;
}

/* Queue one skb on the Tx ring, choosing the descriptor format from
 * the packet's size and page layout, then publish the new write
 * pointer to the chip. Caller must hold priv->meth_lock. */
static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
	/* Remember the skb, so we can free it at interrupt time */
	priv->tx_skbs[priv->tx_write] = skb;
	if(skb->len <= 120) {
		/* Whole packet fits into descriptor */
		meth_tx_short_prepare(priv,skb);
	} else if(PAGE_ALIGN((unsigned long)skb->data) !=
		  PAGE_ALIGN((unsigned long)skb->data+skb->len-1)) {
		/* Packet crosses page boundary */
		meth_tx_2page_prepare(priv,skb);
	} else {
		/* Packet is in one page */
		meth_tx_1page_prepare(priv,skb);
	}
	priv->tx_write = (priv->tx_write+1) & (TX_RING_ENTRIES-1);
	/* let the chip know about the new descriptor */
	mace_eth_write(priv->tx_write, tx_info);
	priv->tx_count ++;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int meth_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&priv->meth_lock,flags);

	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace_eth_write(priv->dma_ctrl, dma_ctrl);

	meth_add_to_tx_ring(priv, skb);
	dev->trans_start = jiffies; /* save the timestamp */

	/* If TX ring is
full, tell the upper layer to stop sending packets */
	if (meth_tx_full(dev)) {
		printk(KERN_DEBUG "TX full: stopping\n");
		netif_stop_queue(dev);
	}

	/* Restart DMA notification */
	priv->dma_ctrl |= METH_DMA_TX_INT_EN;
	mace_eth_write(priv->dma_ctrl, dma_ctrl);

	spin_unlock_irqrestore(&priv->meth_lock,flags);

	return 0;
}

/*
 * Deal with a transmit timeout: reset the chip, rebuild both rings
 * (all pending skbs are freed) and restart DMA.
 */
static void meth_tx_timeout(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long flags;

	printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

	/* Protect against concurrent rx interrupts */
	spin_lock_irqsave(&priv->meth_lock,flags);

	/* Try to reset the interface. */
	meth_reset(dev);

	priv->stats.tx_errors++;

	/* Clear all rings */
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);
	meth_init_tx_ring(priv);
	meth_init_rx_ring(priv);

	/* Restart dma */
	priv->dma_ctrl|=METH_DMA_TX_EN|METH_DMA_RX_EN|METH_DMA_RX_INT_EN;
	mace_eth_write(priv->dma_ctrl, dma_ctrl);

	/* Enable interrupt */
	spin_unlock_irqrestore(&priv->meth_lock,flags);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return;
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* no private ioctls implemented; accept and ignore */
	DPRINTK("ioctl\n");
	return 0;
}

/*
 * Return statistics to the caller
 */
static struct net_device_stats *meth_stats(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	return &priv->stats;
}

/*
 * The init function.
*/static struct net_device *meth_init(void){ struct net_device *dev; struct meth_private *priv; int ret; dev = alloc_etherdev(sizeof(struct meth_private)); if (!dev) return ERR_PTR(-ENOMEM); dev->open = meth_open; dev->stop = meth_release; dev->set_config = meth_config; dev->hard_start_xmit = meth_tx; dev->do_ioctl = meth_ioctl; dev->get_stats = meth_stats;#ifdef HAVE_TX_TIMEOUT dev->tx_timeout = meth_tx_timeout; dev->watchdog_timeo = timeout;#endif dev->irq = MACE_ETHERNET_IRQ; dev->base_addr = (unsigned long)&mace->eth; priv = (struct meth_private *) dev->priv; spin_lock_init(&priv->meth_lock); ret = register_netdev(dev); if (ret) { free_netdev(dev); return ERR_PTR(ret); } printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n", dev->name, (unsigned int)mace_eth_read(mac_ctrl) >> 29); return 0;}static struct net_device *meth_dev;static int __init meth_init_module(void){ meth_dev = meth_init(); if (IS_ERR(meth_dev)) return PTR_ERR(meth_dev); return 0;}static void __exit meth_exit_module(void){ unregister_netdev(meth_dev); free_netdev(meth_dev);}module_init(meth_init_module);module_exit(meth_exit_module);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?