adapter.c
        lp->phy_timer.function = &poll_gmii;
        init_timer(&lp->phy_timer);
        add_timer(&lp->phy_timer);

        return 0;
}

static int
xenet_close(struct net_device *dev)
{
        struct net_local *lp;
        unsigned long flags, flags_reset;

        spin_lock_irqsave(&XTE_spinlock, flags_reset);
        lp = (struct net_local *) dev->priv;

        /* Shut down the PHY monitoring timer. */
        del_timer_sync(&lp->phy_timer);

        /* Stop Send queue */
        netif_stop_queue(dev);

        /* Now we could stop the device */
        XTemac_Stop(&lp->Emac);

        /*
         * If not in polled mode, free the interrupt. Currently, there
         * isn't any code to set polled mode, so this check is probably
         * superfluous.
         */
        if ((XTemac_GetOptions(&lp->Emac) & XTE_POLLED_OPTION) == 0)
                free_irq(dev->irq, dev);

        spin_unlock_irqrestore(&XTE_spinlock, flags_reset);

        spin_lock_irqsave(&receivedQueueSpin, flags);
        list_del(&(lp->rcv));
        spin_unlock_irqrestore(&receivedQueueSpin, flags);

        spin_lock_irqsave(&sentQueueSpin, flags);
        list_del(&(lp->xmit));
        spin_unlock_irqrestore(&sentQueueSpin, flags);

        return 0;
}

static struct net_device_stats *
xenet_get_stats(struct net_device *dev)
{
        struct net_local *lp = (struct net_local *) dev->priv;
        return &lp->stats;
}

static int
xenet_change_mtu(struct net_device *dev, int new_mtu)
{
#ifdef CONFIG_XILINX_GIGE_VLAN
        int head_size = XTE_HDR_VLAN_SIZE;
#else
        int head_size = XTE_HDR_SIZE;
#endif
        struct net_local *lp = (struct net_local *) dev->priv;
        int max_frame = new_mtu + head_size + XTE_TRL_SIZE;
        int min_frame = 1 + head_size + XTE_TRL_SIZE;

        if ((max_frame < min_frame) || (max_frame > lp->max_frame_size))
                return -EINVAL;

        dev->mtu = new_mtu;     /* change mtu in net_device structure */
        return 0;
}
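/*
 * Worked example of the frame-size check above (assuming the usual values:
 * XTE_HDR_SIZE = 14-byte Ethernet header, XTE_HDR_VLAN_SIZE = 18 bytes with
 * a VLAN tag, XTE_TRL_SIZE = 4-byte FCS; see the xtemac headers for the
 * actual constants): a standard 1500-byte MTU gives
 * max_frame = 1500 + 14 + 4 = 1518, or 1522 with VLAN enabled, and this
 * must not exceed the lp->max_frame_size configured for the core.
 */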
static int
xenet_FifoSend(struct sk_buff *skb, struct net_device *dev)
{
        struct net_local *lp;
        unsigned int len;
        XStatus result;
        unsigned long flags, fifo_free_bytes;

        /*
         * The following lock protects the GetFreeBytes, FifoWrite and
         * FifoSend sequence, which could also run from FifoSendHandler or
         * from another processor in the SMP case.
         */
        spin_lock_irqsave(&XTE_tx_spinlock, flags);

        lp = (struct net_local *) dev->priv;
        len = skb->len;

        fifo_free_bytes = XTemac_FifoGetFreeBytes(&lp->Emac, XTE_SEND);
        if (fifo_free_bytes < len) {
                netif_stop_queue(dev);  /* stop send queue */
                lp->deferred_skb = skb; /* buffer the skb; it will be sent from interrupt context */
                spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
                return 0;
        }

        /* Write frame data to FIFO */
        result = XTemac_FifoWrite(&lp->Emac, (void *) skb->data, len,
                                  XTE_END_OF_PACKET);
        if (result != XST_SUCCESS) {
                reset(dev, __LINE__);
                lp->stats.tx_errors++;
                spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
                return -EIO;
        }

        /* Initiate transmit */
        if ((result = XTemac_FifoSend(&lp->Emac, len)) != XST_SUCCESS) {
                reset(dev, __LINE__);
                lp->stats.tx_errors++;
                spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
                return -EIO;
        }

        lp->stats.tx_bytes += len;
        spin_unlock_irqrestore(&XTE_tx_spinlock, flags);

        dev_kfree_skb(skb);     /* free skb */
        dev->trans_start = jiffies;
        return 0;
}

/* Callback function for completed frames sent in FIFO interrupt driven mode */
static void
FifoSendHandler(void *CallbackRef)
{
        struct net_device *dev;
        struct net_local *lp;
        XStatus result;
        struct sk_buff *skb;

        spin_lock(&XTE_tx_spinlock);

        dev = (struct net_device *) CallbackRef;
        lp = (struct net_local *) dev->priv;
        lp->stats.tx_packets++;

        /* Send out the deferred skb and wake up the send queue if a deferred skb exists. */
        if (lp->deferred_skb) {
                skb = lp->deferred_skb;

                /* If there is no room for the deferred packet, return. */
                if (XTemac_FifoGetFreeBytes(&lp->Emac, XTE_SEND) < skb->len) {
                        spin_unlock(&XTE_tx_spinlock);
                        return;
                }

                /* Write frame data to FIFO */
                result = XTemac_FifoWrite(&lp->Emac, (void *) skb->data,
                                          skb->len, XTE_END_OF_PACKET);
                if (result != XST_SUCCESS) {
                        reset(dev, __LINE__);
                        lp->stats.tx_errors++;
                        spin_unlock(&XTE_tx_spinlock);
                        return;
                }

                /* Initiate transmit */
                if ((result = XTemac_FifoSend(&lp->Emac, skb->len)) != XST_SUCCESS) {
                        reset(dev, __LINE__);
                        lp->stats.tx_errors++;
                        spin_unlock(&XTE_tx_spinlock);
                        return;
                }

                lp->deferred_skb = NULL;
                lp->stats.tx_packets++;
                lp->stats.tx_bytes += skb->len;
                dev->trans_start = jiffies;

                /* Update the stats above before freeing, so skb->len is not read after free. */
                dev_kfree_skb_irq(skb);

                netif_wake_queue(dev);  /* wake up send queue */
        }

        spin_unlock(&XTE_tx_spinlock);
}

#if 0
/*
 * These are used for debugging purposes, left here in case they are useful
 * for further debugging.
 */
static unsigned int
_xenet_tx_csum(struct sk_buff *skb)
{
        unsigned int csum = 0;
        long csstart = skb->h.raw - skb->data;

        if (csstart != skb->len) {
                csum = skb_checksum(skb, csstart, skb->len - csstart, 0);
        }
        return csum;
}

static inline unsigned int
_xenet_rx_csum(struct sk_buff *skb)
{
        return skb_checksum(skb, 0, skb->len, 0);
}
#endif
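/*
 * Illustrative helper, not part of the original driver: a minimal sketch of
 * how the checksum-offload offsets used in xenet_SgSend_internal() below are
 * derived when the stack hands us an skb with ip_summed == CHECKSUM_HW
 * (2.6-era sk_buff layout, as used throughout this file). The name
 * example_tx_csum_offsets() is hypothetical.
 */
static inline void
example_tx_csum_offsets(struct sk_buff *skb, u16 *start, u16 *insert)
{
        /* Offset from the start of the frame at which the hardware begins
         * checksumming (the header offset the stack records in skb->h.raw). */
        *start = skb->h.raw - skb->data;

        /* On transmit, skb->csum is the offset within that header at which
         * the computed checksum must be written back. */
        *insert = *start + skb->csum;
}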
/*
 * xenet_SgSend_internal is an internal-use send routine. Any locks that need
 * to be held must be acquired before calling this routine.
 */
static XStatus
xenet_SgSend_internal(struct sk_buff *skb, struct net_device *dev)
{
        struct net_local *lp;
        XDmaBdV3 *bd_ptr;
        int result;
        int total_frags;
        int i;
        void *virt_addr;
        size_t len;
        dma_addr_t phy_addr;
        XDmaBdV3 *first_bd_ptr;
        skb_frag_t *frag;

        lp = (struct net_local *) dev->priv;

        /* Get skb_shinfo(skb)->nr_frags + 1 buffer descriptors. */
        total_frags = skb_shinfo(skb)->nr_frags + 1;

        /* stats */
        if (lp->max_frags_in_a_packet < total_frags) {
                lp->max_frags_in_a_packet = total_frags;
        }

        if (total_frags < XTE_SEND_BD_CNT) {
                result = XTemac_SgAlloc(&lp->Emac, XTE_SEND, total_frags,
                                        &bd_ptr);
                if (result != XST_SUCCESS) {
                        netif_stop_queue(dev);  /* stop send queue */
                        lp->deferred_skb = skb; /* buffer the skb; it will be sent from interrupt context */
                        return result;
                }
        } else {
                dev_kfree_skb(skb);
                lp->stats.tx_dropped++;
                printk(KERN_ERR
                       "%s: XTemac: could not send TX socket buffers (too many fragments).\n",
                       dev->name);
                return XST_FAILURE;
        }

        len = skb_headlen(skb);

        /* Get the physical address of the header. */
        phy_addr = (u32) dma_map_single(NULL, skb->data, len, DMA_TO_DEVICE);

        /* Set up the header fragment; it is stored differently in the skb. */
        XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
        XDmaBdV3_mSetLength(bd_ptr, len);
        XDmaBdV3_mSetId(bd_ptr, skb);
        XDmaBdV3_mClearLast(bd_ptr);

        /*
         * If TX checksum offloading is enabled and the ethernet stack wants
         * us to perform the checksum in hardware, skb->ip_summed is
         * CHECKSUM_HW. Otherwise skb->ip_summed is CHECKSUM_NONE, meaning
         * the checksum is already done, or CHECKSUM_UNNECESSARY, meaning
         * checksumming is turned off (e.g. the loopback interface).
         *
         * skb->csum is an overloaded value. On send, skb->csum is the offset
         * into the buffer (from skb->h.raw) at which to place the csum value.
         * On receive, this field gets set to the actual csum value before it
         * is passed up the stack.
         *
         * When we get here, the ethernet stack above will have already
         * computed the pseudoheader csum value and placed it in the TCP/UDP
         * header.
         *
         * The IP header csum has also already been computed and inserted.
         *
         * Since the IP header with its own csum should compute to a null
         * csum, it should be ok to include it in the hw csum. If it is
         * decided to change this scheme, the skb should be examined before
         * dma_map_single() is called, which flushes the page from the cpu's
         * cache.
         *
         * skb->data points to the beginning of the whole packet;
         * skb->h.raw points to the beginning of the ip header.
         */
        if (skb->ip_summed == CHECKSUM_HW) {
#if 0
                {
                        unsigned int csum = _xenet_tx_csum(skb);

                        *((unsigned short *) (skb->h.raw + skb->csum)) =
                                csum_fold(csum);
                        XTemac_mSgSendBdCsumDisable(bd_ptr);
                }
#else
                XTemac_mSgSendBdCsumEnable(bd_ptr);
                XTemac_mSgSendBdCsumSetup(bd_ptr, skb->h.raw - skb->data,
                                          (skb->h.raw - skb->data) + skb->csum);
#endif
                lp->tx_hw_csums++;
        } else {
                /*
                 * This routine will do no harm even if hardware checksum
                 * capability is off.
                 */
                XTemac_mSgSendBdCsumDisable(bd_ptr);
        }
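        /*
         * Map each paged fragment for DMA and chain it onto the next buffer
         * descriptor in the send ring. Only the first BD carries the skb
         * pointer (the remaining BDs get a NULL id), and the final BD is
         * marked "last" so the DMA engine knows where the frame ends.
         */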
        first_bd_ptr = bd_ptr;

        frag = &skb_shinfo(skb)->frags[0];
        for (i = 1; i < total_frags; i++, frag++) {
                bd_ptr = XTemac_mSgSendBdNext(&lp->Emac, bd_ptr);

                virt_addr = (void *) page_address(frag->page) +
                        frag->page_offset;
                phy_addr = (u32) dma_map_single(NULL, virt_addr, frag->size,
                                                DMA_TO_DEVICE);

                XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
                XDmaBdV3_mSetLength(bd_ptr, frag->size);
                XDmaBdV3_mSetId(bd_ptr, NULL);
                if (i < (total_frags - 1)) {
                        XDmaBdV3_mClearLast(bd_ptr);
                }
        }
        XDmaBdV3_mSetLast(bd_ptr);

        /* Enqueue to HW */
        result = XTemac_SgCommit(&lp->Emac, XTE_SEND, total_frags,
                                 first_bd_ptr);
        if (result != XST_SUCCESS) {
                netif_stop_queue(dev);  /* stop send queue */
                dev_kfree_skb(skb);
                XDmaBdV3_mSetId(first_bd_ptr, NULL);
                lp->stats.tx_dropped++;
                printk(KERN_ERR
                       "%s: XTemac: could not commit TX buffer descriptors (%d).\n",
                       dev->name, result);
                reset(dev, __LINE__);
                return XST_FAILURE;
        }

        dev->trans_start = jiffies;
        return XST_SUCCESS;
}

/* The send function for frames sent in SGDMA mode when the TEMAC has TX DRE. */
static int
xenet_SgSend(struct sk_buff *skb, struct net_device *dev)
{
        /*
         * The following spin_lock protects the SgAlloc/SgCommit sequence,
         * which also runs in the SgSendHandlerBH bottom half or may be
         * triggered by another processor in the SMP case.
         */
        spin_lock_bh(&XTE_tx_spinlock);
        xenet_SgSend_internal(skb, dev);
        spin_unlock_bh(&XTE_tx_spinlock);

        return 0;
}

/* The send function for frames sent in SGDMA mode (no TX DRE in the TEMAC). */
static int
xenet_SgSend_NoDRE(struct sk_buff *skb, struct net_device *dev)
{
        int result;
        void *tx_addr;
        void *cur_addr;
        dma_addr_t phy_addr;
        size_t len;
        XDmaBdV3 *bd_ptr;
        skb_frag_t *frag;
        int nr_frags;
        int total_frags;
        int i;