
📄 adapter.c

📁 xilinx trimode mac driver for linux
💻 C
📖 Page 1 / 5
    struct net_local *lp = (struct net_local *) dev->priv;

    /* Without the DRE hardware engine, DMA transfers must be double-word
     * aligned (8 bytes), front and back. If there are no fragments and the
     * main chunk is aligned at the front, let the regular xenet_SgSend()
     * handle it. Otherwise, copy the whole thing into the tx ring buffer
     * before sending it out.
     *
     * For better performance, the tx ring buffer alignment set in
     * ALIGNMENT_SEND can be set to 32, which is cache-line aligned on the
     * PPC405 and PPC440.
     */
    if (!skb_is_nonlinear(skb) && (0 == BUFFER_ALIGNSEND(skb->data))) {
        /* The buffer is linear and already aligned nicely. We can send it
         * using xenet_SgSend(). Done.
         */
        return xenet_SgSend(skb, dev);
    }

    /* The buffer is either nonlinear or not aligned. We have to copy it. */
    nr_frags = skb_shinfo(skb)->nr_frags;
    total_frags = nr_frags + 1;

    /* stats */
    lp->realignments++;
    if (lp->max_frags_in_a_packet < total_frags) {
        lp->max_frags_in_a_packet = total_frags;
    }

    /* Copy the skb. Get the address of the next buffer in the ring. Also,
     * remember the physical address of that buffer for the DMA setup.
     */
    cur_addr = lp->tx_buffers[lp->tx_buffers_cur];
    phy_addr = lp->tx_phys_buffers[lp->tx_buffers_cur];

    /* set up tx_buffers_cur for the next use */
    lp->tx_buffers_cur++;
    if (lp->tx_buffers_cur >= XTE_SEND_BD_CNT) {
        lp->tx_buffers_cur = 0;
    }

    tx_addr = cur_addr;

    /* Copy the linear header part, then each page fragment, back to back
     * into the ring buffer.
     */
    len = skb_headlen(skb);
    cacheable_memcpy(cur_addr, skb->data, len);
    cur_addr += len;

    frag = &skb_shinfo(skb)->frags[0];
    for (i = 1; i < total_frags; i++, frag++) {
        void *p = (void *) page_address(frag->page) + frag->page_offset;

        len = frag->size;
        cacheable_memcpy(cur_addr, p, len);
        cur_addr += len;
    }

    /*
     * set up the transfer
     */
    result = XTemac_SgAlloc(&lp->Emac, XTE_SEND, 1, &bd_ptr);
    if (result != XST_SUCCESS) {
        netif_stop_queue(dev);  /* stop send queue */
        lp->deferred_skb = skb; /* buffer the sk_buff; it will be sent
                                   from interrupt context */
        return result;
    }

    /* A single BD describes the whole frame, which was copied contiguously
     * into the ring buffer above.
     */
    XDmaBdV3_mSetBufAddrLow(bd_ptr, phy_addr);
    XDmaBdV3_mSetLength(bd_ptr, skb->len);
    XDmaBdV3_mSetId(bd_ptr, skb);
    XDmaBdV3_mClearLast(bd_ptr);

    if (skb->ip_summed == CHECKSUM_HW) {
        /*
         * skb->data points to the beginning of the whole packet.
         * skb->h.raw points to the beginning of the IP header.
         * skb->csum, on send, is the offset into the buffer (skb->h.raw)
         * at which to place the csum value.
         * tx_addr is the address where the data was really copied (for
         * alignment).
         */
        XTemac_mSgSendBdCsumEnable(bd_ptr);
        XTemac_mSgSendBdCsumSetup(bd_ptr,
                                  (u32) (tx_addr + (skb->h.raw - skb->data)),
                                  (u32) (tx_addr + (skb->h.raw - skb->data) + skb->csum));
        lp->tx_hw_csums++;
    } else {
        /*
         * This routine will do no harm even if hardware checksum capability
         * is off.
         */
        XTemac_mSgSendBdCsumDisable(bd_ptr);
    }
    XDmaBdV3_mSetLast(bd_ptr);

    /* Enqueue to HW */
    result = XTemac_SgCommit(&lp->Emac, XTE_SEND, 1, bd_ptr);
    if (result != XST_SUCCESS) {
        netif_stop_queue(dev);  /* stop send queue */
        dev_kfree_skb(skb);
        XDmaBdV3_mSetId(bd_ptr, NULL);
        lp->stats.tx_dropped++;
        printk(KERN_ERR "%s: XTemac: could not commit TX buffer descriptor (%d).\n",
               dev->name, result);
        reset(dev, __LINE__);
        return XST_FAILURE;
    }

    dev->trans_start = jiffies;
    return XST_SUCCESS;
}
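/*
 * Illustrative sketch (not part of this driver source): BUFFER_ALIGNSEND()
 * and BUFFER_ALIGNRECV(), used above and below, are defined elsewhere in the
 * driver. Conceptually they return how many bytes an address falls short of
 * the required alignment (ALIGNMENT_SEND / ALIGNMENT_RECV, e.g. 8 bytes for
 * non-DRE DMA), so a result of 0 means "already aligned". A minimal version
 * could look like the following; the real definitions may differ.
 */
#if 0
#define BUFFER_ALIGNSEND(adr) ((ALIGNMENT_SEND - (u32)(adr)) % ALIGNMENT_SEND)
#define BUFFER_ALIGNRECV(adr) ((ALIGNMENT_RECV - (u32)(adr)) % ALIGNMENT_RECV)
#endif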
/* The callback function for completed frames sent in SGDMA mode. */
static void SgSendHandlerBH(unsigned long p);
static void SgRecvHandlerBH(unsigned long p);

DECLARE_TASKLET(SgSendBH, SgSendHandlerBH, 0);
DECLARE_TASKLET(SgRecvBH, SgRecvHandlerBH, 0);

static void
SgSendHandlerBH(unsigned long p)
{
    struct net_device *dev;
    struct net_local *lp;
    XDmaBdV3 *BdPtr, *BdCurPtr;
    unsigned long len;
    unsigned long flags;
    struct sk_buff *skb;
    dma_addr_t skb_dma_addr;
    XStatus result = XST_SUCCESS;
    unsigned int bd_processed, bd_processed_save;

    while (1) {
        /* Pull the next adapter off the queue of devices with TX work. */
        spin_lock_irqsave(&sentQueueSpin, flags);
        if (list_empty(&sentQueue)) {
            spin_unlock_irqrestore(&sentQueueSpin, flags);
            break;
        }
        lp = list_entry(sentQueue.next, struct net_local, xmit);
        list_del_init(&(lp->xmit));
        spin_unlock_irqrestore(&sentQueueSpin, flags);

        spin_lock(&XTE_tx_spinlock);
        dev = lp->ndev;
        bd_processed_save = 0;
        while ((bd_processed =
                XTemac_SgGetProcessed(&lp->Emac, XTE_SEND, XTE_SEND_BD_CNT,
                                      &BdPtr)) > 0) {
            bd_processed_save = bd_processed;
            BdCurPtr = BdPtr;
            do {
                len = XDmaBdV3_mGetLength(BdCurPtr);
                skb_dma_addr = (dma_addr_t) XDmaBdV3_mGetBufAddrLow(BdCurPtr);
                dma_unmap_single(NULL, skb_dma_addr, len, DMA_TO_DEVICE);

                /* get ptr to skb */
                skb = (struct sk_buff *) XDmaBdV3_mGetId(BdCurPtr);
                if (skb)
                    dev_kfree_skb(skb);

                /* reset BD id */
                XDmaBdV3_mSetId(BdCurPtr, NULL);

                lp->stats.tx_bytes += len;
                if (XDmaBdV3_mGetLast(BdCurPtr)) {
                    lp->stats.tx_packets++;
                }

                BdCurPtr = XTemac_mSgSendBdNext(&lp->Emac, BdCurPtr);
                bd_processed--;
            } while (bd_processed > 0);

            result = XTemac_SgFree(&lp->Emac, XTE_SEND, bd_processed_save, BdPtr);
            if (result != XST_SUCCESS) {
                printk(KERN_ERR "%s: XTemac: SgFree() error %d.\n",
                       dev->name, result);
                reset(dev, __LINE__);
                spin_unlock(&XTE_tx_spinlock);
                return;
            }
        }
        XTemac_IntrSgEnable(&lp->Emac, XTE_SEND);

        /* Send out the deferred skb if it exists */
        if ((lp->deferred_skb) && bd_processed_save) {
            skb = lp->deferred_skb;
            lp->deferred_skb = NULL;
            result = xenet_SgSend_internal(skb, dev);
        }

        if (result == XST_SUCCESS) {
            netif_wake_queue(dev);      /* wake up send queue */
        }
        spin_unlock(&XTE_tx_spinlock);
    }
}

static void
SgSendHandler(void *CallBackRef)
{
    struct net_local *lp;
    struct list_head *cur_lp;

    spin_lock(&sentQueueSpin);
    lp = (struct net_local *) CallBackRef;

    /* Queue this adapter for the bottom half, unless it is already queued. */
    list_for_each(cur_lp, &sentQueue) {
        if (cur_lp == &(lp->xmit)) {
            break;
        }
    }
    if (cur_lp != &(lp->xmit)) {
        list_add_tail(&lp->xmit, &sentQueue);
        XTemac_IntrSgDisable(&lp->Emac, XTE_SEND);
        tasklet_schedule(&SgSendBH);
    }
    spin_unlock(&sentQueueSpin);
}
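/*
 * Transmit completion path: the send-complete callback SgSendHandler() above
 * is invoked from the driver's interrupt handling; it queues the adapter on
 * sentQueue, masks further XTE_SEND SG interrupts, and schedules the SgSendBH
 * tasklet. SgSendHandlerBH() then frees the processed buffer descriptors and
 * their sk_buffs, re-enables the interrupt, resubmits any skb that was
 * deferred because no BD was available, and wakes the netif queue.
 */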
static void
xenet_tx_timeout(struct net_device *dev)
{
    struct net_local *lp;
    unsigned long flags;

    /*
     * Make sure that no interrupts come in that could cause reentrancy
     * problems in reset.
     */
    spin_lock_irqsave(&XTE_tx_spinlock, flags);

    lp = (struct net_local *) dev->priv;
    printk(KERN_ERR "%s: XTemac: exceeded transmit timeout of %lu ms.  Resetting emac.\n",
           dev->name, TX_TIMEOUT * 1000UL / HZ);
    lp->stats.tx_errors++;
    reset(dev, __LINE__);

    spin_unlock_irqrestore(&XTE_tx_spinlock, flags);
}

/* The callback function for frames received when in FIFO mode. */
static void
FifoRecvHandler(void *CallbackRef)
{
    struct net_device *dev;
    struct net_local *lp;
    struct sk_buff *skb;
    u32 len;
    XStatus Result;

#define XTE_RX_SINK_BUFFER_SIZE 1024
    static u32 rx_buffer_sink[XTE_RX_SINK_BUFFER_SIZE / sizeof(u32)];

    spin_lock(&XTE_rx_spinlock);
    dev = (struct net_device *) CallbackRef;
    lp = (struct net_local *) dev->priv;

    Result = XTemac_FifoRecv(&lp->Emac, &len);
    if (Result != XST_SUCCESS) {
        printk(KERN_ERR "%s: XTemac: could not read received packet length, error=%d.\n",
               dev->name, Result);
        lp->stats.rx_errors++;
        reset(dev, __LINE__);
        spin_unlock(&XTE_rx_spinlock);
        return;
    }

    if (!(skb = /*dev_ */ alloc_skb(len + ALIGNMENT_RECV, GFP_ATOMIC))) {
        /* Couldn't get memory. */
        lp->stats.rx_dropped++;
        printk(KERN_ERR "%s: XTemac: could not allocate receive buffer.\n",
               dev->name);

        /* Consume the data in the Xilinx TEMAC RX data FIFO so it stays in
         * sync with the RX length FIFO.
         */
        for (; len > XTE_RX_SINK_BUFFER_SIZE; len -= XTE_RX_SINK_BUFFER_SIZE) {
            XTemac_FifoRead(&lp->Emac, rx_buffer_sink, XTE_RX_SINK_BUFFER_SIZE,
                            XTE_PARTIAL_PACKET);
        }
        XTemac_FifoRead(&lp->Emac, rx_buffer_sink, len, XTE_END_OF_PACKET);

        spin_unlock(&XTE_rx_spinlock);
        return;
    }

    /* Read the packet data */
    Result = XTemac_FifoRead(&lp->Emac, skb->data, len, XTE_END_OF_PACKET);
    if (Result != XST_SUCCESS) {
        lp->stats.rx_errors++;
        dev_kfree_skb_irq(skb);
        printk(KERN_ERR "%s: XTemac: could not receive buffer, error=%d.\n",
               dev->name, Result);
        reset(dev, __LINE__);
        spin_unlock(&XTE_rx_spinlock);
        return;
    }

    lp->stats.rx_packets++;
    lp->stats.rx_bytes += len;
    spin_unlock(&XTE_rx_spinlock);

    skb_put(skb, len);          /* Tell the skb how much data we got. */
    skb->dev = dev;             /* Fill out required meta-data. */
    skb->protocol = eth_type_trans(skb, dev);
    skb->ip_summed = CHECKSUM_NONE;
    netif_rx(skb);              /* Send the packet upstream. */
}
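/*
 * FifoRecvHandler() above services the simple FIFO receive mode by copying
 * each frame out of the packet FIFO into a freshly allocated sk_buff. The
 * SGDMA receive path below instead pre-posts sk_buffs to the receive buffer
 * descriptor ring so the hardware can DMA frames directly into them.
 */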
/*
 * _xenet_SgSetupRecvBuffers allocates as many socket buffers (sk_buffs) as it
 * can, up to the number of free RX buffer descriptors. It then sets up the RX
 * buffer descriptors to DMA into those socket buffers.
 *
 * The net_device, dev, indicates on which device to operate for buffer
 * descriptor allocation.
 */
static void
_xenet_SgSetupRecvBuffers(struct net_device *dev)
{
    struct net_local *lp = (struct net_local *) dev->priv;
    int free_bd_count = XDmaV3_mSgGetFreeCnt(&(lp->Emac.RecvDma));
    int num_sk_buffs;
    struct sk_buff_head sk_buff_list;
    struct sk_buff *new_skb;
    u32 new_skb_baddr;
    XDmaBdV3 *BdPtr, *BdCurPtr;
    u32 align;
    XStatus result;
    int align_max = ALIGNMENT_RECV;

    if (lp->local_features & LOCAL_FEATURE_RX_DRE) {
        /* The hardware DRE can realign data, so no extra headroom is needed. */
        align_max = 0;
    }

    skb_queue_head_init(&sk_buff_list);
    for (num_sk_buffs = 0; num_sk_buffs < free_bd_count; num_sk_buffs++) {
        new_skb = alloc_skb(lp->max_frame_size + align_max, GFP_ATOMIC);
        if (new_skb == NULL) {
            break;
        }
        /*
         * I think the XTE_spinlock and the disabled Recv DMA interrupt protect
         * this list as well, so we can use the __ version just fine.
         */
        __skb_queue_tail(&sk_buff_list, new_skb);
    }
    if (!num_sk_buffs) {
        printk(KERN_ERR "%s: XTemac: alloc_skb unsuccessful\n", dev->name);
        return;
    }

    /* now we got a bunch o' sk_buffs */
    result = XTemac_SgAlloc(&lp->Emac, XTE_RECV, num_sk_buffs, &BdPtr);
    if (result != XST_SUCCESS) {
        /* we really shouldn't get this */
        skb_queue_purge(&sk_buff_list);
        printk(KERN_ERR "%s: XTemac: SgAlloc unsuccessful (%d)\n",
               dev->name, result);
        reset(dev, __LINE__);
        return;
    }

    BdCurPtr = BdPtr;
    new_skb = skb_dequeue(&sk_buff_list);
    while (new_skb) {
        if (!(lp->local_features & LOCAL_FEATURE_RX_DRE)) {
            /* Without DRE, make sure the buffer is long-word aligned. */
            align = BUFFER_ALIGNRECV(new_skb->data);
            if (align) {
                skb_reserve(new_skb, align);
            }
        }

        /* Get the DMA handle of skb->data */
        new_skb_baddr = (u32) dma_map_single(NULL, new_skb->data,
                                             lp->max_frame_size,
