adapter.c (Xilinx trimode MAC driver for Linux, C, part 1 of 5)

                                             DMA_FROM_DEVICE);
        XDmaBdV3_mSetBufAddrLow(BdCurPtr, new_skb_baddr);
        XDmaBdV3_mSetLength(BdCurPtr, lp->max_frame_size);
        XDmaBdV3_mSetId(BdCurPtr, new_skb);

        BdCurPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdCurPtr);
        new_skb = skb_dequeue(&sk_buff_list);
    }

    /* enqueue RxBD with the attached skb buffers such that it is
     * ready for frame reception */
    result = XTemac_SgCommit(&lp->Emac, XTE_RECV, num_sk_buffs, BdPtr);
    if (result != XST_SUCCESS) {
        printk(KERN_ERR
               "%s: XTemac: (SgSetupRecvBuffers) XTemac_SgCommit unsuccessful (%d)\n",
               dev->name, result);
        skb_queue_purge(&sk_buff_list);
        BdCurPtr = BdPtr;
        while (num_sk_buffs > 0) {
            XDmaBdV3_mSetId(BdCurPtr, NULL);
            BdCurPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdCurPtr);
            num_sk_buffs--;
        }
        reset(dev, __LINE__);
        return;
    }
}
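
/*
 * Note: the opening half of _xenet_SgSetupRecvBuffers() falls before the
 * start of this excerpt. From the loop that survives here, it evidently
 * pre-allocates skbs into sk_buff_list and DMA-maps each one (the clipped
 * dma_map_single(..., DMA_FROM_DEVICE) call above), then records the bus
 * address and skb pointer in consecutive RxBDs before handing num_sk_buffs
 * of them to the engine with XTemac_SgCommit(). On commit failure the code
 * unwinds completely: it frees the skbs and clears the BD IDs so no BD is
 * left pointing at freed memory, then resets the device.
 */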

static void
SgRecvHandlerBH(unsigned long p)
{
    struct net_device *dev;
    struct net_local *lp;
    struct sk_buff *skb;
    u32 len, skb_baddr;
    XStatus result;
    unsigned long flags;
    XDmaBdV3 *BdPtr, *BdCurPtr;
    unsigned int bd_processed, bd_processed_saved;

    while (1) {
        spin_lock_irqsave(&receivedQueueSpin, flags);
        if (list_empty(&receivedQueue)) {
            spin_unlock_irqrestore(&receivedQueueSpin, flags);
            break;
        }
        lp = list_entry(receivedQueue.next, struct net_local, rcv);
        list_del_init(&(lp->rcv));
        dev = lp->ndev;
        spin_unlock_irqrestore(&receivedQueueSpin, flags);

        spin_lock(&XTE_rx_spinlock);
        if ((bd_processed = XTemac_SgGetProcessed(&lp->Emac, XTE_RECV,
                                                  XTE_RECV_BD_CNT, &BdPtr)) > 0) {
            bd_processed_saved = bd_processed;
            BdCurPtr = BdPtr;
            do {
                len = XDmaBdV3_mGetLength(BdCurPtr);

                /* get ptr to skb */
                skb = (struct sk_buff *) XDmaBdV3_mGetId(BdCurPtr);

                /* get and free up dma handle used by skb->data */
                skb_baddr = (dma_addr_t) XDmaBdV3_mGetBufAddrLow(BdCurPtr);
                dma_unmap_single(NULL, skb_baddr, lp->max_frame_size,
                                 DMA_FROM_DEVICE);

                /* reset ID */
                XDmaBdV3_mSetId(BdCurPtr, NULL);

                /* setup received skb and send it upstream */
                skb_put(skb, len);      /* Tell the skb how much data we got. */
                skb->dev = dev;

                /* this routine adjusts skb->data to skip the header */
                skb->protocol = eth_type_trans(skb, dev);

                /* default the ip_summed value */
                skb->ip_summed = CHECKSUM_NONE;

                /* if we're doing rx csum offload, set it up */
                if (((lp->local_features & LOCAL_FEATURE_RX_CSUM) != 0) &&
                    (skb->protocol == __constant_htons(ETH_P_IP)) &&
                    (skb->len > 64)) {
                    unsigned int csum;

                    /*
                     * This hardware only supports proper checksum calculations
                     * on TCP/UDP packets.
                     *
                     * skb->csum is an overloaded value. On send, skb->csum is
                     * the offset into the buffer (skb->h.raw) to place the
                     * csum value. On receive, this field gets set to the
                     * actual csum value before it's passed up the stack.
                     *
                     * If we set skb->ip_summed to CHECKSUM_HW, the ethernet
                     * stack above will compute the pseudoheader csum value and
                     * add it to the partial checksum already computed (to be
                     * placed in skb->csum) and verify it.
                     *
                     * Setting skb->ip_summed to CHECKSUM_NONE means that the
                     * checksum didn't verify and the stack will (re)check it.
                     *
                     * Setting skb->ip_summed to CHECKSUM_UNNECESSARY means
                     * that the checksum was verified/assumed to be good and
                     * the stack does not need to (re)check it.
                     *
                     * The ethernet stack above will (re)compute the checksum
                     * under the following conditions:
                     * 1) skb->ip_summed was set to CHECKSUM_NONE
                     * 2) skb->len does not match the length of the ethernet
                     *    packet determined by parsing the packet. In this case
                     *    the ethernet stack will assume any prior checksum
                     *    value was miscomputed and throw it away.
                     * 3) skb->ip_summed was set to CHECKSUM_HW, skb->csum was
                     *    set, but the result does not check out ok by the
                     *    ethernet stack.
                     *
                     * If the TEMAC hardware stripping feature is off, each
                     * packet will contain an FCS field which will have been
                     * computed by the hardware checksum operation. This 4 byte
                     * FCS value needs to be subtracted back out of the
                     * checksum value computed by hardware as it's not included
                     * in a normal ethernet packet checksum.
                     *
                     * The minimum transfer packet size over the wire is 64
                     * bytes. If the packet is sent as exactly 64 bytes, then
                     * it probably contains some random padding bytes. It's
                     * somewhat difficult to determine the actual length of the
                     * real packet data, so we just let the stack recheck the
                     * checksum for us.
                     *
                     * After the call to eth_type_trans(), the following holds
                     * true:
                     *    skb->data points to the beginning of the ip header
                     */
                    csum = XTemac_mSgRecvBdCsumGet(BdCurPtr);

#if ! XTE_AUTOSTRIPPING
                    if (!lp->stripping) {
                        /* take off the FCS */
                        u16 *data;

                        /* FCS is 4 bytes */
                        skb_put(skb, -4);

                        data = (u16 *) (&skb->data[skb->len]);

                        /* subtract out the FCS from the csum value */
                        csum = csum_sub(csum, *data /* & 0xffff */);
                        data++;
                        csum = csum_sub(csum, *data /* & 0xffff */);
                    }
#endif
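                    /*
                     * Note on the csum_sub() pair above: the Internet checksum
                     * is a 16-bit one's-complement sum, so the 4-byte FCS is
                     * folded back out by subtracting its two 16-bit words
                     * separately. For example, if the hardware total included
                     * FCS words 0x1234 and 0xabcd, then
                     * csum_sub(csum_sub(csum, 0x1234), 0xabcd) is the checksum
                     * over the frame without its FCS.
                     */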
                    skb->csum = csum;
                    skb->ip_summed = CHECKSUM_HW;

                    lp->rx_hw_csums++;
                }

                lp->stats.rx_packets++;
                lp->stats.rx_bytes += len;

                netif_rx(skb);          /* Send the packet upstream. */

                BdCurPtr = XTemac_mSgRecvBdNext(&lp->Emac, BdCurPtr);
                bd_processed--;
            } while (bd_processed > 0);

            /* give the descriptors back to the driver */
            result = XTemac_SgFree(&lp->Emac, XTE_RECV, bd_processed_saved, BdPtr);
            if (result != XST_SUCCESS) {
                printk(KERN_ERR "%s: XTemac: SgFree unsuccessful (%d)\n",
                       dev->name, result);
                reset(dev, __LINE__);
                spin_unlock(&XTE_rx_spinlock);
                return;
            }

            _xenet_SgSetupRecvBuffers(dev);
        }
        XTemac_IntrSgEnable(&lp->Emac, XTE_RECV);
        spin_unlock(&XTE_rx_spinlock);
    }
}
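
/*
 * Sketch (not on this page of the listing): the ISR/bottom-half pair here is
 * presumably wired up elsewhere in this file with a global tasklet plus the
 * list head and lock it drains, along the lines of:
 *
 *     static LIST_HEAD(receivedQueue);
 *     static spinlock_t receivedQueueSpin = SPIN_LOCK_UNLOCKED;
 *     static DECLARE_TASKLET(SgRecvBH, SgRecvHandlerBH, 0);
 *
 * SgRecvHandler() below runs in interrupt context: it only queues the
 * device's net_local onto receivedQueue, masks further SG receive interrupts,
 * and schedules the tasklet; SgRecvHandlerBH() above does the heavy lifting.
 */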

static void
SgRecvHandler(void *CallBackRef)
{
    struct net_local *lp;
    struct list_head *cur_lp;

    spin_lock(&receivedQueueSpin);
    lp = (struct net_local *) CallBackRef;
    list_for_each(cur_lp, &receivedQueue) {
        if (cur_lp == &(lp->rcv)) {
            break;
        }
    }
    if (cur_lp != &(lp->rcv)) {
        list_add_tail(&lp->rcv, &receivedQueue);
        XTemac_IntrSgDisable(&lp->Emac, XTE_RECV);
        tasklet_schedule(&SgRecvBH);
    }
    spin_unlock(&receivedQueueSpin);
}

/* The callback function for errors. */
static void
ErrorHandler(void *CallbackRef, XStatus ErrClass, u32 Word1, u32 Word2)
{
    struct net_device *dev;
    struct net_local *lp;
    int need_reset;

    spin_lock(&XTE_spinlock);
    dev = (struct net_device *) CallbackRef;
    lp = (struct net_local *) dev->priv;
    need_reset = status_requires_reset(ErrClass);

    printk(KERN_ERR "%s: XTemac device error %d (%d, %d)%s\n",
           dev->name, ErrClass, Word1, Word2,
           need_reset ? ", resetting device." : "");
    if (need_reset)
        reset(dev, __LINE__);

    spin_unlock(&XTE_spinlock);
}

static int
descriptor_init(struct net_device *dev)
{
    struct net_local *lp = (struct net_local *) dev->priv;
    int recvsize, sendsize;
    int dftsize;
    u32 *recvpoolptr, *sendpoolptr;
    void *recvpoolphy, *sendpoolphy;
    XStatus result;
    XDmaBdV3 bd_tx_template;
    XDmaBdV3 bd_rx_template;
    int XferType = XDMAV3_DMACR_TYPE_BFBURST_MASK;
    int XferWidth = XDMAV3_DMACR_DSIZE_64_MASK;

    /* calc size of descriptor space pool; alloc from non-cached memory */
    dftsize = XDmaV3_mSgListMemCalc(ALIGNMENT_BD,
                                    XTE_RECV_BD_CNT + XTE_SEND_BD_CNT);
    printk(KERN_INFO "XTemac: buffer descriptor size: %d (0x%0x)\n",
           dftsize, dftsize);

#if BD_IN_BRAM == 0
    lp->desc_space = dma_alloc_coherent(NULL, dftsize,
                                        &lp->desc_space_handle, GFP_KERNEL);
#else
    lp->desc_space_handle = BRAM_BASEADDR;
    lp->desc_space = ioremap(lp->desc_space_handle, dftsize);
#endif
    if (lp->desc_space == 0) {
        return -1;
    }

    lp->desc_space_size = dftsize;
    printk(KERN_INFO
           "XTemac: (buffer_descriptor_init) phy: 0x%x, virt: 0x%x, size: 0x%x\n",
           lp->desc_space_handle, (unsigned int) lp->desc_space,
           lp->desc_space_size);

    /* calc size of send and recv descriptor space */
    recvsize = XDmaV3_mSgListMemCalc(ALIGNMENT_BD, XTE_RECV_BD_CNT);
    sendsize = XDmaV3_mSgListMemCalc(ALIGNMENT_BD, XTE_SEND_BD_CNT);

    recvpoolptr = lp->desc_space;
    sendpoolptr = (void *) ((u32) lp->desc_space + recvsize);
    recvpoolphy = (void *) lp->desc_space_handle;
    sendpoolphy = (void *) ((u32) lp->desc_space_handle + recvsize);

    /* set up descriptor spaces using a template */

    /*
     * rx template: create the ring for Rx descriptors.
     * The following attributes will be in effect for all RxBDs.
     */
    XDmaBdV3_mClear(&bd_rx_template);
    XDmaBdV3_mSetLast(&bd_rx_template);           /* 1:1 mapping of BDs to buffers */
    XDmaBdV3_mSetBufIncrement(&bd_rx_template);   /* buffers at incrementing addresses */
    XDmaBdV3_mSetBdPage(&bd_rx_template, 0);      /* default to 32 bit addressing */
    XDmaBdV3_mSetBufAddrHigh(&bd_rx_template, 0); /* default to 32 bit addressing */
    XDmaBdV3_mSetDevSel(&bd_rx_template, 0);      /* always 0 */
    XDmaBdV3_mSetTransferType(&bd_rx_template, XferType, XferWidth); /* data bus
                                                                        attributes */

    /*
     * tx template: create the ring for Tx descriptors. If there is no Tx DRE,
     * buffers must occupy a single descriptor, so set the "last" field for
     * all descriptors.
     */
    XDmaBdV3_mClear(&bd_tx_template);
    XDmaBdV3_mUseDre(&bd_tx_template);            /* always use DRE if available */
    XDmaBdV3_mSetBufIncrement(&bd_tx_template);   /* buffers at incrementing addresses */
    XDmaBdV3_mSetBdPage(&bd_tx_template, 0);      /* default to 32 bit addressing */
    XDmaBdV3_mSetBufAddrHigh(&bd_tx_template, 0); /* default to 32 bit addressing */
    XDmaBdV3_mSetDevSel(&bd_tx_template, 0);      /* always 0 */
    XDmaBdV3_mSetTransferType(&bd_tx_template, XferType, XferWidth); /* data bus
                                                                        attributes */
    XTemac_mSgSendBdCsumDisable(&bd_tx_template); /* disable csum offload by default */
    XTemac_mSgSendBdCsumSeed(&bd_tx_template, 0); /* don't need csum seed feature */

    if (XTemac_mIsTxDre(&lp->Emac) == FALSE) {
        XDmaBdV3_mSetLast(&bd_tx_template);
    }

    if ((result = XTemac_SgSetSpace(&lp->Emac, XTE_RECV, (u32) recvpoolphy,
                                    (u32) recvpoolptr, ALIGNMENT_BD,
                                    XTE_RECV_BD_CNT,
                                    &bd_rx_template)) != XST_SUCCESS) {
        printk(KERN_ERR "%s: XTemac: SgSetSpace RECV ERROR %d\n",
               dev->name, result);
        return -EIO;
    }

    if ((result = XTemac_SgSetSpace(&lp->Emac, XTE_SEND, (u32) sendpoolphy,
                                    (u32) sendpoolptr, ALIGNMENT_BD,
                                    XTE_SEND_BD_CNT,
                                    &bd_tx_template)) != XST_SUCCESS) {
        printk(KERN_ERR "%s: XTemac: SgSetSpace SEND ERROR %d\n",
               dev->name, result);
        return -EIO;
    }

    _xenet_SgSetupRecvBuffers(dev);

    return 0;
}
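
/*
 * Layout of the descriptor pool carved out in descriptor_init() above:
 * one contiguous allocation, RX BDs first, TX BDs immediately after.
 *
 *   desc_space (virt) / desc_space_handle (phys)
 *   +---------------------------+---------------------------+
 *   | RX BDs (XTE_RECV_BD_CNT)  | TX BDs (XTE_SEND_BD_CNT)  |
 *   +---------------------------+---------------------------+
 *   |<------- recvsize -------->|<------- sendsize -------->|
 *    recvpoolptr/recvpoolphy     sendpoolptr/sendpoolphy
 */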

/*
 * If DRE is not enabled, allocate a ring buffer to use to aid in transferring
 * aligned packets for DMA.
 */
static int
tx_ring_buffer_init(struct net_device *dev, unsigned max_frame_size)
{
    struct net_local *lp = (struct net_local *) dev->priv;
    int idx;

    lp->tx_buffers_cur = -1;

    /* pre-initialize values; the error handling code relies on these */
    lp->tx_buffers      = NULL;
    lp->tx_orig_buffers = NULL;
    lp->tx_phys_buffers = NULL;
    idx = -1;

    if (XTemac_mIsTxDre(&lp->Emac) == FALSE) {
        /* allocate the space for the buffer pointer arrays */
        lp->tx_orig_buffers = vmalloc(sizeof(void *)     * XTE_SEND_BD_CNT);
        lp->tx_phys_buffers = vmalloc(sizeof(dma_addr_t) * XTE_SEND_BD_CNT);
        lp->tx_buffers      = vmalloc(sizeof(void *)     * XTE_SEND_BD_CNT);

        /* handle allocation error */
        if ((!lp->tx_orig_buffers) || (!lp->tx_buffers) || (!lp->tx_phys_buffers)) {
            printk(KERN_ERR "XTemac: Could not vmalloc descriptor pointer arrays.\n");
            goto error;
        }

        /* now allocate the actual buffers */
        for (idx = 0; idx < XTE_SEND_BD_CNT; idx++) {
            lp->tx_orig_buffers[idx] =
                dma_alloc_coherent(NULL, max_frame_size + ALIGNMENT_SEND_PERF,
                                   &lp->tx_phys_buffers[idx], GFP_KERNEL);

            /* handle allocation error */
            if (!lp->tx_orig_buffers[idx]) {
                printk(KERN_ERR "XTemac: Could not alloc TX buffer %d (%d bytes). "
                       "Cleaning up.\n",
                       idx, max_frame_size + ALIGNMENT_SEND_PERF);
                goto error;
            }

            lp->tx_buffers[idx] = lp->tx_orig_buffers[idx] +
                BUFFER_ALIGNSEND_PERF(lp->tx_orig_buffers[idx]);
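
/*
 * BUFFER_ALIGNSEND_PERF() is not defined on this page of the listing. A
 * plausible reconstruction (an assumption, not verified against the full
 * source): the byte offset that rounds an address up to the next
 * ALIGNMENT_SEND_PERF boundary, so that each lp->tx_buffers[idx] is an
 * aligned view into its over-allocated lp->tx_orig_buffers[idx] backing
 * buffer:
 *
 *     #define BUFFER_ALIGNSEND_PERF(adr) \
 *         ((ALIGNMENT_SEND_PERF - ((u32)(adr) % ALIGNMENT_SEND_PERF)) \
 *          % ALIGNMENT_SEND_PERF)
 */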
