📄 dec21x4xEnd.c
    /* Successful return */

    return (&pDrvCtrl->endObj);

    /***** Handle error cases *****/

error:
    {
    dec21x4xUnload (pDrvCtrl);
    return (NULL);
    }
    }

/*******************************************************************************
*
* dec21x4xUnload - unload a driver from the system
*
* This routine deallocates lists and frees allocated memory.
*
* RETURNS: OK, always.
*/

LOCAL STATUS dec21x4xUnload
    (
    DRV_CTRL *  pDrvCtrl
    )
    {
    DRV_LOG (DRV_DEBUG_LOAD, "EndUnload\n", 0, 0, 0, 0, 0, 0);

    /* deallocate lists */

    END_OBJ_UNLOAD (&pDrvCtrl->endObj);

    /* deallocate allocated shared memory */

    if (DRV_FLAGS_ISSET (DEC_MEMOWN) && pDrvCtrl->memBase)
        cacheDmaFree (pDrvCtrl->memBase);

    return (OK);
    }

/*******************************************************************************
*
* dec21x4xInitParse - parse parameter values from initString
*
* Parse the input string.  Fill in values in the driver control structure.
*
* The initialization string format is:
* "<device addr>:<PCI addr>:<ivec>:<ilevel>:<mem base>:<mem size>: \
*  <user flags>:<offset>"
*
* .IP <device addr>
* base address of hardware device registers
* .IP <PCI addr>
* main memory address over the PCI bus
* .IP <ivec>
* interrupt vector number
* .IP <ilevel>
* interrupt level
* .IP <mem base>
* base address of a DMA-able, cache-free, pre-allocated memory
* .IP <mem size>
* size of the pre-allocated memory
* .IP <user flags>
* user flags that control the run-time characteristics of the chip
* .IP <offset>
* memory offset for alignment
* .LP
*
* RETURNS: OK, or ERROR for invalid arguments.
*/

LOCAL STATUS dec21x4xInitParse
    (
    DRV_CTRL *  pDrvCtrl,
    char *      initString
    )
    {
    char *      tok;            /* an initString token */
    char *      holder = NULL;  /* points to initString fragment beyond tok */

    DRV_LOG (DRV_DEBUG_LOAD, "InitParse: Initstr=%s\n",
             (int)initString, 0, 0, 0, 0, 0);

    tok = strtok_r (initString, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->unit = atoi (tok);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->devAdrs = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->pciMemBase = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->ivec = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->ilevel = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->memBase = (char *) strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->memSize = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->usrFlags = strtoul (tok, NULL, 16);

    tok = strtok_r (NULL, ":", &holder);
    if (tok == NULL)
        return ERROR;
    pDrvCtrl->offset = atoi (tok);

    /* decode non-register user flags */

    if (pDrvCtrl->usrFlags & DEC_USR_XEA)
        DRV_FLAGS_SET (DEC_BSP_EADRS);

    switch (pDrvCtrl->usrFlags & DEC_USR_VER_MSK)
        {
        case DEC_USR_21143:
            DRV_FLAGS_SET (DEC_21143);
            break;

        case DEC_USR_21140:
            DRV_FLAGS_SET (DEC_21140);
            break;

        default:
            DRV_FLAGS_SET (DEC_21040);
            break;
        }

    /* print debug info */

    DRV_LOG (DRV_DEBUG_LOAD, "EndLoad: unit=%d devAdrs=%#x ivec=%#x ilevel=%#x "
             "membase=%#x memSize=%#x\n", pDrvCtrl->unit, pDrvCtrl->devAdrs,
             pDrvCtrl->ivec, pDrvCtrl->ilevel, (int)pDrvCtrl->memBase,
             pDrvCtrl->memSize);
    DRV_LOG (DRV_DEBUG_LOAD, "pciMemBase=%#x flags=%#x usrFlags=%#x offset=%#x\n",
             (int)pDrvCtrl->pciMemBase, pDrvCtrl->flags, pDrvCtrl->usrFlags,
             pDrvCtrl->offset, 0, 0);

    return (OK);
    }
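/*
 * Illustrative sketch only (not part of the original driver): a hypothetical
 * load string for the parser above.  Every numeric value is a made-up
 * placeholder; a real BSP supplies values matching its hardware.  The MUX
 * layer (muxDevLoad) normally prepends the "<unit>:" token, which is why
 * dec21x4xInitParse() consumes nine fields although the documented format
 * lists eight.
 */

#if FALSE   /* example only -- never compiled */
LOCAL char decExampleInitStr [] =   /* hypothetical values */
    "0:"            /* unit number (prepended by the MUX)                  */
    "0x81020000:"   /* <device addr>: CSR base of the 21x4x                */
    "0x0:"          /* <PCI addr>: host memory base as seen over PCI       */
    "0x12:"         /* <ivec>: interrupt vector                            */
    "0x5:"          /* <ilevel>: interrupt level                           */
    "-1:"           /* <mem base>: NONE (-1), driver allocates cache-safe  */
                    /* memory itself in dec21x4xInitMem()                  */
    "0x0:"          /* <mem size>: unused when <mem base> is NONE          */
    "0x0:"          /* <user flags>: placeholder DEC_USR_xxx bits          */
    "0";            /* <offset>: alignment offset                          */
#endif  /* FALSE */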
/*******************************************************************************
*
* dec21x4xInitMem - initialize memory
*
* Using data in the control structure, set up and initialize the memory
* areas needed.  If the memory address is not already specified, then
* allocate cache-safe memory.
*
* RETURNS: OK or ERROR.
*/

LOCAL STATUS dec21x4xInitMem
    (
    DRV_CTRL *  pDrvCtrl
    )
    {
    DEC_RD *    pRxD = pDrvCtrl->rxRing;
    DEC_TD *    pTxD = pDrvCtrl->txRing;
    M_CL_CONFIG dcMclBlkConfig;
    CL_DESC     clDesc;                 /* cluster description */
    char *      pBuf;
    int         ix;
    int         sz;
    char *      pShMem;

    /* Establish size of shared memory region we require */

    DRV_LOG (DRV_DEBUG_LOAD, "InitMem\n", 0, 0, 0, 0, 0, 0);

    if ((int)pDrvCtrl->memBase != NONE)         /* specified memory pool */
        {
        sz = ((pDrvCtrl->memSize - (RD_SIZ + TD_SIZ)) /
              (((2 + NUM_LOAN) * DEC_BUFSIZ) + RD_SIZ + TD_SIZ));
        pDrvCtrl->numRds = max (sz, MIN_RDS);
        pDrvCtrl->numTds = max (sz, MIN_TDS);
        }
    else
        {
        pDrvCtrl->numRds = NUM_RDS_DEF;
        pDrvCtrl->numTds = NUM_TDS_DEF;
        }

    /* Establish a region of shared memory */

    /*
     * OK.  We now know how much shared memory we need.  If the caller
     * provides a specific memory region, we check to see if the provided
     * region is large enough for our needs.  If the caller did not
     * provide a specific region, then we attempt to allocate the memory
     * from the system, using the cache-aware allocation system call.
     */

    switch ((int)pDrvCtrl->memBase)
        {
        default :                               /* caller provided memory */
            sz = ((pDrvCtrl->numRds * (DEC_BUFSIZ + RD_SIZ + 8)) + 4 +
                  (pDrvCtrl->numTds * (DEC_BUFSIZ + TD_SIZ + 8)) + 4 +
                  (NUM_LOAN * (DEC_BUFSIZ + 8)) + 4);

            if ((int) pDrvCtrl->memSize < sz)   /* not enough space */
                {
                LOG_MSG ("%s%d: not enough memory provided\n",
                         (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
                return (ERROR);
                }

            pShMem = pDrvCtrl->memBase;         /* set the beginning of pool */

            /* assume pool is cache coherent, copy null structure */

            pDrvCtrl->cacheFuncs = cacheNullFuncs;
            break;

        case NONE :                             /* get our own memory */

            /*
             * Because the structures that are shared between the device
             * and the driver may share cache lines, the possibility exists
             * that the driver could flush a cache line for a structure and
             * wipe out an asynchronous change by the device to a neighboring
             * structure.  Therefore, this driver cannot operate with memory
             * that is not write coherent.  We check for the availability of
             * such memory here, and abort if the system did not give us what
             * we need.
             */

            if (!CACHE_DMA_IS_WRITE_COHERENT ())
                {
                LOG_MSG ("%s%d: device requires cache coherent memory\n",
                         (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
                return (ERROR);
                }

            sz = (((pDrvCtrl->numRds + 1) * RD_SIZ) +
                  ((pDrvCtrl->numTds + 1) * TD_SIZ));

            pDrvCtrl->memBase = pShMem = (char *) cacheDmaMalloc (sz);
            if ((int)pShMem == NULL)
                {
                LOG_MSG ("%s%d - system memory unavailable\n",
                         (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
                return (ERROR);
                }

            pDrvCtrl->memSize = sz;
            DRV_FLAGS_SET (DEC_MEMOWN);

            /* copy the DMA structure */

            pDrvCtrl->cacheFuncs = cacheDmaFuncs;
            break;
        }

    /* zero the shared memory */

    memset (pShMem, 0, (int) sz);

    /* carve Rx memory structure */

    pRxD = pDrvCtrl->rxRing = (DEC_RD *) (((int)pShMem + 0x03) & ~0x03);

    /* carve Tx memory structure */

    pTxD = pDrvCtrl->txRing = (DEC_TD *) (pDrvCtrl->rxRing + pDrvCtrl->numRds);

    /* Initialize net buffer pool for tx/rx buffers */

    memset ((char *)&dcMclBlkConfig, 0, sizeof (dcMclBlkConfig));
    memset ((char *)&clDesc, 0, sizeof (clDesc));

    dcMclBlkConfig.mBlkNum  = pDrvCtrl->numRds * 4;
    clDesc.clNum            = pDrvCtrl->numRds + pDrvCtrl->numTds + NUM_LOAN;
    dcMclBlkConfig.clBlkNum = clDesc.clNum;

    /*
     * mBlk and cluster configuration memory size initialization;
     * memory size adjusted to hold the netPool pointer at the head.
     */

    dcMclBlkConfig.memSize =
        ((dcMclBlkConfig.mBlkNum * (MSIZE + sizeof (long))) +
         (dcMclBlkConfig.clBlkNum * (CL_BLK_SZ + sizeof (long))));

    if ((dcMclBlkConfig.memArea = (char *) memalign (sizeof (long),
                                                     dcMclBlkConfig.memSize))
        == NULL)
        return (ERROR);

    clDesc.clSize  = DEC_BUFSIZ;
    clDesc.memSize = ((clDesc.clNum * (clDesc.clSize + 4)) + 4);

    if (DRV_FLAGS_ISSET (DEC_MEMOWN))
        {
        clDesc.memArea = (char *) cacheDmaMalloc (clDesc.memSize);
        if ((int)clDesc.memArea == NULL)
            {
            LOG_MSG ("%s%d - system memory unavailable\n",
                     (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
            return (ERROR);
            }
        }
    else
        clDesc.memArea = (char *) (pDrvCtrl->txRing + pDrvCtrl->numTds);

    if ((pDrvCtrl->endObj.pNetPool = malloc (sizeof (NET_POOL))) == NULL)
        return (ERROR);

    /* Initialize the net buffer pool with transmit buffers */

    if (netPoolInit (pDrvCtrl->endObj.pNetPool, &dcMclBlkConfig,
                     &clDesc, 1, NULL) == ERROR)
        {
        LOG_MSG ("%s%d - netPoolInit failed\n",
                 (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
        return (ERROR);
        }

    /* Save the cluster pool id */

    pDrvCtrl->clPoolId = clPoolIdGet (pDrvCtrl->endObj.pNetPool,
                                      DEC_BUFSIZ, FALSE);

    /* Clear all indices */

    pDrvCtrl->rxIndex   = 0;
    pDrvCtrl->txIndex   = 0;
    pDrvCtrl->txDiIndex = 0;

    /* Set up the receive ring */

    for (ix = 0; ix < pDrvCtrl->numRds; ix++, pRxD++)
        {
        pBuf = (char *) NET_BUF_ALLOC ();
        if (pBuf == NULL)
            {
            LOG_MSG ("%s%d - netClusterGet failed\n",
                     (int) DRV_NAME, pDrvCtrl->unit, 0, 0, 0, 0);
            return (ERROR);
            }

        pRxD->rDesc2 = PCISWAP (DEC_VIRT_TO_PCI (pBuf));   /* buffer 1 */
        pRxD->rDesc3 = 0;                                   /* no second buffer */

        /* buffer size */

        pRxD->rDesc1 = PCISWAP (RDESC1_RBS1_VAL (DEC_BUFSIZ) |
                                RDESC1_RBS2_VAL (0));

        if (ix == (pDrvCtrl->numRds - 1))           /* if it is the last one */
            pRxD->rDesc1 |= PCISWAP (RDESC1_RER);   /* end of receive ring */

        pRxD->rDesc0 = PCISWAP (RDESC0_OWN);        /* give ownership to chip */
        }

    /* Set up the transmit ring */

    for (ix = 0; ix < pDrvCtrl->numTds; ix++, pTxD++)
        {
        /* empty -- no buffers at this time */

        pTxD->tDesc2 = 0;
        pTxD->tDesc3 = 0;

        pTxD->tDesc1 = PCISWAP ((TDESC1_TBS1_PUT (0) |  /* buffer1 size */
                                 TDESC1_TBS2_PUT (0) |  /* buffer2 size */
                                 TDESC1_IC |            /* intrpt on xmit */
                                 TDESC1_LS |            /* last segment */
                                 TDESC1_FS));           /* first segment */

        if (ix == (pDrvCtrl->numTds - 1))           /* if it is the last one */
            pTxD->tDesc1 |= PCISWAP (TDESC1_TER);   /* end of Xmit ring */

        pTxD->tDesc0 = 0;                           /* owner is host */
        }

    /* Flush the write pipe */

    CACHE_PIPE_FLUSH ();

    return (OK);
    }
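/*
 * Illustrative sketch only (not part of the original driver): the caller-side
 * arithmetic behind the "not enough memory provided" check in
 * dec21x4xInitMem() above.  A BSP that passes a pre-allocated pool in
 * <mem base>/<mem size> must size it at least this large for the ring sizes
 * it wants.  The helper name is hypothetical; DEC_BUFSIZ, RD_SIZ, TD_SIZ and
 * NUM_LOAN are the driver's own macros.
 */

#if FALSE   /* example only -- never compiled */
LOCAL int dec21x4xPoolSize          /* hypothetical helper */
    (
    int numRds,                     /* receive descriptors wanted  */
    int numTds                      /* transmit descriptors wanted */
    )
    {
    /*
     * Mirrors the "default" (caller-provided memory) case above: one
     * buffer, one descriptor and an 8-byte pad per Rx/Tx entry, NUM_LOAN
     * loaner buffers (each with an 8-byte pad), plus three 4-byte
     * alignment pads.
     */
    return ((numRds * (DEC_BUFSIZ + RD_SIZ + 8)) + 4 +
            (numTds * (DEC_BUFSIZ + TD_SIZ + 8)) + 4 +
            (NUM_LOAN * (DEC_BUFSIZ + 8)) + 4);
    }
#endif  /* FALSE */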
/*******************************************************************************
*
* dec21x4xStart - start the device
*
* This function initializes the device and calls BSP functions to connect
* interrupts and start the device running in interrupt mode.
*
* The complement of this routine is dec21x4xStop.  Once a unit is reset by
* dec21x4xStop, it may be re-initialized to a running state by this routine.
*
* RETURNS: OK if successful, otherwise ERROR.
*/