📄 core_exp.c
pCore->Max_Io = maxIo;
if ( maxIo==1 )
    pCore->Is_Dump = MV_TRUE;
else
    pCore->Is_Dump = MV_FALSE;

switch( pCore->Device_Id ) {
case DEVICE_ID_6320:
    pCore->SATA_Port_Num = 2;
    pCore->Phy_Num = 2;
    pCore->MaxRegisterSet = 16;
    pCore->Port_Num = 0;
    pCore->MaxCmdSlotWidth = 9;
#ifndef _OS_BIOS
    MV_PRINT("DEVICE_ID_6320 is found.\n");
#endif
    break;
case DEVICE_ID_6340:
    pCore->SATA_Port_Num = 4;
    pCore->Phy_Num = 4;
    pCore->MaxRegisterSet = 16;
    pCore->Port_Num = 0;
    pCore->MaxCmdSlotWidth = 9;
#ifndef _OS_BIOS
    MV_PRINT("DEVICE_ID_6340 is found.\n");
#endif
    break;
case DEVICE_ID_6440:
    pCore->SATA_Port_Num = 4;
    pCore->Phy_Num = 4;
    pCore->MaxRegisterSet = 16;
    pCore->Port_Num = 0;
    pCore->MaxCmdSlotWidth = 9;
#ifndef _OS_BIOS
    MV_PRINT("DEVICE_ID_6440 is found. Rev %x\n", pCore->Revision_Id);
#endif
    break;
case DEVICE_ID_6480:
    pCore->SATA_Port_Num = 8;
    pCore->Phy_Num = 8;
    pCore->MaxRegisterSet = 32;
    pCore->Port_Num = 0;
    pCore->MaxCmdSlotWidth = 10;
#ifndef _OS_BIOS
    MV_PRINT("DEVICE_ID_6480 is found.\n");
#endif
    break;
}

// temp = ROUNDING( (MV_PTR_INTEGER)This + ROUNDING(sizeof(Core_Driver_Extension),8), 8 );
temp = (MV_PTR_INTEGER) mod_desc->extension + ROUNDING(sizeof(Core_Driver_Extension),8);

/* allocate memory for Domain_Device */
pCore->Devices = (PDomain_Device)temp;
temp += ROUNDING(sizeof(Domain_Device) * devCount, 8);

/* allocate memory for Domain_Expander */
pCore->Expanders = (PDomain_Expander)temp;
temp += ROUNDING(sizeof(Domain_Expander) * expanderCount, 8);

#ifdef SUPPORT_PM
/* allocate memory for Domain_PM */
pCore->PMs = (PDomain_PM)temp;
temp += ROUNDING(sizeof(Domain_PM) * pmCount, 8);
#endif /* SUPPORT_PM */

/* allocate memory for Running_Req */
pCore->Running_Req = (PMV_Request *)(temp);
temp += ROUNDING(sizeof(PMV_Request) * slotCount, 8);

/* If performance mode, allocate routing table for expander */
if (maxIo != 1) {
    for (i = 0; i < expanderCount; i++) {
        pCore->Expanders[i].Route_Table = (struct _Route_Table_Entry *)temp;
        temp += ROUNDING(sizeof(struct _Route_Table_Entry) * (MAX_PHY_NUM * MAX_ROUTE_INDEX), 8);
    }
}

#if !__RES_MGMT__
sgSize = sizeof(MV_SG_Entry) * sgEntryCount;
tmpSG = temp;
temp = temp + sgSize * internalReqCount;
for ( i=0; i<internalReqCount; i++ ) {
    pReq = (PMV_Request)temp;
    pReq->SG_Table.Entry_Ptr = (PMV_SG_Entry)tmpSG;
    pReq->SG_Table.Max_Entry_Count = sgEntryCount;
    List_AddTail(&pReq->Queue_Pointer, &pCore->Internal_Req_List);
    tmpSG += sgSize;
    temp += MV_REQUEST_SIZE;    /* MV_Request is 64bit aligned.
                                 */
}
#else /* !__RES_MGMT__ */
pCore->req_pool = (MV_PVOID) res_reserve_req_pool(MODULE_CORE, internalReqCount, sgEntryCount);
#endif /* !__RES_MGMT__ */

#ifdef CORE_SAS_SUPPORT_ATA_COMMAND
for ( i=0; i<contextCount; i++ ) {
    pContext = (PCORE_CONTEXT)temp;
    pContext->Context_Type = CORE_CONTEXT_TYPE_NONE;
    List_AddTail(&pContext->Queue_Pointer, &pCore->Context_List);
    temp += sizeof(CORE_CONTEXT);
}
#endif

#ifdef SUPPORT_LARGE_REQUEST
MV_LIST_HEAD_INIT(&pCore->Sub_Req_List);
#endif

for ( i=0; i<SATAScratchCount; i++ ) {
    pSATASB = (PSATA_Scratch_Buffer)temp;
    List_AddTail(&pSATASB->Queue_Pointer, &pCore->SATA_Scratch_List);
    temp += sizeof(SATA_Scratch_Buffer);
}
for ( i=0; i<SMPScratchCount; i++ ) {
    pSMPSB = (PSMP_Scratch_Buffer)temp;
    List_AddTail(&pSMPSB->Queue_Pointer, &pCore->SMP_Scratch_List);
    temp += sizeof(SMP_Scratch_Buffer);
}
for ( i=0; i<sgBufferCount; i++ ) {
    pSGBuf = (PSG_Buffer)temp;
    List_AddTail(&pSGBuf->Queue_Pointer, &pCore->SG_Buffer_List);
    temp += sizeof(SG_Buffer);
}

/* Initialize pools */
/* Tag pools */
/* Special case here: in hibernation we must handle only one request at a time,
   but we also need 2 slots for the completion queue entry pointer to work
   properly. Therefore slotCount is allocated as 2 during hibernation, but we
   only put one tag in the pool; same for the XOR slots. */
pCore->Tag_Pool.Stack = (MV_PU16)temp;
if( maxIo == 1 ) {
    pCore->Tag_Pool.Size = 1;
    temp += sizeof(MV_U16) * 1;
} else {
    pCore->Tag_Pool.Size = slotCount;
    temp += sizeof(MV_U16) * slotCount;
}
Tag_Init(&pCore->Tag_Pool, pCore->Tag_Pool.Size);

/* Device Pool */
pCore->Device_Pool.Stack = (MV_PU16)temp;
pCore->Device_Pool.Size = devCount;
temp += sizeof(MV_U16) * devCount;
Tag_Init(&pCore->Device_Pool, pCore->Device_Pool.Size);

/* Expander Pool */
pCore->Expander_Pool.Stack = (MV_PU16)temp;
pCore->Expander_Pool.Size = expanderCount;
temp += sizeof(MV_U16) * expanderCount;
Tag_Init(&pCore->Expander_Pool, pCore->Expander_Pool.Size);

#ifdef SUPPORT_PM
/* PM Pool */
pCore->PM_Pool.Stack = (MV_PU16)temp;
pCore->PM_Pool.Size = pmCount;
temp += sizeof(MV_U16) * pmCount;
Tag_Init(&pCore->PM_Pool, pCore->PM_Pool.Size);
#endif /* SUPPORT_PM */

/* Port Pool */
pCore->Port_Pool.Stack = (MV_PU16)temp;
pCore->Port_Pool.Size = MAX_PORT_NUMBER;
temp += sizeof(MV_U16) * MAX_PORT_NUMBER;
Tag_Init(&pCore->Port_Pool, MAX_PORT_NUMBER);

#ifdef SUPPORT_CONSOLIDATE
if ( pCore->Is_Dump ) {
    pCore->pConsolid_Device = NULL;
    pCore->pConsolid_Extent = NULL;
} else {
    /* Allocate resources for Consolidate_Extension->Requests[].
     */
    pCore->pConsolid_Extent = (PConsolidate_Extension)(temp);
    temp += ROUNDING(sizeof(Consolidate_Extension),8);
    pCore->pConsolid_Device = (PConsolidate_Device)temp;
    temp += ROUNDING(sizeof(Consolidate_Device), 8) * devCount;

    // Initialize some fields for pCore->pConsolid_Extent->Requests[i]
    tmpSG = temp;
    temp = temp + sgSize * slotCount;
    pCore->pConsolid_Extent->Requests = (PMV_Request)(temp);
    for (i=0; i<slotCount; i++) {
        pReq = &pCore->pConsolid_Extent->Requests[i];
        pReq->SG_Table.Max_Entry_Count = sgEntryCount;
        pReq->SG_Table.Entry_Ptr = (PMV_SG_Entry)tmpSG;
        tmpSG += sgSize;
        temp += sizeof(MV_Request);
    }
    Consolid_InitializeExtension(This, slotCount);
    for ( i=0; i<devCount; i++ )
        Consolid_InitializeDevice(This, i);
}
#endif

#ifndef SOFTWARE_XOR
/* allocate memory for XOR_Running_Req */
pCore->XOR_Running_Req = (PMV_XOR_Request *)(temp);
temp += ROUNDING(sizeof(PMV_XOR_Request) * slotCount, 8);

#ifdef RAID6_HARDWARE_XOR
/* XOR Table wrapper */
for ( i=0; i<slotCount*MAX_XOR_TABLE_PER_XOR_REQUEST; i++ ) {
    pXORTableWrapper = (PXOR_Table_Wrapper)temp;
    List_AddTail(&pXORTableWrapper->Queue_Pointer, &pCore->XOR_Table_List);
    temp += sizeof(XOR_Table_Wrapper);
}
/* XOR Request Context */
for ( i=0; i<slotCount; i++ ) {
    pXORContext = (PCORE_XOR_CONTEXT)temp;
    pXORContext->Context_Type = CORE_XOR_CONTEXT_TYPE_TABLE;
    pXORContext->Table_Wrapper = NULL;
    List_AddTail(&pXORContext->Queue_Pointer, &pCore->XOR_Context_List);
    temp += sizeof(CORE_XOR_CONTEXT);
}
#endif

/* XOR Tag Pool */
pCore->XOR_Tag_Pool.Stack = (MV_PU16)temp;
if( maxIo == 1 ) {
    pCore->XOR_Tag_Pool.Size = 1;
    temp += sizeof(MV_U16) * 1;
} else {
    pCore->XOR_Tag_Pool.Size = slotCount;
    temp += sizeof(MV_U16) * slotCount;
}
Tag_Init(&pCore->XOR_Tag_Pool, pCore->XOR_Tag_Pool.Size);
#endif

/* Rest of memory will be used for discovery */
pCore->pDiscoverBuffer = (MV_PVOID)temp;
usedSize = (MV_U32)(temp - (MV_PTR_INTEGER) mod_desc->extension);
DISC_SetResource(pCore->pDiscoverBuffer, mod_desc->extension_size - usedSize);

/* Port_Map and Port_Num will be read from the register */
/* Init port data structure */
for ( i=0; i<MAX_PORT_NUMBER; i++ ) {
    port = &pCore->Ports[i];
    port->Id = (MV_U8) i;
    port->Port_State = PORT_STATE_IDLE;
    port->DiscoveryInstance = 0;
    port->Core_Extension = pCore;
    port->Device_Number = 0;
    port->Expander_Number = 0;
    MV_LIST_HEAD_INIT( &port->pExpTreeRoot );
    MV_LIST_HEAD_INIT( &port->Device_List );
    MV_LIST_HEAD_INIT( &port->Expander_List );
    // TBD: PORT TYPE will be determined after InitChip.
    port->Type = PORT_TYPE_SAS;    /* default */
}
pCore->Port_Num = 0;

/* Init device map structure */
for (i=0; i<MAX_PORT_ID; i++)
    pCore->Port_Map[i] = ID_NOT_MAPPED;

/* Init device data structure */
for (i=0; i<devCount; i++) {
    pCore->Devices[i].Status = DEVICE_STATUS_NO_DEVICE;
    pCore->Devices[i].State = DEVICE_STATE_IDLE;
#ifdef SUPPORT_TIMER
    pCore->Devices[i].Timer_ID = NO_CURRENT_TIMER;
#endif
#ifdef USE_DYN_REGISTER_SET
    pCore->Devices[i].Register_Set = ID_NOT_MAPPED;
#endif
    pCore->Devices[i].Timeout_Count = 0;
    pCore->Devices[i].PM_Number = ID_NOT_MAPPED;
    pCore->Devices[i].Retry_Count = 0;
    MV_LIST_HEAD_INIT(&pCore->Devices[i].Sent_Req_List);
}
pCore->Current_Device_Id = 0;

/* Init device map structure, here we have to use maximum number */
#ifdef SUPPORT_PM
for (i=0; i<MAX_PM_ID; i++)
    pCore->Device_Map[i] = ID_NOT_MAPPED;
#else
for (i=0; i<MAX_EXPANDER_ID; i++)
    pCore->Device_Map[i] = ID_NOT_MAPPED;
#endif

/* Init Expander data structure */
for (i=0; i<expanderCount; i++) {
    pCore->Expanders[i].Device_Number = 0;
    pCore->Expanders[i].Phy_Count = 0;
    pCore->Expanders[i].pParent = NULL;
    MV_LIST_HEAD_INIT( &pCore->Expanders[i].pChild );
    MV_LIST_HEAD_INIT( &pCore->Expanders[i].pSibling );
    pCore->Expanders[i].pPort = (MV_PVOID)port;
    MV_LIST_HEAD_INIT( &pCore->Expanders[i].Device_List );
}
pCore->Current_Expander_Id = MIN_EXPANDER_ID;

#ifdef SUPPORT_PM
/* Init PM data structure */
for (i = 0; i < pmCount; i++) {
    pCore->PMs[i].Status = PM_STATUS_RESET;
    pCore->PMs[i].State = PM_STATE_IDLE;
#ifdef SUPPORT_TIMER
    pCore->PMs[i].Timer_ID = NO_CURRENT_TIMER;
#endif
#ifdef USE_DYN_REGISTER_SET
    pCore->PMs[i].Register_Set = ID_NOT_MAPPED;
#endif
    MV_LIST_HEAD_INIT(&pCore->PMs[i].Sent_Req_List);
    pCore->PMs[i].pPort = (MV_PVOID)port;
}
pCore->Current_PM_Id = MIN_PM_ID;
#endif

for (i=0; i<16; i++) {
    pCore->Running_Slot[i] = 0;
    pCore->Resetting_Slot[i] = 0;
}
pCore->LastDELV_Q = 0xfff;
pCore->LastCMPL_Q = 0xfff;
#ifndef SOFTWARE_XOR
pCore->XOR_LastDELV_Q = 0xfff;
pCore->XOR_LastCMPL_Q = 0xfff;
MV_LIST_HEAD_INIT(&pCore->XOR_Waiting_List);
#endif

size = pCore->desc->ops->get_res_desc(RESOURCE_UNCACHED_MEMORY, maxIo);
if (HBA_GetResource(pCore->desc, RESOURCE_UNCACHED_MEMORY, size, &dmaResource)) {
    pCore->State = CORE_STATE_ZOMBIE;
    return;
}
memVir = dmaResource.Virtual_Address;
memDMA = dmaResource.Physical_Address;

/* Reset memory */
MV_ZeroMemory(memVir, dmaResource.Byte_Size);

/* Assign uncached memory for command list (64 byte align) */
offset = (MV_U32)(ROUNDING(memDMA.value,64) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
pCore->Cmd_List_DMA = memDMA;
pCore->Cmd_List = memVir;
memVir = (MV_PU8)memVir + sizeof(MV_Command_Header) * slotCount;
memDMA = U64_ADD_U32(memDMA, sizeof(MV_Command_Header) * slotCount);

/* Assign uncached memory for received FIS (256 byte align) */
offset = (MV_U32)(ROUNDING(memDMA.value,256) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
pCore->RX_FIS = memVir;
pCore->RX_FIS_DMA = memDMA;
if (pCore->Is_Dump)
    usedSize = RX_FIS_SIZE;
else
    usedSize = RX_FIS_POOL_SIZE;
memVir = (MV_PU8)memVir + usedSize;
memDMA = U64_ADD_U32(memDMA, usedSize);

/* Assign the 32 command tables. (128 byte align) */
offset = (MV_U32)(ROUNDING(memDMA.value,128) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
pCore->Cmd_Table = memVir;
pCore->Cmd_Table_DMA = memDMA;
memVir = (MV_PU8)memVir + sizeof(MV_Command_Table) * slotCount;
memDMA = U64_ADD_U32(memDMA, sizeof(MV_Command_Table) * slotCount);

/* Assign delivery queue.
 */
offset = (MV_U32)(ROUNDING(memDMA.value,8) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
pCore->DELV_Q_ = memVir;
pCore->DELV_Q_DMA = memDMA;
memVir = (MV_PU8)memVir + sizeof(DELIVERY_QUEUE_ENTRY) * slotCount;
memDMA = U64_ADD_U32(memDMA, sizeof(DELIVERY_QUEUE_ENTRY) * slotCount);

/* Assign completion queue. */
offset = (MV_U32)(ROUNDING(memDMA.value,8) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
pCore->CMPL_Q = memVir;
pCore->CMPL_Q_DMA = memDMA;
memVir = (MV_PU8)memVir + sizeof(COMPLETION_QUEUE_ENTRY) * (slotCount+1);
memDMA = U64_ADD_U32(memDMA, sizeof(COMPLETION_QUEUE_ENTRY) * (slotCount+1));

/* Assign the scratch buffer (8 byte align).
   Number of Target/Device cannot be determined until */
offset = (MV_U32)(ROUNDING(memDMA.value,8) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
for ( i=0; i<SATAScratchCount; i++ ) {
    pSATASB = (PSATA_Scratch_Buffer)List_GetFirstEntry(&pCore->SATA_Scratch_List, SATA_Scratch_Buffer, Queue_Pointer);
    pSATASB->Scratch_Vir = memVir;
    pSATASB->Scratch_DMA = memDMA;
    List_AddTail(&pSATASB->Queue_Pointer, &pCore->SATA_Scratch_List);
    memVir = (MV_PU8)memVir + SATA_SCRATCH_BUFFER_SIZE;
    memDMA = U64_ADD_U32(memDMA, SATA_SCRATCH_BUFFER_SIZE);
}

/* Assign the SG buffer */
offset = (MV_U32)(ROUNDING(memDMA.value,8) - memDMA.value);
memDMA = U64_ADD_U32(memDMA, offset);
memVir = (MV_PU8)memVir + offset;
for ( i=0; i<sgBufferCount; i++ ) {
    pSGBuf = (PSG_Buffer)List_GetFirstEntry(&pCore->SG_Buffer_List, SG_Buffer, Queue_Pointer);
    pSGBuf->Buffer_Vir = memVir;
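/*
 * The uncached-memory assignments above all repeat one carve-out pattern:
 * round the DMA address up to the required alignment, advance the virtual
 * pointer by the same offset, record the region, then step both pointers
 * past it for the next consumer.  A minimal sketch of that pattern as a
 * helper follows, kept under #if 0: the helper name core_uncached_carve and
 * the MV_U64 type used for the DMA address are assumptions; ROUNDING,
 * U64_ADD_U32, MV_PU8, MV_PVOID and MV_U32 are used exactly as above.
 */
#if 0
static MV_PVOID core_uncached_carve(MV_PU8 *memVir, MV_U64 *memDMA,
                                    MV_U32 align, MV_U32 size)
{
    /* bring the DMA address to 'align' and keep the virtual view in step */
    MV_U32   offset = (MV_U32)(ROUNDING(memDMA->value, align) - memDMA->value);
    MV_PVOID region;

    *memDMA = U64_ADD_U32(*memDMA, offset);
    *memVir = (MV_PU8)*memVir + offset;

    /* hand out 'size' bytes at the aligned address ... */
    region  = (MV_PVOID)*memVir;

    /* ... and step both pointers past the region for the next consumer */
    *memVir = (MV_PU8)*memVir + size;
    *memDMA = U64_ADD_U32(*memDMA, size);
    return region;
}
#endif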