⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 core_exp.c

📁 6440linuxDriver的源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		pSGBuf->Buffer_DMA = memDMA;
		List_AddTail(&pSGBuf->Queue_Pointer, &pCore->SG_Buffer_List);
		/* Advance both the virtual and the DMA address past this SG buffer. */
		memVir = (MV_PU8)memVir + SG_BUFFER_SIZE;
		memDMA = U64_ADD_U32(memDMA, SG_BUFFER_SIZE);
	}

#ifndef SOFTWARE_XOR
	/* Assign uncached memory for XOR command list (32 byte align) */
	offset = (MV_U32)(ROUNDING(memDMA.value,32)-memDMA.value);
	memDMA = U64_ADD_U32(memDMA, offset);
	memVir = (MV_PU8)memVir + offset;
	pCore->XOR_Cmd_List_DMA=memDMA;
	pCore->XOR_Cmd_List=memVir;
	/* 6440 revision 0 (A1) uses a different XOR command header layout. */
	if( (pCore->Device_Id==DEVICE_ID_6440)&&(pCore->Revision_Id==0x0) )
	{
		memVir = (MV_PU8)memVir + sizeof(MV_XOR_Command_Header_A1) * slotCount;
		memDMA = U64_ADD_U32(memDMA, sizeof(MV_XOR_Command_Header_A1) * slotCount);
	}
	else
	{
		memVir = (MV_PU8)memVir + sizeof(MV_XOR_Command_Header) * slotCount;
		memDMA = U64_ADD_U32(memDMA, sizeof(MV_XOR_Command_Header) * slotCount);
	}
	if( (pCore->Device_Id==DEVICE_ID_6440)&&(pCore->Revision_Id==0x0) )
	{
	/* Assign the MAX_XOR_CMD_ENTRY XOR command tables. ((MAX_XOR_CMD_ENTRY+1)*12) byte align) */
		offset = (MV_U32)(ROUNDING(memDMA.value,(MAX_XOR_CMD_ENTRY+1)*12)-memDMA.value);
	}
	else
	{
	/* Assign the MAX_XOR_CMD_ENTRY XOR command tables. ((MAX_XOR_CMD_ENTRY+1)*16) byte align) */
		offset = (MV_U32)(ROUNDING(memDMA.value,(MAX_XOR_CMD_ENTRY+1)*16)-memDMA.value);
	}
	memDMA = U64_ADD_U32(memDMA, offset);
	memVir = (MV_PU8)memVir + offset;
	pCore->XOR_Cmd_Table = memVir;
	pCore->XOR_Cmd_Table_DMA = memDMA;
	xorTableCount = slotCount * MAX_XOR_TABLE_PER_XOR_REQUEST;
	if( (pCore->Device_Id==DEVICE_ID_6440)&&(pCore->Revision_Id==0x0) )
	{
		xorTableSize = sizeof(MV_XOR_Command_Table_A1);
		//memVir = (MV_PU8)memVir + sizeof(MV_XOR_Command_Table_A1) * xorTableCount;
		//memDMA = U64_ADD_U32(memDMA, sizeof(MV_XOR_Command_Table_A1) * xorTableCount);
	}
	else
	{
		xorTableSize = sizeof(MV_XOR_Command_Table);
		//memVir = (MV_PU8)memVir + sizeof(MV_XOR_Command_Table) * xorTableCount;
		//memDMA = U64_ADD_U32(memDMA, sizeof(MV_XOR_Command_Table) * xorTableCount);
	}
	/*
	 * Carve the region into xorTableCount tables; under RAID6 hardware XOR,
	 * record each table's virtual/DMA address in a wrapper cycled through
	 * XOR_Table_List (taken from the head, re-appended at the tail).
	 */
	for ( i=0; i<xorTableCount; i++ )
	{
	#ifdef RAID6_HARDWARE_XOR
		pXORTableWrapper = 
			(PXOR_Table_Wrapper)List_GetFirstEntry(&pCore->XOR_Table_List, XOR_Table_Wrapper, Queue_Pointer);
		pXORTableWrapper->Buffer_Vir = memVir;
		pXORTableWrapper->Buffer_DMA = memDMA;
		List_AddTail(&pXORTableWrapper->Queue_Pointer, &pCore->XOR_Table_List);
	#endif
		memVir = (MV_PU8)memVir + xorTableSize;
		memDMA = U64_ADD_U32(memDMA, xorTableSize);
	}

	/* Assign XOR delivery queue (8 byte align). */
	offset = (MV_U32)(ROUNDING(memDMA.value,8)-memDMA.value);
	memDMA = U64_ADD_U32(memDMA, offset);
	memVir = (MV_PU8)memVir + offset;
	pCore->XOR_DELV_Q = memVir;
	pCore->XOR_DELV_Q_DMA = memDMA;
	memVir = (MV_PU8)memVir + sizeof(XOR_DELIVERY_QUEUE_ENTRY) * slotCount;
	memDMA = U64_ADD_U32(memDMA, sizeof(XOR_DELIVERY_QUEUE_ENTRY) * slotCount);

	/*
	 * Assign XOR completion queue (8 byte align). Note the extra entry
	 * (slotCount + 1) -- presumably entry 0 is reserved as the hardware
	 * write pointer, mirroring the common completion queue described in
	 * the note below; confirm against the register spec.
	 */
	offset = (MV_U32)(ROUNDING(memDMA.value,8)-memDMA.value);
	memDMA = U64_ADD_U32(memDMA, offset);
	memVir = (MV_PU8)memVir + offset;
	pCore->XOR_CMPL_Q = memVir;
	pCore->XOR_CMPL_Q_DMA = memDMA;
	memVir = (MV_PU8)memVir + sizeof(XOR_COMPLETION_QUEUE_ENTRY) * (slotCount + 1);
	memDMA = U64_ADD_U32(memDMA, sizeof(XOR_COMPLETION_QUEUE_ENTRY) * (slotCount + 1));
#endif

/*
DELIVERY ( DELV_Q_: global, reusable )
1. prepare a command (command header from the command list + a command table ....)
2. find a free delivery queue entry (COMMON_DELV_Q_RD_PTR::DLVRY_QUEUE_RD_PTR+1)
3. fill this delivery queue entry
4. write this entry number to COMMON_DELV_Q_WR_PTR::DLVRY_QUEUE_WRT_PTR

COMPLETION ( CMPL_Q: global, reused )
5. receive IRQ, maybe.
6. check ATTENTION and clear by COMMON_IRQ_MASK or by PORT_IRQ_MASK, etc.
7. CMPL_Q_0 is reserved as COMMON_CMPL_Q_WR_PTR for quick check (may be increased by more than 1).
8. check status in CMPL_Q_N.

COMMAND LIST ( global, reused )
1. aka command slot, a global, fixed number of command headers.
2. contain command table, open_address, PRD, status.

COMMAND TABLE
1. contain SSP frames, IU, PIR, etc.
2. from a reserved pool.
*/
}

/*
 * Module "start" op: run the adapter state machine once, unless a failed
 * init left the core in the ZOMBIE state.
 */
static MV_VOID __core_ops_start(MV_PVOID This)
{
	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
	/* 
	 * ZOMBIE is when core cannot get enough resource to do a proper
	 * init, then cease from starting up and let hba handle a 
	 * module_start time-out (end up releasing modules & unload the
	 * driver in linux).
	 */
	if (pCore->State != CORE_STATE_ZOMBIE)
		mvAdapterStateMachine(pCore, NULL);
}

/*
 * Module "stop" op: abort every request still queued on each mapped port
 * (completed with REQ_STATUS_NO_DEVICE), disable the HBA via its MMIO
 * base, and release the request pool when resource management is built in.
 */
static MV_VOID __core_ops_stop(MV_PVOID This)
{
	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
	MV_U8 i;
	for(i=0; i<MAX_PORT_ID;i++)
	{
		MV_U8 index;
		index=pCore->Port_Map[i];
		if(index!=ID_NOT_MAPPED) 
			Port_AbortRequests(&pCore->Ports[index], REQ_STATUS_NO_DEVICE, NULL);
	}
	mv_disable_hba(pCore->Mmio_Base);
#if __RES_MGMT__
	res_release_req_pool(pCore->req_pool);
#endif /* __RES_MGMT__ */
}

/* Module event notification hook; the core currently ignores all events. */
void Core_ModuleNotification(MV_PVOID This,
			     enum Module_Event event,
			     struct mod_notif_param *param)
{
}

void Core_HandleWaitingList(PCore_Driver_Extension pCore);

/*
 * Queue pReq on the core waiting list and kick the dispatcher.
 * Core-initiated requests that are neither reads nor writes (reset
 * requests) are inserted at the head of the list; core-initiated
 * reads/writes are consolidate requests and go to the tail (only legal
 * when SUPPORT_CONSOLIDATE is built in); everything else is appended
 * at the tail.
 */
void Core_InternalSendRequest(MV_PVOID This, PMV_Request pReq)
{
	PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
	//MV_DUMPRUN(0xCCF1);
	/* Check whether we can handle this request */
	switch (pReq->Cdb[0])
	{
		case SCSI_CMD_INQUIRY:
		case SCSI_CMD_START_STOP_UNIT:
		case SCSI_CMD_TEST_UNIT_READY:
		case SCSI_CMD_READ_10:
		case SCSI_CMD_WRITE_10:
		case SCSI_CMD_VERIFY_10:
		case SCSI_CMD_READ_CAPACITY_10:
		case SCSI_CMD_REQUEST_SENSE:
		case SCSI_CMD_MODE_SELECT_10:
		case SCSI_CMD_MODE_SENSE_10:
		case SCSI_CMD_MARVELL_SPECIFIC:
		default:
			/* All opcodes share one path; the labels only document intent. */
			if ( pReq->Cmd_Initiator==pCore )
			{
				if ( !SCSI_IS_READ(pReq->Cdb[0]) && !SCSI_IS_WRITE(pReq->Cdb[0]) )
				{
					/* Reset request */
					List_Add(&pReq->Queue_Pointer, &pCore->Waiting_List);		/* Add to the header. */
				}
				else
				{
					#ifdef SUPPORT_CONSOLIDATE
					/* Consolidate request */
					MV_DASSERT( !pCore->Is_Dump );
					List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);	/* Append to the tail. */
					#else
					MV_ASSERT(MV_FALSE);
					#endif
				}
			}
			else
			{
				List_AddTail(&pReq->Queue_Pointer, &pCore->Waiting_List);		/* Append to the tail. */
			}
			Core_HandleWaitingList(pCore);
			break;
	}
}

/*
 * Module "send request" op. With SUPPORT_CONSOLIDATE built in, route
 * eligible requests through the consolidate module first: non-SMP
 * requests that are either PM requests, or are addressed to a mapped
 * non-ATAPI, non-tape, non-virtual device -- and never while dumping.
 * Everything else falls through to Core_InternalSendRequest.
 */
static void __core_ops_send_request(MV_PVOID This, PMV_Request pReq)
{
#ifdef SUPPORT_CONSOLIDATE
	if (!IS_A_SMP_REQ(pReq)) {
		PCore_Driver_Extension pCore = (PCore_Driver_Extension)This;
		MV_U8 id = MapDeviceId(pCore, 
				       DEV_ID_TO_TARGET_ID(pReq->Device_Id));
		if (IS_PM_REQ(pReq))
		{
			if (!pCore->Is_Dump)
			{
				Consolid_ModuleSendRequest(pCore, pReq);
				return;
			}
		}
		/* Check if the request will be sent to consolidate handling function */
		else if ( id!=ID_NOT_MAPPED ) {
			PDomain_Device pDevice = NULL;
			pDevice = &pCore->Devices[id];
			if ((!IS_ATAPI(pDevice)) && (!IS_TAPE(pDevice)) &&
			    (pReq->Device_Id != VIRTUAL_DEVICE_ID) &&
			    (!pCore->Is_Dump))
			{
				Consolid_ModuleSendRequest(pCore, pReq);
				return;
			}
		}
	}
#endif /* SUPPORT_CONSOLIDATE */
	Core_InternalSendRequest(This, pReq);
}

//TBD: Replace this function with existing functions.
/*
 * Re-initialize the core's software state after a hardware reset: tag
 * pools, port/device/expander/PM structures, device maps, slot bitmaps
 * and queue shadow pointers -- then restart the adapter state machine.
 * Only expected to be called once (the Resetting flag guards re-entry).
 */
void Core_ResetHardware(MV_PVOID pExtension)
{
	PCore_Driver_Extension pCore = (PCore_Driver_Extension)pExtension;
	MV_U8 i;
	PDomain_Port pPort = NULL;

	/* Re-initialize some variables to make the reset go. */
	//TBD: Any more variables?
	pCore->Adapter_State = ADAPTER_INITIALIZING;
	Tag_Init(&pCore->Tag_Pool, pCore->Slot_Count_Supported);
	Tag_Init(&pCore->Device_Pool, pCore->PD_Count_Supported);
	Tag_Init(&pCore->Expander_Pool, pCore->Expander_Count_Supported);
#ifdef SUPPORT_PM
	Tag_Init(&pCore->PM_Pool, pCore->PM_Count_Supported);
#endif
	Tag_Init(&pCore->Port_Pool, MAX_PORT_NUMBER);

	/* Init port data structure */
	for ( i=0; i<MAX_PORT_NUMBER; i++ )
	{
		pPort = &pCore->Ports[i];
		
		pPort->Id = i;
		pPort->Port_State = PORT_STATE_IDLE;
		pPort->DiscoveryInstance = 0;
		pPort->Device_Number = 0;
		pPort->Expander_Number=0;
		MV_LIST_HEAD_INIT( &pPort->pExpTreeRoot);
		MV_LIST_HEAD_INIT( &pPort->Device_List );
		MV_LIST_HEAD_INIT( &pPort->Expander_List );
		//TBD: PORT TYPE will be determined after InitChip.
		pPort->Type=PORT_TYPE_SAS; /*default*/
	}
	pCore->Port_Num = 0;

	/* Init device map structure */
	for (i=0; i<MAX_PORT_ID; i++)
		pCore->Port_Map[i] = ID_NOT_MAPPED;

	/* Init device data structure */
	for (i = 0; i < pCore->PD_Count_Supported; i++)
	{
		pCore->Devices[i].Status = DEVICE_STATUS_NO_DEVICE;
		pCore->Devices[i].State = DEVICE_STATE_IDLE;
#ifdef SUPPORT_TIMER
		pCore->Devices[i].Timer_ID = NO_CURRENT_TIMER;
#endif /* SUPPORT_TIMER */
#ifdef USE_DYN_REGISTER_SET
		pCore->Devices[i].Register_Set = ID_NOT_MAPPED;
#endif /* USE_DYN_REGISTER_SET */
		pCore->Devices[i].Timeout_Count = 0;
		pCore->Devices[i].Retry_Count = 0;
	}
	pCore->Current_Device_Id = 0;

	/* Init device map structure */
#ifdef SUPPORT_PM
	for (i=0; i<MAX_PM_ID; i++)
		pCore->Device_Map[i] = ID_NOT_MAPPED;
#else
	for (i=0; i<MAX_EXPANDER_ID; i++)
		pCore->Device_Map[i] = ID_NOT_MAPPED;
#endif

	/* Init Expander data structure */
	for (i=0; i<pCore->Expander_Count_Supported; i++)
	{
		pCore->Expanders[i].Device_Number=0;
		pCore->Expanders[i].Phy_Count=0;
		pCore->Expanders[i].pParent = NULL;
		MV_LIST_HEAD_INIT( &pCore->Expanders[i].pChild);
		MV_LIST_HEAD_INIT( &pCore->Expanders[i].pSibling);
		/* NOTE(review): pPort still points at the LAST port from the
		 * port-init loop above -- confirm this default is intended. */
		pCore->Expanders[i].pPort = pPort;
		MV_LIST_HEAD_INIT( &pCore->Expanders[i].Device_List );
	}
	pCore->Current_Expander_Id = MIN_EXPANDER_ID;

#ifdef SUPPORT_PM
	/* Init PM data structure */
	for (i = 0; i < pCore->PM_Count_Supported; i++)
	{
		pCore->PMs[i].Status = PM_STATUS_RESET;
		pCore->PMs[i].State = PM_STATE_IDLE;
#ifdef SUPPORT_TIMER
		pCore->PMs[i].Timer_ID = NO_CURRENT_TIMER;
#endif
#ifdef USE_DYN_REGISTER_SET
		pCore->PMs[i].Register_Set = ID_NOT_MAPPED;
#endif
		MV_LIST_HEAD_INIT(&pCore->PMs[i].Sent_Req_List);
		/* NOTE(review): same stale-pPort concern as the expander loop. */
		pCore->PMs[i].pPort = pPort;
	}
	pCore->Current_PM_Id = MIN_PM_ID;
#endif
	
	/* Clear the per-halfword slot bitmaps. */
	for (i=0; i<16; i++) 
	{
		pCore->Running_Slot[i] = 0;
		pCore->Resetting_Slot[i] = 0;
		pCore->Completing_Slot[i] = 0;
	}
	/* 0xfff presumably marks "no entry consumed yet" -- TODO confirm. */
	pCore->LastDELV_Q=0xfff;
	pCore->LastCMPL_Q=0xfff;

	/* Go through the mvAdapterStateMachine. */
	if( pCore->Resetting==0 )
	{
		pCore->Resetting = 1;
		if( !mvAdapterStateMachine(pCore,NULL) )
		{
			MV_ASSERT(MV_FALSE);//WRONG. But it's not easy to fix yet.
		}
	}
	else
	{
		/* I suppose that we only have one chance to call Core_ResetHardware. */
		MV_DASSERT(MV_FALSE);
	}
	
	return;
}

void HandleDeviceReset(PCore_Driver_Extension pCore, PDomain_Device pDevice);
void HandlePortReset(PCore_Driver_Extension pCore, PDomain_Port pPort);

/*
 * Return MV_TRUE if this SCSI opcode is simulated by the driver (answered
 * from software state, no hardware command issued), MV_FALSE otherwise.
 */
static inline MV_BOOLEAN __is_scsi_cmd_simulated(MV_U8 cmd_type)
{
	switch (cmd_type)
	{
	case SCSI_CMD_INQUIRY:
	case SCSI_CMD_READ_CAPACITY_10:
	case SCSI_CMD_SYNCHRONIZE_CACHE_10:
	case SCSI_CMD_TEST_UNIT_READY:
	case SCSI_CMD_REQUEST_SENSE:
	case SCSI_CMD_RESERVE_6:
	case SCSI_CMD_RELEASE_6:
	case SCSI_CMD_REPORT_LUN:
	case SCSI_CMD_MODE_SENSE_6:
	case SCSI_CMD_MODE_SENSE_10:
	case SCSI_CMD_MODE_SELECT_6:
	case SCSI_CMD_MODE_SELECT_10:
#ifdef CORE_SUPPORT_API
	case APICDB0_PD:
#   ifdef SUPPORT_PASS_THROUGH_DIRECT
	case APICDB0_PASS_THRU_CMD:
#   endif /* SUPPORT_PASS_THROUGH_DIRECT */
#   ifdef SUPPORT_CSMI
	case APICDB0_CSMI_CORE:
#   endif /* SUPPORT_CSMI */
#endif /* CORE_SUPPORT_API */
		return MV_TRUE;
	default:
		return MV_FALSE;
	}
}

MV_BOOLEAN HandleInstantRequest(PCore_Driver_Extension pCore, PMV_Request pReq)
{
	/* 
	 * Some of the requests can be returned immediately without hardware 
	 * access. 
	 * Handle Inquiry and Read Capacity.
	 * If return MV_TRUE, means the request can be returned to OS now.
	 */
	PDomain_Device pDevice = NULL;
	MV_U8 portId, deviceId; /*, temp;*/
	MV_U8 ret;
	
	/* SMP, soft-reset and PM requests, and any opcode that is not
	 * simulated, must go to the hardware path. */
	if (IS_A_SMP_REQ(pReq) || 
	    IS_SOFT_RESET_REQ(pReq) || 
	    IS_PM_REQ(pReq) ||
	    (!__is_scsi_cmd_simulated(pReq->Cdb[0])))
		return MV_FALSE;

	/* Resolve the target device unless this addresses the virtual device. */
	if ( pReq->Device_Id != VIRTUAL_DEVICE_ID )
	{
		portId = MapPortId(pCore, pReq->Device_Id);
		deviceId = MapDeviceId(pCore, pReq->Device_Id);
		if ( deviceId != ID_NOT_MAPPED )
			pDevice = &pCore->Devices[deviceId];
	}

	/* Marvell vendor-specific CDB: device / port reset sub-commands. */
	if (pReq->Cdb[0] == SCSI_CMD_MARVELL_SPECIFIC && pReq->Cdb[1] == CDB_CORE_MODULE)
	{
		if (pReq->Cdb[2] == CDB_CORE_RESET_DEVICE)
		{
			HandleDeviceReset(pCore, pDevice);
			return MV_TRUE;
		}
		else if (pReq->Cdb[2] == CDB_CORE_RESET_PORT)
		{
			MV_U16 portId;
			/* Port id travels in Cdb[3..4]; copied to avoid unaligned access. */
			MV_CopyMemory(&portId, &pReq->Cdb[3], 2);
			HandlePortReset(pCore, &pCore->Ports[MapPortEntry(pCore, portId)]);
			return MV_TRUE;
		}
	}

	/* For SSP targets, only API pass-through opcodes are simulated. */
	if(pDevice != NULL && IS_SSP(pDevice))
	{
	#ifdef SUPPORT_CSMI
		if ( (pReq->Cdb[0] != APICDB0_PD) 
			&& (pReq->Cdb[0] != APICDB0_CSMI_CORE)
			&& (pReq->Cdb[0] != APICDB0_PASS_THRU_CMD_SCSI) 
			&& (pReq->Cdb[0] != APICDB0_PASS_THRU_CMD_ATA) )
#else
		if ( (pReq->Cdb[0] != APICDB0_PD) 
			&& (pReq->Cdb[0] != APICDB0_PASS_THRU_CMD_SCSI)
			&& (pReq->Cdb[0] != APICDB0_PASS_THRU_CMD_ATA) )
#endif /* SUPPORT_CSMI */
			return(MV_FALSE);
#ifndef CORE_SAS_SUPPORT_ATA_COMMAND
		else if( (pReq->Cdb[0] == SCSI_CMD_MARVELL_SPECIFIC) &&

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -