/* tmchnl.c */
{
this->InchnlmOnPacketSend++;
}
for ( Count = 0 ;
Count < this->ChannelCount ;
Count++, this->IdxSent = ( this->IdxSent + 1 ) % this->ChannelCount )
{
pChannel = this->pChnlTab[ this->IdxSent ];
if ( pChannel == NULL )
continue;
if ( pChannel->Direction != TMCHNL_DIRECTION_SEND )
continue;
while ( ! cqueueIsEmpty ( pChannel->pQueue ) )
{
DWORD Start, End, Cycles;
PCSW = intCLEAR_IEN ();
Start = cycles();
cqueueRetrieve ( pChannel->pQueue, &MailSlot.Packet );
MailSlot.dwChannel = this->IdxSent;
MailSlot.dwMessage = 0;
dwWriteIndex = pHost->dwWriteIndex;
dwWriteIndex = ( dwWriteIndex + 1 ) % ( TMHD_CHNL_MBOXSLOTS );
if ( dwWriteIndex == pHost->dwReadIndex )
{
/* no point processing the other channels if ipc has no room */
End = cycles();
intRESTORE_IEN ( PCSW );
Cycles = ( End > Start ) ? (End - Start) : (End + ((DWORD)(~0) - Start));
/* if ( Cycles > 2000 ) tmDBGPrintf("LATENCY[%x]", Cycles ); */
goto chnlmOnPacketSend_exit;
}
MailSlot.dwPriority = this->PktSendSequence;
this->PktSendSequence++;
pChannel->PacketCounter++;
pHost->Queue [pHost->dwWriteIndex] = MailSlot;
pHost->dwWriteIndex = dwWriteIndex;
/* ipcSendPacket ends */
cqueueDelete ( pChannel->pQueue, &MailSlot.Packet );
End = cycles();
intRESTORE_IEN ( PCSW );
Cycles = ( End > Start ) ? (End - Start) : (End + ((DWORD)(~0) - Start));
/* if ( Cycles > 2000 ) tmDBGPrintf("LATENCY[%x]", Cycles ); */
DT(2, ( "[SEND|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
MailSlot.dwPriority,
MailSlot.dwChannel,
MailSlot.Packet.dwCommand,
MailSlot.Packet.dwArgument[0],
MailSlot.Packet.dwArgument[1],
MailSlot.Packet.dwArgument[2],
MailSlot.Packet.dwArgument[3],
MailSlot.Packet.dwArgument[4] ));
#ifdef CHNL_DEBUG
DT(0, ("[D:%x]", MailSlot.Packet.dwArgument[4] ));
#endif
}
}
chnlmOnPacketSend_exit :
this->InchnlmOnPacketSend--;
return TMOK;
}
/*
 * chnlmOnAsyncCallback - drain every receive channel's packet queue and
 * deliver each packet to the channel's registered OnCallback handler.
 *
 * pContext : the channel manager object (PTMCHNL_MGR_OBJECT).
 * Returns TMOK. Individual callback failures are traced and otherwise
 * ignored so one bad handler cannot stall delivery to other channels.
 */
STATUS chnlmOnAsyncCallback ( PVOID pContext )
{
    PTMCHNL_MGR_OBJECT this = (PTMCHNL_MGR_OBJECT)pContext;
    PTMCHNL_OBJECT pChannel;
    TMSTD_PACKET Packet;
    DWORD dwIdx;
    STATUS Status;

    for ( dwIdx = 0 ; dwIdx < this->ChannelCount ; dwIdx ++ )
    {
        pChannel = this->pChnlTab[ dwIdx ];
        if ( ! pChannel )
            continue;
        /* we deal only with the receiving channels here */
        if ( pChannel->Direction != TMCHNL_DIRECTION_RECV )
            continue;
        while ( ! cqueueIsEmpty ( pChannel->pQueue ) )
        {
            cqueueDelete ( pChannel->pQueue, &Packet );
            if ( ! pChannel->OnCallback )
            {
                /* no handler registered: the packet is dropped, log it */
                DT(0, (
                    "tmman:chnlmOnAsyncCallback:OnPacketRecv:NULL:Chnl[%x]\n",
                    dwIdx ));
                continue;
            }
            DT(3, ( "[QRECV|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
                dwIdx,
                Packet.dwCommand,
                Packet.dwArgument[0],
                Packet.dwArgument[1],
                Packet.dwArgument[2],
                Packet.dwArgument[3],
                Packet.dwArgument[4] ));
            if ( ( Status = ((TMCHNL_ONRECVPACKET)pChannel->OnCallback) (
                pChannel->pContext, &Packet ) ) != TMOK )
            {
                /* BUGFIX: failure was silently swallowed by an empty
                   branch; surface it in the trace like the other error
                   paths in this module */
                DT(0, ( "tmman:chnlmOnAsyncCallback:OnCallback:FAIL[%x]\n",
                    Status ));
            }
        }
    }
    /* BUGFIX: the function is declared STATUS but returned the BOOL
       value TRUE; return TMOK like every other STATUS routine here.
       (The only visible caller, chnlmCallback, ignores the result.) */
    return TMOK;
}
/*
 * chnlValidateHandle - sanity-check a channel handle before use.
 * A handle is valid when it is non-NULL, carries the expected object
 * size stamp, and has the ALLOCATED flag set.
 * Returns TMOK on success, TMCHNL_ERR_INVALIDHANDLE otherwise.
 */
STATUS chnlValidateHandle ( PVOID pChnl )
{
    PTMCHNL_OBJECT this = (PTMCHNL_OBJECT)pChnl;

    /* short-circuit order matters: NULL must be rejected before the
       structure fields are inspected */
    if ( pChnl &&
         this->Size == sizeof(TMCHNL_OBJECT) &&
         FlagGet ( this->Flags, TMCHNL_CHNL_FLAGALLOCATED ) )
    {
        return TMOK;
    }

    DT(0, ( "tmman:chnlValidateHandle:FAIL[%x]\n",pChnl));
    return TMCHNL_ERR_INVALIDHANDLE;
}
/*
 * chnlHandleToID - translate a channel handle into its numeric ID.
 * Validates the handle first; on failure the validation status is
 * propagated and *pID is left untouched.
 */
STATUS chnlHandleToID ( PVOID pChnl, PDWORD pID )
{
    PTMCHNL_OBJECT pObject = (PTMCHNL_OBJECT)pChnl;
    STATUS Status = chnlValidateHandle ( pChnl );

    if ( Status != TMOK )
        return Status;

    *pID = pObject->ID;
    return TMOK;
}
/* prototype for callback registered with IPC */
/*
 * chnlmCallback - IPC interrupt callback for the channel manager.
 *
 * Drains every pending mail slot from the DSP-bound mailbox into the
 * per-channel receive queues (with interrupts masked around each shared
 * index update), dispatches queued receive packets, kicks the send path
 * when the host mailbox has room, and finally performs the interrupt
 * hand-shake with the host based on the ToDSP.IntCmd state.
 *
 * InterruptID, Count : supplied by the IPC layer; unused here but part
 *                      of the registered callback signature.
 * pContext           : the channel manager object.
 * Returns TMOK.
 */
STATUS chnlmCallback ( DWORD InterruptID, DWORD Count, PVOID pContext )
{
    PTMCHNL_MGR_OBJECT this = (PTMCHNL_MGR_OBJECT)pContext;
    PTMHD_CHNL_MAILQUEUE pDSP = &this->pSharedData->ToDSP;
    PTMHD_CHNL_MAILQUEUE pHost = &this->pSharedData->ToHost;
    DWORD dwIdxPacket;
    DWORD volatile dwReadIndex;
    PTMCHNL_OBJECT pChannel;
    TMHD_CHNL_MAILSLOT Packet;
    DWORD PCSW;
    BOOL QueueFull = FALSE;
    /* NOTE(review): unused local "DWORD volatile Index" removed */

    for ( dwIdxPacket = 0 ; ; dwIdxPacket++ )
    {
        DWORD Start, End, Cycles;
        /* interrupts off while the shared mailbox indices are touched */
        PCSW = intCLEAR_IEN ();
        Start = cycles();
        dwReadIndex = pDSP->dwReadIndex;
        if ( dwReadIndex == pDSP->dwWriteIndex )
        {
            /* mailbox empty - done draining */
            End = cycles();
            intRESTORE_IEN ( PCSW );
            Cycles = ( End > Start ) ? (End - Start) : (End + ((DWORD)(~0) - Start));
            /*if ( Cycles > 2000 ) tmDBGPrintf("LATENCY[%x]", Cycles ); */
            break;
        }
        this->PktRecvCount++;
        Packet = pDSP->Queue[dwReadIndex];
        dwReadIndex = ( dwReadIndex + 1 ) % ( TMHD_CHNL_MBOXSLOTS );
        pDSP->dwReadIndex = dwReadIndex;
        End = cycles();
        intRESTORE_IEN ( PCSW );
        Cycles = ( End > Start ) ? (End - Start) : (End + ((DWORD)(~0) - Start));
        /* if ( Cycles > 2000 ) tmDBGPrintf("LATENCY[%x]", Cycles ); */
        DT(2, ( "[RECV|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
            Packet.dwPriority,
            Packet.dwChannel,
            Packet.Packet.dwCommand,
            Packet.Packet.dwArgument[0],
            Packet.Packet.dwArgument[1],
            Packet.Packet.dwArgument[2],
            Packet.Packet.dwArgument[3],
            Packet.Packet.dwArgument[4] ));
        /* sequence numbers must arrive in order; a mismatch means a
           packet was lost or reordered */
        if ( Packet.dwPriority != this->PktRecvSequence )
        {
            DT(0, ( "tmman:PANIC:chnlmCallback:Sequence:FAIL:PC[%x]:EC[%x]\n",
                Packet.dwPriority, this->PktRecvSequence ));
        }
        this->PktRecvSequence ++;
        /* change the way the packet is retrieved directly from the queue */
        pChannel = this->pChnlTab[ Packet.dwChannel ];
        if ( chnlValidateHandle ( pChannel ) != TMOK )
        {
            this->PktRecvInvChCount++;
            /* chnlDebugDump ( NULL ); */
            DT(0, (
                "tmman:PANIC:chnlmOnPacketRecv:chnlValidateHandle:FAIL:Ch[%x]\n",
                Packet.dwChannel ));
            continue;
        }
        if ( pChannel->Direction != TMCHNL_DIRECTION_RECV )
        {
            this->PktRecvInvDirCount++;
            DT(0, (
                "tmman:PANIC:chnlmOnPacketRecv:Chnl[%x]->Direction != RECV\n",
                pChannel->ID ));
            continue;
        }
#ifdef CHNL_DEBUG
        if ( Packet.Packet.dwArgument[4] != pChannel->PacketCounter )
        {
            DT(0, ("TM:chnlmCallback:PacketCoutner:Expected[%x]:Received[%x]:ERROR\n",
                pChannel->PacketCounter,
                Packet.Packet.dwArgument[4] ));
            DT(0, ("TM:[Callback|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
                Packet.dwPriority,
                Packet.dwChannel,
                Packet.Packet.dwCommand,
                Packet.Packet.dwArgument[0],
                Packet.Packet.dwArgument[1],
                Packet.Packet.dwArgument[2],
                Packet.Packet.dwArgument[3],
                Packet.Packet.dwArgument[4] ));
        }
#endif
        pChannel->PacketCounter++;
        if ( cqueueInsert ( pChannel->pQueue, &Packet.Packet ) == FALSE )
        {
            this->PktRecvDropCount++;
            /* this should not be allowed to happen when using packet acks */
            DT(0, (
                "tmman:PANIC:chnlmOnPacketRecv:cqueueInsert:Ch[%x]:FAIL\n",
                pChannel->ID ));
            continue;
        }
    }
    /* sucked out all the packets now update the indices */
    /* Allow this to happen with interrupts off */
    chnlmOnAsyncCallback ( this );
    /*
    we don't want sends to happen since we will be manipulating
    shared data structures.
    */
    /* don't do send ready callbacks if DSP mailbox is full */
    QueueFull = ( ( (pHost->dwWriteIndex + 1 ) % TMHD_CHNL_MBOXSLOTS ) ==
        pHost->dwReadIndex );
    if ( ! QueueFull )
    {
        chnlmOnPacketSend ( this );
    }
    {
        DWORD Start, End, Cycles;
        PCSW = intCLEAR_IEN ();
        Start = cycles();
        /* interrupt hand-shake with the host, driven by the command
           state the host published in the ToDSP mailbox */
        switch ( this->pSharedData->ToDSP.IntCmd )
        {
        case TMHD_CHNL_ISRREADY :
            if ( pHost->dwReadIndex != pHost->dwWriteIndex )
            {
                this->pSharedData->ToHost.IntCmd = TMHD_CHNL_MBOXREADY;
                ipcGenerateIRQ ( this->pIPC, TMHD_IPC_CHNLINTERRUPT );
            }
            break;
        case TMHD_CHNL_MBOXREADY :
            /* if we have messages for the host interrupt it and let it know */
            if ( pHost->dwReadIndex == pHost->dwWriteIndex )
            {
                this->pSharedData->ToHost.IntCmd = TMHD_CHNL_ISRREADY;
            }
            else
            {
                this->pSharedData->ToHost.IntCmd = TMHD_CHNL_MBOXREADY;
            }
            ipcGenerateIRQ ( this->pIPC, TMHD_IPC_CHNLINTERRUPT );
            break;
        default :
            DT(0, ( "chnlmCallback:PANIC:INVALID InterruptID:ID#[%x]\n",
                this->pSharedData->ToDSP.IntCmd ));
            break;
        }
        End = cycles();
        intRESTORE_IEN ( PCSW );
        Cycles = ( End > Start ) ? (End - Start) : (End + ((DWORD)(~0) - Start));
        /* if ( Cycles > 2000 ) tmDBGPrintf("LATENCY[%x]", Cycles ); */
    }
    /* BUGFIX: the function is declared STATUS but control fell off the
       end without returning a value (undefined behavior if the IPC layer
       reads the result); return TMOK explicitly. */
    return TMOK;
}
/*
 * chnlDebugDump - trace the channel table pointer and its first six
 * slots for debugging. pBoard is unused.
 * NOTE(review): assumes pChnlTab holds at least six entries - confirm
 * against the table's allocated size before relying on slots 0..5.
 */
VOID chnlDebugDump ( PVOID pBoard )
{
    PTMCHNL_MGR_OBJECT pMgr = GetChnlMgrObject();

    DT(0, ( "chnlDebugDump:pChnlTab[%x]:[0:%x][1:%x][2:%x][3:%x][4:%x][5:%x]\n",
        pMgr->pChnlTab,
        pMgr->pChnlTab[0],
        pMgr->pChnlTab[1],
        pMgr->pChnlTab[2],
        pMgr->pChnlTab[3],
        pMgr->pChnlTab[4],
        pMgr->pChnlTab[5]
        ));
}
/* end of tmchnl.c */