/* tmchnl.c */
MailSlot.Packet.dwCommand,
MailSlot.Packet.dwArgument[0],
MailSlot.Packet.dwArgument[1],
MailSlot.Packet.dwArgument[2],
MailSlot.Packet.dwArgument[3],
MailSlot.Packet.dwArgument[4] );
}
#endif
if ( dwWriteIndex == pDSP->dwReadIndex )
{
/* no point processing the other channels if ipc has no room */
goto chnlmOnPacketSend_exit;
}
MailSlot.dwPriority = this->PktSendSequence;
this->PktSendSequence++;
pChannel->PacketCounter++;
// dump the packet contents
DP( 2,"TM:[SEND|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
MailSlot.dwPriority,
MailSlot.dwChannel,
MailSlot.Packet.dwCommand,
MailSlot.Packet.dwArgument[0],
MailSlot.Packet.dwArgument[1],
MailSlot.Packet.dwArgument[2],
MailSlot.Packet.dwArgument[3],
MailSlot.Packet.dwArgument[4] );
pDSP->Queue [pDSP->dwWriteIndex] = MailSlot;
pDSP->dwWriteIndex = dwWriteIndex;
cqueueDelete ( pChannel->pQueue, &MailSlot.Packet );
}
}
chnlmOnPacketSend_exit :
vxdLeaveCritical( Flags );
return TMOK;
}
/*
 * chnlmOnAsyncCallback
 *
 * Deferred-procedure-call worker: drains every receive channel's queue and
 * hands each packet to the client's registered OnCallback. A packet is only
 * deleted from the queue AFTER the client accepts it (returns TMOK); if the
 * client has no room we stop servicing that channel and leave the packet
 * queued for the next DPC (see the duplicate-packet history note below).
 *
 * pContext - PTMCHNL_MGR_OBJECT (the channel manager instance).
 * Returns TRUE (note: declared STATUS but callers treat it as a BOOL
 * event-callback result).
 */
STATUS chnlmOnAsyncCallback ( PVOID pContext )
{
    PTMCHNL_MGR_OBJECT this = (PTMCHNL_MGR_OBJECT)pContext;
    PTMCHNL_OBJECT pChannel;
    TMSTD_PACKET Packet;
    DWORD dwIdx;
    STATUS Status;

    /* Re-entrance guard: DPCEntered is 1 while this routine runs, 0 when
       idle. A non-zero value on entry means we were re-entered. */
    if ( this->DPCEntered != 0 )
    {
        /* BUGFIX: the message used to read "== 0" although this branch
           fires when the counter is NOT zero. */
        DP(0,
            "TM:PANIC:chnlmOnAsyncCallback:DPCEntered[%x] != 0:FAIL\n",
            this->DPCEntered );
        /* BUGFIX: was "= 0"; the decrement at the bottom of this routine
           would then leave the counter at -1 and the guard would fire on
           every subsequent entry. Force it to 1 so the exit decrement
           restores the idle value of 0. */
        this->DPCEntered = 1;
    }
    else
    {
        this->DPCEntered++;
    }
    for ( dwIdx = 0 ; dwIdx < this->ChannelCount ; dwIdx ++ )
    {
        pChannel = this->pChnlTab[ dwIdx ];
        if ( ! pChannel )
            continue;
        /* we deal only with the receiving channels here */
        if ( pChannel->Direction != TMCHNL_DIRECTION_RECV )
            continue;
        while ( ! cqueueIsEmpty ( pChannel->pQueue ) )
        {
            /*
            History of the retrieve/delete split:
            1. Retrieve and delete were separated to work around a
               duplicate-packet bug: a single packet inserted in the
               per-channel queue was retrieved twice, because an interrupt
               between the retrieve and the delete could schedule an event
               that retrieved the not-yet-deleted packet again.
            2. The downside of the fix is that a packet which cannot be
               inserted into the advisory queue is dropped forever. This
               can be mitigated (not prevented) by increasing the size of
               the advisory queue.
            3. Condition 1 was put back, but our own interrupts are now
               disabled in the deferred procedure call. Remaining concern:
               an interrupt schedules a DPC and, before it is dispatched,
               another interrupt schedules another DPC (the second interrupt
               fires because the first DPC has not yet executed and our
               interrupt is not yet masked) - condition 1 again.
               Solution - use NT; its kernel does not schedule another DPC
               while one is active.
            */
            DWORD QueueLength;
            QueueLength = cqueueLength(pChannel->pQueue);
            /* diagnostic: flag channels whose backlog exceeds 10 packets */
            if ( QueueLength > 10 )
            {
                DP ( 0,"[C#%x:%x]", dwIdx, QueueLength );
            }
            /* peek the head packet; it stays queued until the client
               accepts it below */
            cqueueRetrieve ( pChannel->pQueue, &Packet );
            DP( 3,"TM:[QRECV|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
                dwIdx,
                Packet.dwCommand,
                Packet.dwArgument[0],
                Packet.dwArgument[1],
                Packet.dwArgument[2],
                Packet.dwArgument[3],
                Packet.dwArgument[4] );
            if ( ! pChannel->OnCallback )
            {
                DP(0,
                    "TM:chnlmOnAsyncCallback:OnPacketRecv:NULL:Chnl#[%x]:FAIL\n",
                    dwIdx );
            }
            /* NOTE(review): if OnCallback is NULL the call below still
               dereferences it - presumably channels are never activated
               without a callback; verify against channel creation code. */
            if ( ( Status = ((TMCHNL_ONRECVPACKET)pChannel->OnCallback) (
                pChannel->pContext, &Packet ) ) == TMOK )
            {
                /* client consumed the packet - now it is safe to delete */
                cqueueDelete ( pChannel->pQueue, &Packet );
            }
            else
            {
                /* no space in the client queue - stop servicing this
                   client; the packet stays queued for the next DPC */
                break;
            }
        }
    }
    this->DPCEntered--;
    return TRUE;
}
/*
 * chnlValidateHandle
 *
 * Sanity-check a caller-supplied channel handle: it must be non-NULL,
 * carry the expected structure size stamp, and be marked allocated.
 *
 * pChnl - opaque handle presumed to be a PTMCHNL_OBJECT.
 * Returns TMOK on a valid handle, TMCHNL_ERR_INVALIDHANDLE otherwise
 * (the failure is also logged).
 */
STATUS chnlValidateHandle ( PVOID pChnl )
{
    PTMCHNL_OBJECT this = (PTMCHNL_OBJECT)pChnl;

    /* short-circuit order matters: the size/flag fields are only
       dereferenced once the pointer itself is known to be non-NULL */
    if ( pChnl &&
         ( this->Size == sizeof(TMCHNL_OBJECT) ) &&
         FlagGet ( this->Flags, TMCHNL_CHNL_FLAGALLOCATED ) )
    {
        return TMOK;
    }
    DP(0,"TM:chnlValidateHandle:pChnl[%x]:FAIL\n",pChnl);
    return TMCHNL_ERR_INVALIDHANDLE;
}
/*
 * chnlHandleToID
 *
 * Translate a validated channel handle into its numeric channel ID.
 *
 * pChnl - opaque channel handle (PTMCHNL_OBJECT).
 * pID   - receives the channel's ID; written only when validation passes.
 * Returns TMOK on success, or the validation error code.
 */
STATUS chnlHandleToID ( PVOID pChnl, PDWORD pID )
{
    PTMCHNL_OBJECT this = (PTMCHNL_OBJECT)pChnl;
    STATUS Status = chnlValidateHandle ( pChnl );

    if ( Status == TMOK )
    {
        *pID = this->ID;
    }
    return Status;
}
/* prototype for callback registered with IPC */
/*
 * chnlmCallback
 *
 * Interrupt-time callback registered with the IPC layer. Drains every
 * mail slot from the ToHost mailbox into the per-channel receive queues,
 * schedules a DPC (global event) to deliver them to clients, gives the
 * send side a chance to refill the ToDSP mailbox, and finally runs the
 * ISR-ready / mailbox-ready handshake with the DSP.
 *
 * InterruptID, Count - supplied by the IPC layer (unused here).
 * pContext           - PTMCHNL_MGR_OBJECT.
 * Returns TMOK.
 */
STATUS chnlmCallback ( DWORD InterruptID, DWORD Count, PVOID pContext )
{
    PTMCHNL_MGR_OBJECT this = (PTMCHNL_MGR_OBJECT)pContext;
    PTMHD_CHNL_MAILQUEUE pDSP = &this->pSharedData->ToDSP;
    PTMHD_CHNL_MAILQUEUE pHost = &this->pSharedData->ToHost;
    PTMHD_CHNL_MAILSLOT pMail;
    PTMCHNL_OBJECT pChannel;
    DWORD dwIdxPacket, dwReadIndex;

    /* walk the ToHost circular mailbox from read index up to write index */
    for ( dwIdxPacket = 0, dwReadIndex = pHost->dwReadIndex;
        ( dwReadIndex != pHost->dwWriteIndex ) ;
        dwReadIndex = ( dwReadIndex + 1 ) % ( TMHD_CHNL_MBOXSLOTS ) ,
        dwIdxPacket++ )
    {
        this->PktRecvCount++;
        pMail = &pHost->Queue[dwReadIndex];
        /* every slot carries a monotonically increasing sequence number;
           a mismatch means we lost or duplicated a packet */
        if ( pMail->dwPriority != this->PktRecvSequence )
        {
            DP(0,"TM:PANIC:chnlmCallback:Sequence:PC[%x]:EC[%x]:FAIL\n",
                pMail->dwPriority, this->PktRecvSequence );
        }
        this->PktRecvSequence ++;
        /* change the way the packet is retrieved directly from the queue */
        pChannel = this->pChnlTab[ pMail->dwChannel ];
        /* dump the packet contents */
        DP( 2,"TM:[RECV|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
            pMail->dwPriority,
            pMail->dwChannel,
            pMail->Packet.dwCommand,
            pMail->Packet.dwArgument[0],
            pMail->Packet.dwArgument[1],
            pMail->Packet.dwArgument[2],
            pMail->Packet.dwArgument[3],
            pMail->Packet.dwArgument[4] );
        if ( chnlValidateHandle ( pChannel ) != TMOK )
        {
            this->PktRecvInvChCount++;
            DP(0,"TM:chnlmOnPacketRecv:chnlValidateHandle:Chnl#[%x]:FAIL\n",
                pMail->dwChannel );
            continue;
        }
        if ( pChannel->Direction != TMCHNL_DIRECTION_RECV )
        {
            this->PktRecvInvDirCount++;
            DP(0,"TM:chnlmOnPacketRecv:Chnl#[%x]->Direction != RECV:FAIL\n",
                pChannel->ID );
            continue;
        }
#ifdef CHNL_DEBUG
        /* argument[4] doubles as a per-channel packet counter in debug
           builds; verify it against our own count */
        if ( pMail->Packet.dwArgument[4] != pChannel->PacketCounter )
        {
            DP( 0,"TM:chnlmCallback:PacketCoutner:Expected[%x]:Received[%x]:ERROR\n",
                pChannel->PacketCounter,
                pMail->Packet.dwArgument[4]);
            DP( 0,"TM:[Callback|SQ:%x|CH:%x|CM:%x|A0:%x|A1:%x|A2:%x|A3:%x|A4:%x]\n",
                pMail->dwPriority,
                pMail->dwChannel,
                pMail->Packet.dwCommand,
                pMail->Packet.dwArgument[0],
                pMail->Packet.dwArgument[1],
                pMail->Packet.dwArgument[2],
                pMail->Packet.dwArgument[3],
                pMail->Packet.dwArgument[4] );
        }
#endif
        pChannel->PacketCounter++;
        if ( cqueueInsert ( pChannel->pQueue, &pMail->Packet ) == FALSE )
        {
            this->PktRecvDropCount++;
            /* this should not be allowed to happen when using packet acks */
            DP( 0,"TM:chnlmOnPacketRecv:cqueueInsert:Chnl#[%x]:FAIL\n",
                pChannel->ID );
            continue;
        }
    }
    /* sucked out all the packets, now publish the new read index */
    pHost->dwReadIndex = dwReadIndex;
    this->DPCReqCount++;
    /* if there is more than one request pending do not queue up any more */
    // if ( this->DPCReqCount > ( this->DPCAckCount + 1 ) );
    // else
    ipcDisableIRQ ( this->pIPC );
#if 1
    // this schedules a global event with
    // GlobalEventCallback ( ebx = VMHandle, edx = RefData, ebp = *Client_Reg_Struct );
    /* NOTE(review): guard probably intends "not running AND not already
       scheduled" i.e. !(Running || Scheduled) - confirm before changing. */
    if ( ! ( this->DPCRunning && this->DPCScheduled ) )
    {
        /* BUGFIX: was "this->DPCScheduled;" - a statement with no effect,
           so the scheduled flag was never set. vxdDPCHandlerC clears it. */
        this->DPCScheduled = TRUE;
        winSchedule_Global_Event ( vxdDPCHandlerA, this, &this->EventHandle );
    }
#else
    // this schedules an event in the system VM.
    // PriorityEventCallback ( ebx = VMHandle, edx = RefData, ebp = *Client_Reg_Struct , edi = ThreadHandle );
    winCall_Priority_VM_Event (
        TIME_CRITICAL_BOOST, //PriorityBoost
        winGet_Sys_VM_Handle(),//VMHandle
        0,//PEF_ALWAYS_SCHED | PEF_WAIT_NOT_CRIT,//Flags
        this, //RefData
        vxdDPCHandlerA,//PriorityEventCallback
        0, //TimeOut
        &this->EventHandle// &EvetnHandle
        );
#endif
    /* don't do send ready callbacks if the to-DSP mailbox is full */
    if ( ! ( ( (pDSP->dwWriteIndex + 1 ) % TMHD_CHNL_MBOXSLOTS ) ==
        pDSP->dwReadIndex ) )
    {
        /* retrieve packets out of the queues and dump them into the mailbox */
        chnlmOnPacketSend ( this );
    }
    /* handshake: tell the DSP whether the mailbox has work or the ISR is idle */
    switch ( this->pSharedData->ToHost.IntCmd )
    {
    case TMHD_CHNL_ISRREADY :
        if ( pDSP->dwReadIndex != pDSP->dwWriteIndex )
        {
            this->pSharedData->ToDSP.IntCmd = TMHD_CHNL_MBOXREADY;
            ipcGenerateIRQ ( this->pIPC, TMHD_IPC_CHNLINTERRUPT);
        }
        break;
    case TMHD_CHNL_MBOXREADY :
        /* if we have messages for the host interrupt it and let it know */
        if ( pDSP->dwReadIndex == pDSP->dwWriteIndex )
        {
            this->pSharedData->ToDSP.IntCmd = TMHD_CHNL_ISRREADY;
        }
        else
        {
            this->pSharedData->ToDSP.IntCmd = TMHD_CHNL_MBOXREADY;
        }
        ipcGenerateIRQ ( this->pIPC, TMHD_IPC_CHNLINTERRUPT);
        break;
    default :
        DP(0, "TM:chnlmCallback:INVALID InterruptID:ID#[%x]:FAIL\n",
            this->pSharedData->ToHost.IntCmd );
        break;
    }
    return TMOK;
}
/*
 * vxdDPCHandlerC - C-level body of the scheduled global event (the "DPC").
 * Brackets the async packet-delivery work with the DPCRunning flag,
 * clears the DPCScheduled flag, and re-enables the IPC IRQ on the way out.
 *
 * dwVMHandle      - VM handle supplied by the event dispatcher (unused).
 * pvContext       - PTMCHNL_MGR_OBJECT passed as RefData at schedule time.
 * pClientRegister - client register struct from the dispatcher (unused).
 */
VOID vxdDPCHandlerC ( DWORD dwVMHandle, PVOID pvContext,
PVOID pClientRegister )
{
PTMCHNL_MGR_OBJECT this = (PTMCHNL_MGR_OBJECT)pvContext;
/* mark running BEFORE clearing scheduled so chnlmCallback's guard
   never sees both flags clear while we are active */
this->DPCRunning = TRUE;
this->DPCScheduled = FALSE;
// the ISR should call chnlQueueDPC and let the channel manager take
// care of synchronization between multiple DPCs.
// queue up a DPC, it will be called the moment IRQL == DISPATCH_LEVEL
// ipcDisableIRQ ( this->pIPC );
this->DPCAckCount++;
/* deliver queued packets to the registered channel callbacks */
chnlmOnAsyncCallback ( this );
/* chnlmCallback disabled the IRQ before scheduling us; re-arm it now */
ipcEnableIRQ ( this->pIPC );
this->DPCRunning = FALSE;
}
/*
Template for writing debug functions
Every string should be less than 80 characters.
Should start with a 4 character definition followed by 5 printable
values, each beginning with a 4 character description, enclosed in square
brackets and separated by colons. Print formatting should be %08x
*/
/*
 * chnlmDebugDump
 *
 * Emit a formatted snapshot of the channel manager state: the manager's
 * counters, then the ToHost and ToDSP shared mailboxes including every
 * pending mail slot in each.
 *
 * pDevice - device object from which the channel manager is obtained.
 */
VOID chnlmDebugDump ( PVOID pDevice )
{
    PTMCHNL_MGR_OBJECT this = GetChnlMgrObject ( pDevice );
    PTMHD_CHNL_MAILQUEUE pDSP = &this->pSharedData->ToDSP;
    PTMHD_CHNL_MAILQUEUE pHost = &this->pSharedData->ToHost;
    PTMHD_CHNL_MAILSLOT pSlot;
    DWORD dwSlot, dwPacketNo;
    CHAR szLine[81];

    /* manager object: sizes, flags and the send/receive counters */
    _Sprintf ( szLine,
        "chnl:SIZE[%08x]:FLAG[%08x]:CCNT[%08x]:ACNT[%08x]:ISNT[%08x]\n",
        this->Size, this->Flags, this->ChannelCount,
        this->AllocatedCount,this->IdxSent );
    DP(0,szLine);
    _Sprintf ( szLine,
        "chnl:SHRD[%08x]:DPCR[%08x]:DPCA[%08x]:SEND[%08x]:SDRP[%08x]\n",
        this->pSharedData, this->DPCReqCount, this->DPCAckCount,
        this->PktSendCount, this->PktSendDropCount);
    DP(0,szLine);
    _Sprintf ( szLine,
        "chnl:RECV[%08x]:RDRP[%08x]:RIND[%08x]:RINC[%08x]\n",
        this->PktRecvCount, this->PktRecvDropCount, this->PktRecvInvDirCount,
        this->PktRecvInvChCount );
    DP(0,szLine);

    /* ToHost mailbox header followed by every pending slot */
    _Sprintf ( szLine,
        "2HST:RIDX[%08x]:WIDX[%08x]:ICMD[%08x]\n",
        this->pSharedData->ToHost.dwReadIndex,
        this->pSharedData->ToHost.dwWriteIndex,
        this->pSharedData->ToHost.IntCmd );
    DP(0,szLine);
    dwPacketNo = 0;
    dwSlot = pHost->dwReadIndex;
    while ( dwSlot != pHost->dwWriteIndex )
    {
        pSlot = &pHost->Queue[dwSlot];
        _Sprintf ( szLine,
            "2HST:PCKT[%08x]:CHNL[%08x]:CMND[%08x]:ARG0[%08x]:ARG1[%08x]\n",
            dwPacketNo, pSlot->dwChannel, pSlot->Packet.dwCommand,
            pSlot->Packet.dwArgument[0],pSlot->Packet.dwArgument[1] );
        DP(0,szLine);
        dwSlot = ( dwSlot + 1 ) % ( TMHD_CHNL_MBOXSLOTS );
        dwPacketNo++;
    }

    /* ToDSP mailbox header followed by every pending slot */
    _Sprintf ( szLine,
        "2TRG:RIDX[%08x]:WIDX[%08x]:ICMD[%08x]\n",
        this->pSharedData->ToDSP.dwReadIndex,
        this->pSharedData->ToDSP.dwWriteIndex,
        this->pSharedData->ToDSP.IntCmd );
    DP(0,szLine);
    dwSlot = pDSP->dwReadIndex;
    while ( dwSlot != pDSP->dwWriteIndex )
    {
        pSlot = &pDSP->Queue[dwSlot];
        _Sprintf ( szLine,
            "2TRG:PSEQ[%08x]:CHNL[%08x]:CMND[%08x]:ARG0[%08x]:ARG1[%08x]\n",
            pSlot->dwPriority, pSlot->dwChannel, pSlot->Packet.dwCommand,
            pSlot->Packet.dwArgument[0],pSlot->Packet.dwArgument[1] );
        DP(0,szLine);
        dwSlot = ( dwSlot + 1 ) % ( TMHD_CHNL_MBOXSLOTS );
    }
}
/* end of tmchnl.c */