queue.s43
??xQueueSend_5:
MOV.W #0x1, R8
// 345
// 346 /* Update the TxLock count so prvUnlockQueue knows to check for
// 347 tasks waiting for data to become available in the queue. */
// 348 ++( pxQueue->xTxLock );
MOV.W R9, R15
ADD.W #0x1, 0x24(R15)
JMP ??xQueueSend_6
// 349 }
// 350 else
// 351 {
// 352 xReturn = errQUEUE_FULL;
??xQueueSend_4:
MOV.W #0xfffd, R8
// 353 }
// 354 }
// 355 taskEXIT_CRITICAL();
??xQueueSend_6:
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueSend_7
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueSend_7
EINT
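//
// [Note] The CMP/ADD/EINT sequence above, and the DINT/NOP sequences that
// appear further down, are the inlined expansions of the MSP430 port's
// critical section macros.  A sketch of what they likely correspond to in
// C follows; the macro names and the exact type of usCriticalNesting are
// assumptions, not taken from this listing:
//
//     extern volatile unsigned short usCriticalNesting;
//
//     #define portENTER_CRITICAL()                          \
//     {                                                     \
//         __disable_interrupt();      /* DINT; NOP */       \
//         usCriticalNesting++;                              \
//     }
//
//     #define portEXIT_CRITICAL()                           \
//     {                                                     \
//         if( usCriticalNesting != 0 )                      \
//         {                                                 \
//             usCriticalNesting--;                          \
//             if( usCriticalNesting == 0 )                  \
//             {                                             \
//                 __enable_interrupt();   /* EINT */        \
//             }                                             \
//         }                                                 \
//     }
//
// Interrupts are only re-enabled when the outermost critical section is
// exited, which is what the two CMP.W tests against usCriticalNesting
// implement.
//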
// 356
// 357 /* We no longer require exclusive access to the queue. prvUnlockQueue
// 358 will remove any tasks suspended on a receive if either this function
// 359 or an ISR has posted onto the queue. */
// 360 if( prvUnlockQueue( pxQueue ) )
??xQueueSend_7:
MOV.W R9, R12
CALL #prvUnlockQueue
CMP.W #0x0, R12
JEQ ??xQueueSend_8
// 361 {
// 362 /* Resume the scheduler - making ready any tasks that were woken
// 363 by an event while the scheduler was locked. Resuming the
// 364 scheduler may cause a yield, in which case there is no point
// 365 yielding again here. */
// 366 if( !xTaskResumeAll() )
CALL #xTaskResumeAll
CMP.W #0x0, R12
JNE ??xQueueSend_9
// 367 {
// 368 taskYIELD();
CALL #vPortYield
JMP ??xQueueSend_9
// 369 }
// 370 }
// 371 else
// 372 {
// 373 /* Resume the scheduler - making ready any tasks that were woken
// 374 by an event while the scheduler was locked. */
// 375 xTaskResumeAll();
??xQueueSend_8:
CALL #xTaskResumeAll
// 376 }
// 377
// 378 return xReturn;
??xQueueSend_9:
MOV.W R8, R12
BR #?Epilogue4
CFI EndBlock cfiBlock1
// 379 }
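//
// [Note] R8, copied into R12 above, carries xReturn back to the caller:
// pdPASS (1) on success or errQUEUE_FULL (0xfffd, i.e. -3 as a signed
// 16-bit value) when the queue stayed full.  A minimal caller-side sketch;
// the queue handle and item are hypothetical:
//
//     if( xQueueSend( xPrintQueue, &cMessage, xTicksToWait ) != pdPASS )
//     {
//         /* The queue was still full after blocking for up to
//         xTicksToWait ticks, so the item was not posted. */
//     }
//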
// 380 /*-----------------------------------------------------------*/
// 381
RSEG CODE:CODE:REORDER:NOROOT(1)
// 382 signed portBASE_TYPE xQueueSendFromISR( xQueueHandle pxQueue, const void *pvItemToQueue, signed portBASE_TYPE xTaskPreviouslyWoken )
xQueueSendFromISR:
CFI Block cfiBlock2 Using cfiCommon0
CFI Function xQueueSendFromISR
// 383 {
FUNCALL xQueueSendFromISR, memcpy
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSendFromISR, xTaskRemoveFromEventList
LOCFRAME CSTACK, 8, STACK
PUSH.W R10
CFI R10 Frame(CFA, -4)
CFI CFA SP+4
PUSH.W R11
CFI R11 Frame(CFA, -6)
CFI CFA SP+6
PUSH.W R8
CFI R8 Frame(CFA, -8)
CFI CFA SP+8
MOV.W R12, R10
MOV.W R14, R11
MOV.W 0x8(SP), R8
// 384 /* Similar to xQueueSend, except we don't block if there is no room in the
// 385 queue. Also we don't directly wake a task that was blocked on a queue
// 386 read, instead we return a flag to say whether a context switch is required
// 387 or not (i.e. has a task with a higher priority than us been woken by this
// 388 post). */
// 389 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
CMP.W 0x1e(R10), 0x1c(R10)
JC ??xQueueSendFromISR_0
// 390 {
// 391 prvCopyQueueData( pxQueue, pvItemToQueue );
PUSH.W 0x20(R10)
CFI CFA SP+10
MOV.W R11, R14
MOV.W 0x4(R10), R12
CALL #memcpy
MOV.W R10, R15
ADD.W #0x1, 0x1c(R15)
MOV.W R10, R15
ADD.W 0x20(R10), 0x4(R15)
ADD.W #0x2, SP
CFI CFA SP+8
CMP.W 0x2(R10), 0x4(R10)
JNC ??xQueueSendFromISR_1
MOV.W @R10, 0x4(R10)
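//
// [Note] The memcpy/increment/wrap sequence above is the inlined body of
// prvCopyQueueData(), reconstructed here from the structure offsets used
// by the compiler (0x00 pcHead, 0x02 pcTail, 0x04 pcWriteTo,
// 0x1c uxMessagesWaiting, 0x20 uxItemSize).  The field names follow the
// FreeRTOS queue structure and are inferred, not present in this listing:
//
//     memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue,
//             ( unsigned ) pxQueue->uxItemSize );
//     ++( pxQueue->uxMessagesWaiting );
//     pxQueue->pcWriteTo += pxQueue->uxItemSize;
//     if( pxQueue->pcWriteTo >= pxQueue->pcTail )
//     {
//         /* Wrap the write pointer back to the start of the storage
//         area - the queue storage is used as a ring buffer. */
//         pxQueue->pcWriteTo = pxQueue->pcHead;
//     }
//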
// 392
// 393 /* If the queue is locked we do not alter the event list. This will
// 394 be done when the queue is unlocked later. */
// 395 if( pxQueue->xTxLock == queueUNLOCKED )
??xQueueSendFromISR_1:
CMP.W #0xffff, 0x24(R10)
JNE ??xQueueSendFromISR_2
// 396 {
// 397 /* We only want to wake one task per ISR, so check that a task has
// 398 not already been woken. */
// 399 if( !xTaskPreviouslyWoken )
CMP.W #0x0, R8
JNE ??xQueueSendFromISR_0
// 400 {
// 401 if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )
CMP.W #0x0, 0x12(R10)
JEQ ??xQueueSendFromISR_0
// 402 {
// 403 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
MOV.W R10, R12
ADD.W #0x12, R12
CALL #xTaskRemoveFromEventList
CMP.W #0x0, R12
JEQ ??xQueueSendFromISR_0
// 404 {
// 405 /* The task waiting has a higher priority so record that a
// 406 context switch is required. */
// 407 return pdTRUE;
MOV.W #0x1, R12
JMP ??xQueueSendFromISR_3
// 408 }
// 409 }
// 410 }
// 411 }
// 412 else
// 413 {
// 414 /* Increment the lock count so the task that unlocks the queue
// 415 knows that data was posted while it was locked. */
// 416 ++( pxQueue->xTxLock );
??xQueueSendFromISR_2:
MOV.W R10, R15
ADD.W #0x1, 0x24(R15)
// 417 }
// 418 }
// 419
// 420 return xTaskPreviouslyWoken;
??xQueueSendFromISR_0:
MOV.W R8, R12
??xQueueSendFromISR_3:
BR #?Epilogue3
CFI EndBlock cfiBlock2
// 421 }
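//
// [Note] In this FreeRTOS version the third xQueueSendFromISR() parameter
// is the "a task has already been woken" flag, and the updated flag is
// the return value, so it can be threaded through several posts made from
// the same interrupt.  A minimal usage sketch; the ISR name, queue handle
// and received byte are hypothetical, and the compiler-specific interrupt
// attribute/vector syntax is omitted:
//
//     void vRxISR( void )     /* declared with the port's ISR syntax */
//     {
//         portBASE_TYPE xTaskWoken = pdFALSE;
//
//         xTaskWoken = xQueueSendFromISR( xRxQueue, &cRxByte, xTaskWoken );
//         if( xTaskWoken )
//         {
//             /* A task waiting on the queue has a higher priority than
//             the interrupted task - request a context switch before
//             returning from the interrupt. */
//             taskYIELD();
//         }
//     }
//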
// 422 /*-----------------------------------------------------------*/
// 423
RSEG CODE:CODE:REORDER:NOROOT(1)
// 424 signed portBASE_TYPE xQueueReceive( xQueueHandle pxQueue, void *pvBuffer, portTickType xTicksToWait )
xQueueReceive:
CFI Block cfiBlock3 Using cfiCommon0
CFI Function xQueueReceive
// 425 {
FUNCALL xQueueReceive, vTaskSuspendAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, prvIsQueueEmpty
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, vTaskPlaceOnEventList
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, prvUnlockQueue
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, vPortYield
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, vTaskSuspendAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, memcpy
LOCFRAME CSTACK, 12, STACK
FUNCALL xQueueReceive, prvUnlockQueue
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, vPortYield
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueReceive, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
PUSH.W R10
CFI R10 Frame(CFA, -4)
CFI CFA SP+4
PUSH.W R11
CFI R11 Frame(CFA, -6)
CFI CFA SP+6
PUSH.W R8
CFI R8 Frame(CFA, -8)
CFI CFA SP+8
PUSH.W R9
CFI R9 Frame(CFA, -10)
CFI CFA SP+10
MOV.W R12, R9
MOV.W R14, R10
MOV.W 0xa(SP), R11
// 426 signed portBASE_TYPE xReturn;
// 427
// 428 /* This function is very similar to xQueueSend(). See comments within
// 429 xQueueSend() for a more detailed explanation.
// 430
// 431 Make sure other tasks do not access the queue. */
// 432 vTaskSuspendAll();
CALL #vTaskSuspendAll
// 433
// 434 /* Make sure interrupts do not access the queue. */
// 435 prvLockQueue( pxQueue );
DINT
NOP
ADD.W #0x1, &usCriticalNesting
MOV.W R9, R15
ADD.W #0x1, 0x22(R15)
MOV.W R9, R15
ADD.W #0x1, 0x24(R15)
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueReceive_0
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueReceive_0
EINT
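//
// [Note] The sequence above (enter critical, bump both lock counts, leave
// critical) is the inlined expansion of prvLockQueue() using structure
// offsets 0x22 (xRxLock) and 0x24 (xTxLock).  A sketch of the equivalent
// macro; the field names are inferred from the queue structure rather
// than taken from this listing:
//
//     #define prvLockQueue( pxQueue )     \
//     {                                   \
//         taskENTER_CRITICAL();           \
//         ++( pxQueue->xRxLock );         \
//         ++( pxQueue->xTxLock );         \
//         taskEXIT_CRITICAL();            \
//     }
//
// Once locked, both counts are non-negative, so xQueueSendFromISR() above
// sees xTxLock != queueUNLOCKED (0xffff) and defers waking any receiving
// task until prvUnlockQueue() runs.
//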
// 436
// 437 /* If there are no messages in the queue we may have to block. */
// 438 if( prvIsQueueEmpty( pxQueue ) )
??xQueueReceive_0:
MOV.W R9, R12
CALL #prvIsQueueEmpty
CMP.W #0x0, R12
JEQ ??xQueueReceive_1
// 439 {
// 440 /* There are no messages in the queue, do we want to block or just
// 441 leave with nothing? */
// 442 if( xTicksToWait > ( portTickType ) 0 )
CMP.W #0x0, R11
JEQ ??xQueueReceive_1
// 443 {
// 444 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
MOV.W R11, R14
MOV.W R9, R12
ADD.W #0x12, R12
CALL #vTaskPlaceOnEventList
// 445 taskENTER_CRITICAL();
DINT
NOP
ADD.W #0x1, &usCriticalNesting
// 446 {
// 447 prvUnlockQueue( pxQueue );
MOV.W R9, R12
CALL #prvUnlockQueue
// 448 if( !xTaskResumeAll() )
CALL #xTaskResumeAll
CMP.W #0x0, R12
JNE ??xQueueReceive_2
// 449 {
// 450 taskYIELD();
CALL #vPortYield
// 451 }
// 452
// 453 vTaskSuspendAll();
??xQueueReceive_2:
CALL #vTaskSuspendAll
// 454 prvLockQueue( pxQueue );
DINT
NOP
ADD.W #0x1, &usCriticalNesting
MOV.W R9, R15
ADD.W #0x1, 0x22(R15)
MOV.W R9, R15
ADD.W #0x1, 0x24(R15)
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueReceive_3
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueReceive_3
EINT
// 455 }
// 456 taskEXIT_CRITICAL();
??xQueueReceive_3:
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueReceive_1
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueReceive_1
EINT
// 457 }
// 458 }
// 459
// 460 taskENTER_CRITICAL();
??xQueueReceive_1:
DINT
NOP
ADD.W #0x1, &usCriticalNesting
// 461 {
// 462 if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
CMP.W #0x0, 0x1c(R9)
JEQ ??xQueueReceive_4
// 463 {
// 464 pxQueue->pcReadFrom += pxQueue->uxItemSize;
MOV.W R9, R15
ADD.W 0x20(R9), 0x6(R15)
// 465 if( pxQueue->pcReadFrom >= pxQueue->pcTail )
CMP.W 0x2(R9), 0x6(R9)
JNC ??xQueueReceive_5
// 466 {
// 467 pxQueue->pcReadFrom = pxQueue->pcHead;
MOV.W @R9, 0x6(R9)
// 468 }
// 469 --( pxQueue->uxMessagesWaiting );
??xQueueReceive_5:
MOV.W R9, R15
ADD.W #0xffff, 0x1c(R15)
// 470 memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
PUSH.W 0x20(R9)
CFI CFA SP+12
MOV.W 0x6(R9), R14
MOV.W R10, R12
CALL #memcpy
// 471
// 472 /* Increment the lock count so prvUnlockQueue knows to check for
// 473 tasks waiting for space to become available on the queue. */
// 474 ++( pxQueue->xRxLock );
MOV.W R9, R15
ADD.W #0x1, 0x22(R15)