queue.s43
CFI R9 Frame(CFA, -10)
CFI CFA SP+10
PUSH.W R6
CFI R6 Frame(CFA, -12)
CFI CFA SP+12
MOV.W R12, R10
MOV.W R14, R11
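// (xQueueCreate's parameters uxQueueLength and uxItemSize arrive in
// R12 and R14 under the IAR MSP430 calling convention and are copied
// into the callee-saved registers R10 and R11 so they survive the
// calls below.)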
// 184 xQUEUE *pxNewQueue;
// 185 size_t xQueueSizeInBytes;
// 186
// 187 /* Allocate the new queue structure. */
// 188 if( uxQueueLength > ( unsigned portBASE_TYPE ) 0 )
CMP.W #0x0, R10
JEQ ??xQueueCreate_0
// 189 {
// 190 pxNewQueue = ( xQUEUE * ) pvPortMalloc( sizeof( xQUEUE ) );
MOV.W #0x26, R12
CALL #pvPortMalloc
MOV.W R12, R8
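// (The immediate #0x26 (38 bytes) is sizeof( xQUEUE ) for this build;
// the returned pointer is held in R8 as pxNewQueue.)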
// 191 if( pxNewQueue != NULL )
CMP.W #0x0, R8
JEQ ??xQueueCreate_0
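// (xQUEUE field offsets, inferred from the accesses below; the last
// field ends at 0x26, matching the size passed to pvPortMalloc above:
//   0x00  pcHead                   0x1c  uxMessagesWaiting
//   0x02  pcTail                   0x1e  uxLength
//   0x04  pcWriteTo                0x20  uxItemSize
//   0x06  pcReadFrom               0x22  xRxLock
//   0x08  xTasksWaitingToSend      0x24  xTxLock
//   0x12  xTasksWaitingToReceive)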
// 192 {
// 193 /* Create the list of pointers to queue items. The queue is one byte
// 194 longer than asked for to make wrap checking easier/faster. */
// 195 xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1;
MOV.W R10, R12
MOV.W R11, R14
CALL #?Mul16
ADD.W #0x1, R12
MOV.W R12, R9
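// (?Mul16 is the compiler's 16x16-bit multiply helper, taking its
// operands in R12 and R14 and returning the product in R12; the extra
// byte added afterwards is what makes the wrap check below cheap.)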
// 196
// 197 pxNewQueue->pcHead = ( signed portCHAR * ) pvPortMalloc( xQueueSizeInBytes );
MOV.W R9, R12
CALL #pvPortMalloc
MOV.W R12, 0(R8)
// 198 if( pxNewQueue->pcHead != NULL )
CMP.W #0x0, 0(R8)
JEQ ??xQueueCreate_1
// 199 {
// 200 /* Initialise the queue members as described above where the
// 201 queue type is defined. */
// 202 pxNewQueue->pcTail = pxNewQueue->pcHead + ( uxQueueLength * uxItemSize );
MOV.W @R8, R6
MOV.W R10, R12
MOV.W R11, R14
CALL #?Mul16
ADD.W R12, R6
MOV.W R6, 0x2(R8)
// 203 pxNewQueue->uxMessagesWaiting = 0;
MOV.W #0x0, 0x1c(R8)
// 204 pxNewQueue->pcWriteTo = pxNewQueue->pcHead;
MOV.W @R8, 0x4(R8)
// 205 pxNewQueue->pcReadFrom = pxNewQueue->pcHead + ( ( uxQueueLength - 1 ) * uxItemSize );
MOV.W @R8, R6
MOV.W R10, R12
ADD.W #0xffff, R12
MOV.W R11, R14
CALL #?Mul16
ADD.W R12, R6
MOV.W R6, 0x6(R8)
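// (ADD.W #0xffff, R12 subtracts 1 in 16-bit arithmetic, forming the
// ( uxQueueLength - 1 ) factor before the ?Mul16 call.)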
// 206 pxNewQueue->uxLength = uxQueueLength;
MOV.W R10, 0x1e(R8)
// 207 pxNewQueue->uxItemSize = uxItemSize;
MOV.W R11, 0x20(R8)
// 208 pxNewQueue->xRxLock = queueUNLOCKED;
MOV.W #0xffff, 0x22(R8)
// 209 pxNewQueue->xTxLock = queueUNLOCKED;
MOV.W #0xffff, 0x24(R8)
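// (queueUNLOCKED is ( signed portBASE_TYPE ) -1, stored here as the
// 16-bit value 0xffff.)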
// 210
// 211 /* Likewise ensure the event queues start with the correct state. */
// 212 vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
MOV.W R8, R12
ADD.W #0x8, R12
CALL #vListInitialise
// 213 vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );
MOV.W R8, R12
ADD.W #0x12, R12
CALL #vListInitialise
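// (Each event list is passed by address in R12; the 0x8 and 0x12
// offsets imply sizeof( xList ) is 10 bytes in this build.)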
// 214
// 215 return pxNewQueue;
MOV.W R8, R12
JMP ??xQueueCreate_2
// 216 }
// 217 else
// 218 {
// 219 vPortFree( pxNewQueue );
??xQueueCreate_1:
MOV.W R8, R12
CALL #vPortFree
// 220 }
// 221 }
// 222 }
// 223
// 224 /* Will only reach here if we could not allocate enough memory or no memory
// 225 was required. */
// 226 return NULL;
??xQueueCreate_0:
MOV.W #0x0, R12
??xQueueCreate_2:
BR #?Epilogue5
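// (?Epilogue5 is presumably a shared compiler epilogue that restores
// the five registers pushed in the prologue and returns, saving the
// cost of an inline epilogue in every function.)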
CFI EndBlock cfiBlock0
// 227 }
// 228 /*-----------------------------------------------------------*/
// 229
RSEG CODE:CODE:REORDER:NOROOT(1)
// 230 signed portBASE_TYPE xQueueSend( xQueueHandle pxQueue, const void *pvItemToQueue, portTickType xTicksToWait )
xQueueSend:
CFI Block cfiBlock1 Using cfiCommon0
CFI Function xQueueSend
// 231 {
FUNCALL xQueueSend, vTaskSuspendAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, prvIsQueueFull
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, vTaskPlaceOnEventList
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, prvUnlockQueue
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, vPortYield
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, vTaskSuspendAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, memcpy
LOCFRAME CSTACK, 12, STACK
FUNCALL xQueueSend, prvUnlockQueue
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, vPortYield
LOCFRAME CSTACK, 10, STACK
FUNCALL xQueueSend, xTaskResumeAll
LOCFRAME CSTACK, 10, STACK
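// (The FUNCALL/LOCFRAME records above are IAR directives that feed the
// linker's static stack-usage analysis; they describe the calls
// xQueueSend makes and produce no code.)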
PUSH.W R10
CFI R10 Frame(CFA, -4)
CFI CFA SP+4
PUSH.W R11
CFI R11 Frame(CFA, -6)
CFI CFA SP+6
PUSH.W R8
CFI R8 Frame(CFA, -8)
CFI CFA SP+8
PUSH.W R9
CFI R9 Frame(CFA, -10)
CFI CFA SP+10
MOV.W R12, R9
MOV.W R14, R10
MOV.W 0xa(SP), R11
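// (pxQueue arrives in R12 and is kept in R9; pvItemToQueue arrives in
// R14 and is kept in R10; the third argument, xTicksToWait, is passed
// on the stack and read from 0xa(SP) into R11 after the four pushes.)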
// 232 signed portBASE_TYPE xReturn;
// 233
// 234 /* Make sure other tasks do not access the queue. */
// 235 vTaskSuspendAll();
CALL #vTaskSuspendAll
// 236
// 237 /* It is important that this is the only thread/ISR that modifies the
// 238 ready or delayed lists until xTaskResumeAll() is called. Places where
// 239 the ready/delayed lists are modified include:
// 240
// 241 + vTaskDelay() - Nothing can call vTaskDelay() as the scheduler is
// 242 suspended, and vTaskDelay() cannot be called from an ISR.
// 243 + vTaskPrioritySet() - Has a critical section around the access.
// 244 + vTaskSwitchContext() - This will not get executed while the scheduler
// 245 is suspended.
// 246 + prvCheckDelayedTasks() - This will not get executed while the
// 247 scheduler is suspended.
// 248 + xTaskCreate() - Has a critical section around the access.
// 249 + vTaskResume() - Has a critical section around the access.
// 250 + xTaskResumeAll() - Has a critical section around the access.
// 251 + xTaskRemoveFromEventList - Checks to see if the scheduler is
// 252 suspended. If so then the TCB being removed from the event list
// 253 is removed from the event list and added to the xPendingReadyList.
// 254 */
// 255
// 256 /* Make sure interrupts do not access the queue event list. */
// 257 prvLockQueue( pxQueue );
DINT
NOP
ADD.W #0x1, &usCriticalNesting
MOV.W R9, R15
ADD.W #0x1, 0x22(R15)
MOV.W R9, R15
ADD.W #0x1, 0x24(R15)
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueSend_0
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueSend_0
EINT
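// (The block above is the inline expansion of prvLockQueue(). DINT
// needs the following NOP because an MSP430 interrupt disable only
// takes effect after one more instruction. taskENTER_CRITICAL() bumps
// usCriticalNesting, the lock counts at 0x22/0x24 are incremented,
// then taskEXIT_CRITICAL() decrements the nesting count (ADD.W
// #0xffff) and executes EINT only when it reaches zero. A sketch of
// the macro this expands, consistent with the classic FreeRTOS
// definition:
//
//   #define prvLockQueue( pxQueue )      \
//   {                                    \
//       taskENTER_CRITICAL();            \
//       ++( pxQueue->xRxLock );          \
//       ++( pxQueue->xTxLock );          \
//       taskEXIT_CRITICAL();             \
//   }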
// 258
// 259 /* It is important that interrupts do not access the event list of the
// 260 queue being modified here. Places where the event list is modified
// 261 include:
// 262
// 263 + xQueueSendFromISR(). This checks the lock on the queue to see if
// 264 it has access. If the queue is locked then the Tx lock count is
// 265 incremented to signify that a task waiting for data can be made ready
// 266 once the queue lock is removed. If the queue is not locked then
// 267 a task can be moved from the event list, but will not be removed
// 268 from the delayed list or placed in the ready list until the scheduler
// 269 is unlocked.
// 270
// 271 + xQueueReceiveFromISR(). As per xQueueSendFromISR().
// 272 */
// 273
// 274 /* If the queue is already full we may have to block. */
// 275 if( prvIsQueueFull( pxQueue ) )
??xQueueSend_0:
MOV.W R9, R12
CALL #prvIsQueueFull
CMP.W #0x0, R12
JEQ ??xQueueSend_1
// 276 {
// 277 /* The queue is full - do we want to block or just leave without
// 278 posting? */
// 279 if( xTicksToWait > ( portTickType ) 0 )
CMP.W #0x0, R11
JEQ ??xQueueSend_1
// 280 {
// 281 /* We are going to place ourselves on the xTasksWaitingToSend event
// 282 list, and will get woken should the delay expire, or space become
// 283 available on the queue.
// 284
// 285 As detailed above we do not require mutual exclusion on the event
// 286 list as nothing else can modify it or the ready lists while we
// 287 have the scheduler suspended and queue locked.
// 288
// 289 It is possible that an ISR has removed data from the queue since we
// 290 checked if any was available. If this is the case then the data
// 291 will have been copied from the queue, and the queue variables
// 292 updated, but the event list will not yet have been checked to see if
// 293 anything is waiting as the queue is locked. */
// 294 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
MOV.W R11, R14
MOV.W R9, R12
ADD.W #0x8, R12
CALL #vTaskPlaceOnEventList
// 295
// 296 /* Force a context switch now as we are blocked. We can do
// 297 this from within a critical section as the task we are
// 298 switching to has its own context. When we return here (i.e. we
// 299 unblock) we will leave the critical section as normal.
// 300
// 301 It is possible that an ISR has caused an event on an unrelated and
// 302 unlocked queue. If this was the case then the event list for that
// 303 queue will have been updated but the ready lists left unchanged -
// 304 instead the readied task will have been added to the pending ready
// 305 list. */
// 306 taskENTER_CRITICAL();
DINT
NOP
ADD.W #0x1, &usCriticalNesting
// 307 {
// 308 /* We can safely unlock the queue and scheduler here as
// 309 interrupts are disabled. We must not yield with anything
// 310 locked, but we can yield from within a critical section.
// 311
// 312 Tasks that have been placed on the pending ready list cannot
// 313 be tasks that are waiting for events on this queue. See the
// 314 comment in xTaskRemoveFromEventList(). */
// 315 prvUnlockQueue( pxQueue );
MOV.W R9, R12
CALL #prvUnlockQueue
// 316
// 317 /* Resuming the scheduler may cause a yield. If so then there
// 318 is no point yielding again here. */
// 319 if( !xTaskResumeAll() )
CALL #xTaskResumeAll
CMP.W #0x0, R12
JNE ??xQueueSend_2
// 320 {
// 321 taskYIELD();
CALL #vPortYield
// 322 }
// 323
// 324 /* Before leaving the critical section we have to ensure
// 325 exclusive access again. */
// 326 vTaskSuspendAll();
??xQueueSend_2:
CALL #vTaskSuspendAll
// 327 prvLockQueue( pxQueue );
DINT
NOP
ADD.W #0x1, &usCriticalNesting
MOV.W R9, R15
ADD.W #0x1, 0x22(R15)
MOV.W R9, R15
ADD.W #0x1, 0x24(R15)
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueSend_3
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueSend_3
EINT
// 328 }
// 329 taskEXIT_CRITICAL();
??xQueueSend_3:
CMP.W #0x0, &usCriticalNesting
JEQ ??xQueueSend_1
ADD.W #0xffff, &usCriticalNesting
CMP.W #0x0, &usCriticalNesting
JNE ??xQueueSend_1
EINT
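// (taskEXIT_CRITICAL() expands just as inside prvLockQueue above:
// decrement usCriticalNesting, and execute EINT only once the nesting
// count is back to zero.)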
// 330 }
// 331 }
// 332
// 333 /* When we are here it is possible that we unblocked as space became
// 334 available on the queue. It is also possible that an ISR posted to the
// 335 queue since we left the critical section, so it may be that again there
// 336 is no space. This would only happen if a task and ISR post onto the
// 337 same queue. */
// 338 taskENTER_CRITICAL();
??xQueueSend_1:
DINT
NOP
ADD.W #0x1, &usCriticalNesting
// 339 {
// 340 if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
CMP.W 0x1e(R9), 0x1c(R9)
JC ??xQueueSend_4
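// (CMP.W computes uxMessagesWaiting - uxLength; JC takes the branch
// when the queue is already full, i.e. the unsigned inverse of the C
// condition on source line 340.)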
// 341 {
// 342 /* There is room in the queue, copy the data into the queue. */
// 343 prvCopyQueueData( pxQueue, pvItemToQueue );
PUSH.W 0x20(R9)
CFI CFA SP+12
MOV.W R10, R14
MOV.W 0x4(R9), R12
CALL #memcpy
MOV.W R9, R15
ADD.W #0x1, 0x1c(R15)
MOV.W R9, R15
ADD.W 0x20(R9), 0x4(R15)
ADD.W #0x2, SP
CFI CFA SP+10
CMP.W 0x2(R9), 0x4(R9)
JNC ??xQueueSend_5
MOV.W @R9, 0x4(R9)
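// (Inline expansion of prvCopyQueueData(): uxItemSize at 0x20(R9) is
// pushed as memcpy's stack-passed third argument for
// memcpy( pcWriteTo, pvItemToQueue, uxItemSize ); then
// uxMessagesWaiting is incremented, pcWriteTo advances by uxItemSize,
// and the final CMP/JNC wraps pcWriteTo back to pcHead once it passes
// pcTail, the check the extra allocation byte was reserved for.)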
// 344 xReturn = pdPASS;