diff --git a/event_groups.c b/event_groups.c
index ae8e20be7d1..d812fe41d12 100644
--- a/event_groups.c
+++ b/event_groups.c
@@ -71,6 +71,8 @@ typedef struct EventGroupDef_t
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif
+
+    portMUX_TYPE xEventGroupLock; /*< Spinlock protecting this event group's data on SMP builds. */
 } EventGroup_t;

 /*-----------------------------------------------------------*/
@@ -125,6 +127,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
             }
         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -176,6 +181,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
             }
         #endif /* configSUPPORT_STATIC_ALLOCATION */

+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
+
         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -272,7 +280,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
             /* The task timed out, just return the current event bit value. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 uxReturn = pxEventBits->uxEventBits;
@@ -289,7 +297,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

             xTimeoutOccurred = pdTRUE;
         }
@@ -423,7 +431,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 /* The task timed out, just return the current event bit value. */
                 uxReturn = pxEventBits->uxEventBits;
@@ -448,7 +456,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                 xTimeoutOccurred = pdTRUE;
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
         }
         else
         {
@@ -479,7 +487,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -490,7 +498,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

     return uxReturn;
 }
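/*
 * Reviewer note: for readers unfamiliar with the parameterised critical
 * section, the sketch below shows the kind of lock the new one-argument
 * taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros expect. Everything here
 * is an illustrative assumption (the names, the use of C11 atomics, the
 * omission of interrupt masking), not the real port layer: the only
 * contract the patched kernel code relies on is "mask interrupts on the
 * local core, then spin until this object's lock is owned".
 */
#include <stdatomic.h>

typedef struct
{
    atomic_flag xLocked; /* Hypothetical stand-in for the portMUX_TYPE internals. */
} ExampleMux_t;

#define EXAMPLE_MUX_INIT    { ATOMIC_FLAG_INIT } /* Analogue of portMUX_INITIALIZER_UNLOCKED. */

static inline void vExampleEnterCritical( ExampleMux_t * pxMux )
{
    /* A real port masks interrupts on the calling core first; that step is
     * omitted here because it is entirely port specific. */
    while( atomic_flag_test_and_set_explicit( &( pxMux->xLocked ), memory_order_acquire ) )
    {
        /* Spin until the core that owns the lock releases it. */
    }
}

static inline void vExampleExitCritical( ExampleMux_t * pxMux )
{
    atomic_flag_clear_explicit( &( pxMux->xLocked ), memory_order_release );
}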
@@ -546,6 +554,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */
     vTaskSuspendAll();
+    #if ( configNUM_CORES > 1 )
+
+        /* We are about to traverse a task list, which is a kernel data
+         * structure, so the kernel lock must be taken first. */
+        vTaskTakeKernelLock();
+    #endif /* configNUM_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -617,6 +631,10 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
              * bit was set in the control word. */
             pxEventBits->uxEventBits &= ~uxBitsToClear;
         }
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        vTaskReleaseKernelLock();
+    #endif /* configNUM_CORES > 1 */
     ( void ) xTaskResumeAll();

     return pxEventBits->uxEventBits;
@@ -629,6 +647,12 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
     const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

     vTaskSuspendAll();
+    #if ( configNUM_CORES > 1 )
+
+        /* We are about to traverse a task list, which is a kernel data
+         * structure, so the kernel lock must be taken first. */
+        vTaskTakeKernelLock();
+    #endif /* configNUM_CORES > 1 */
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
@@ -661,6 +685,10 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             }
         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
     }
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        vTaskReleaseKernelLock();
+    #endif /* configNUM_CORES > 1 */
     ( void ) xTaskResumeAll();
 }
 /*-----------------------------------------------------------*/
diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h
index 60abd75b6ca..2af7ac48238 100644
--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h
@@ -240,6 +240,10 @@
     #define configUSE_TIMERS    0
 #endif

+#ifndef configCHECK_MUTEX_GIVEN_BY_OWNER
+    #define configCHECK_MUTEX_GIVEN_BY_OWNER    0
+#endif
+
 #ifndef configUSE_COUNTING_SEMAPHORES
     #define configUSE_COUNTING_SEMAPHORES    0
 #endif
@@ -1300,6 +1304,7 @@ typedef struct xSTATIC_QUEUE
         UBaseType_t uxDummy8;
         uint8_t ucDummy9;
     #endif
+    portMUX_TYPE xDummyQueueLock;
 } StaticQueue_t;
 typedef StaticQueue_t StaticSemaphore_t;
@@ -1329,6 +1334,7 @@ typedef struct xSTATIC_EVENT_GROUP
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucDummy4;
     #endif
+    portMUX_TYPE xDummyEventGroupLock;
 } StaticEventGroup_t;

 /*
@@ -1356,6 +1362,7 @@ typedef struct xSTATIC_STREAM_BUFFER
     #if ( configUSE_TRACE_FACILITY == 1 )
         UBaseType_t uxDummy4;
     #endif
+    portMUX_TYPE xDummyStreamBufferLock;
 } StaticStreamBuffer_t;

 /*
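/*
 * Reviewer note: the Static* types exist only so applications can reserve
 * correctly sized storage, which is why every lock member added to a real
 * kernel object above gains a matching xDummy...Lock mirror. (This hunk
 * originally targeted StaticTimer_t; it has been corrected to
 * StaticStreamBuffer_t, since StreamBuffer_t is the type that gained a
 * lock while timers.c uses one file-scope lock.) The kernel already
 * verifies that the sizes stay in step at creation time, e.g. inside
 * xEventGroupCreateStatic():
 *
 *     volatile size_t xSize = sizeof( StaticEventGroup_t );
 *     configASSERT( xSize == sizeof( EventGroup_t ) );
 *
 * so a build that misses one of the mirror members fails fast. Typical
 * static creation is unchanged:
 */
#include "FreeRTOS.h"
#include "event_groups.h"

static StaticEventGroup_t xEventGroupBuffer; /* Now sized to include the spinlock. */

void vExampleCreateStatic( void )
{
    EventGroupHandle_t xGroup = xEventGroupCreateStatic( &xEventGroupBuffer );

    configASSERT( xGroup != NULL );
}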
diff --git a/include/task.h b/include/task.h
index ba82f9f0dad..d35f595affd 100644
--- a/include/task.h
+++ b/include/task.h
@@ -203,8 +203,8 @@ typedef enum
 * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskENTER_CRITICAL()               portENTER_CRITICAL()
-#define taskENTER_CRITICAL_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
+#define taskENTER_CRITICAL( x )            portENTER_CRITICAL( x )
+#define taskENTER_CRITICAL_FROM_ISR( x )   portSET_INTERRUPT_MASK_FROM_ISR( x )

 /**
  * task. h
@@ -218,7 +218,7 @@ typedef enum
 * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
 * \ingroup SchedulerControl
 */
-#define taskEXIT_CRITICAL()                portEXIT_CRITICAL()
+#define taskEXIT_CRITICAL( x )             portEXIT_CRITICAL( x )
 #define taskEXIT_CRITICAL_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( x )

 /**
diff --git a/queue.c b/queue.c
index 0233171d6d4..1bb0d626d6d 100644
--- a/queue.c
+++ b/queue.c
@@ -128,6 +128,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
         UBaseType_t uxQueueNumber;
         uint8_t ucQueueType;
     #endif
+
+    portMUX_TYPE xQueueLock; /*< Spinlock protecting this queue's data on SMP builds. */
 } xQUEUE;

 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@@ -247,7 +249,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
  * accessing the queue event lists. */
 #define prvLockQueue( pxQueue )                            \
-    taskENTER_CRITICAL();                                  \
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );        \
     {                                                      \
         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
         {                                                  \
@@ -258,7 +260,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
         }                                                  \
     }                                                      \
-    taskEXIT_CRITICAL()
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
 /*-----------------------------------------------------------*/

 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -268,7 +270,12 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,

     configASSERT( pxQueue );

-    taskENTER_CRITICAL();
+    if( xNewQueue == pdTRUE )
+    {
+        /* Only initialise the spinlock when the queue is first created; a
+         * plain reset must not re-initialise a lock another core may hold. */
+        portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
+    }
+
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
         pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
@@ -307,7 +314,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
             vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     /* A value is returned for calling semantic consistency with previous
      * versions. */
@@ -505,6 +512,9 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             /* In case this is a recursive mutex. */
             pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

+            /* Initialize the mutex's spinlock. */
+            portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
+
             traceCREATE_MUTEX( pxNewQueue );

             /* Start with the semaphore in the expected state. */
@@ -570,7 +580,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
          * calling task is the mutex holder, but not a good way of determining the
          * identity of the mutex holder, as the holder may change between the
          * following critical section exiting and the function returning. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) );
         {
             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
             {
@@ -581,7 +591,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                 pxReturn = NULL;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) );

         return pxReturn;
     } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
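/*
 * Reviewer note: with the task.h macros above now taking a lock argument,
 * application code that needs its own SMP-safe critical section declares a
 * lock and passes it explicitly. A minimal sketch, assuming an
 * ESP-IDF-style port where portMUX_TYPE and portMUX_INITIALIZER_UNLOCKED
 * are available to applications:
 */
#include "FreeRTOS.h"
#include "task.h"

static portMUX_TYPE xAppLock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t ulSharedCounter = 0;

void vExampleIncrementShared( void )
{
    taskENTER_CRITICAL( &xAppLock );
    {
        /* Both cores serialise on xAppLock and interrupts are masked on the
         * calling core, so this read-modify-write cannot be torn. */
        ulSharedCounter++;
    }
    taskEXIT_CRITICAL( &xAppLock );
}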
@@ -790,12 +800,18 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
     }
     #endif

+    #if ( ( configUSE_MUTEXES == 1 ) && ( configCHECK_MUTEX_GIVEN_BY_OWNER == 1 ) )
+        configASSERT( ( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX ) ||
+                      ( pxQueue->u.xSemaphore.xMutexHolder == NULL ) ||
+                      ( pxQueue->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) );
+    #endif
+
     /*lint -save -e904 This function relaxes the coding standard somewhat to
      * allow return statements within the function itself.  This is done in the
      * interest of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             /* Is there room on the queue now?  The running task must be the
              * highest priority task wanting to access the queue.  If the head item
@@ -901,7 +917,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 }
                 #endif /* configUSE_QUEUE_SETS */

-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                 return pdPASS;
             }
             else
@@ -910,7 +926,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 {
                     /* The queue was full and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                     /* Return to the original privilege level before exiting
                      * the function. */
@@ -931,7 +947,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         /* Interrupts and other tasks can send to and receive from the queue
          * now the critical section has been exited. */
@@ -1346,7 +1362,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
      * interest of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1378,7 +1394,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }

-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                 return pdPASS;
             }
             else
@@ -1387,7 +1403,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1405,7 +1421,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         /* Interrupts and other tasks can send to and receive from the queue
          * now the critical section has been exited. */
@@ -1492,7 +1508,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
      * of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             /* Semaphores are queues with an item size of 0, and where the
              * number of messages in the queue is the semaphore's count value. */
@@ -1541,7 +1557,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }

-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                 return pdPASS;
             }
             else
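/*
 * Reviewer note: an illustration of the misuse that the
 * configCHECK_MUTEX_GIVEN_BY_OWNER assertion added above is meant to trap.
 * With the option set to 1 in FreeRTOSConfig.h, the give from the
 * non-holder below trips the configASSERT() in xQueueGenericSend() (which
 * xSemaphoreGive() maps to) instead of silently corrupting the
 * priority-inheritance bookkeeping. Task names and priorities are
 * illustrative only.
 */
#include "FreeRTOS.h"
#include "semphr.h"
#include "task.h"

static SemaphoreHandle_t xMutex;

static void vOwnerTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* This task becomes the holder and (incorrectly) never gives back. */
    ( void ) xSemaphoreTake( xMutex, portMAX_DELAY );

    vTaskSuspend( NULL );
}

static void vImpostorTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* A give from a task that is not the holder: the new assert fires here. */
    ( void ) xSemaphoreGive( xMutex );

    vTaskSuspend( NULL );
}

void vExampleStartMutexDemo( void )
{
    xMutex = xSemaphoreCreateMutex();
    xTaskCreate( vOwnerTask, "owner", configMINIMAL_STACK_SIZE, NULL, 2, NULL );
    xTaskCreate( vImpostorTask, "impostor", configMINIMAL_STACK_SIZE, NULL, 1, NULL );
}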
@@ -1559,7 +1575,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,

                     /* The semaphore count was 0 and no block time is specified
                      * (or the block time has expired) so exit now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                     traceQUEUE_RECEIVE_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1577,7 +1593,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         /* Interrupts and other tasks can give to and take from the semaphore
          * now the critical section has been exited. */
@@ -1600,11 +1616,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                 {
                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                     {
-                        taskENTER_CRITICAL();
+                        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
                         {
                             xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
                         }
-                        taskEXIT_CRITICAL();
+                        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                     }
                     else
                     {
@@ -1652,7 +1668,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                  * test the mutex type again to check it is actually a mutex. */
                 if( xInheritanceOccurred != pdFALSE )
                 {
-                    taskENTER_CRITICAL();
+                    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
                     {
                         UBaseType_t uxHighestWaitingPriority;
@@ -1664,7 +1680,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
                         uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
                         vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
                     }
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
                 }
             }
         #endif /* configUSE_MUTEXES */
@@ -1709,7 +1725,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
      * interest of execution time efficiency. */
     for( ; ; )
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1747,7 +1763,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                     mtCOVERAGE_TEST_MARKER();
                 }

-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                 return pdPASS;
             }
             else
@@ -1756,7 +1772,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 {
                     /* The queue was empty and no block time is specified (or
                      * the block time has expired) so leave now. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

                     traceQUEUE_PEEK_FAILED( pxQueue );
                     return errQUEUE_EMPTY;
                 }
@@ -1775,7 +1791,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         /* Interrupts and other tasks can send to and receive from the queue
          * now that the critical section has been exited. */
@@ -1988,11 +2004,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
 {
     UBaseType_t uxReturn;
+    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

     configASSERT( xQueue );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2005,11 +2021,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )

     configASSERT( pxQueue );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
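/*
 * Reviewer note: the hunks above place xTaskPriorityInherit() and the
 * disinherit-after-timeout path under the mutex's own spinlock rather than
 * a global critical section. For reference, the behaviour they protect (a
 * sketch; task creation omitted):
 */
#include "FreeRTOS.h"
#include "semphr.h"

void vExampleTakeWithTimeout( SemaphoreHandle_t xMutex )
{
    /* If a lower-priority task currently holds xMutex, taking it here
     * raises the holder to this task's priority (xTaskPriorityInherit()).
     * If the 100 ms timeout then expires, the holder's priority is wound
     * back by vTaskPriorityDisinheritAfterTimeout() -- both steps now run
     * inside taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ). */
    if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 100 ) ) == pdPASS )
    {
        /* ... short critical work ... */
        ( void ) xSemaphoreGive( xMutex );
    }
}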
@@ -2239,7 +2255,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
      * removed from the queue while the queue was locked.  When a queue is
      * locked items can be added or removed, but the event lists cannot be
      * updated. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         int8_t cTxLock = pxQueue->cTxLock;
@@ -2317,10 +2333,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )

         pxQueue->cTxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     /* Do the same for the Rx lock. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         int8_t cRxLock = pxQueue->cRxLock;
@@ -2347,7 +2363,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )

         pxQueue->cRxLock = queueUNLOCKED;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
 }
 /*-----------------------------------------------------------*/

@@ -2355,7 +2371,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
         {
@@ -2366,7 +2382,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     return xReturn;
 }
@@ -2396,7 +2412,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
         {
@@ -2407,7 +2423,7 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     return xReturn;
 }
@@ -2878,7 +2894,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     {
         BaseType_t xReturn;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
         {
             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
             {
@@ -2897,7 +2913,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 xReturn = pdPASS;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );

         return xReturn;
     }
@@ -2927,12 +2943,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
         else
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxQueueOrSemaphore->xQueueLock ) );
             {
                 /* The queue is no longer contained in the set. */
                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxQueueOrSemaphore->xQueueLock ) );
             xReturn = pdPASS;
         }
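/*
 * Reviewer note: the queue-set hunks above lock the *member* queue (the
 * pxQueueSetContainer pointer lives in the member, not in the set); the
 * flattened draft referenced a non-existent pxQueue local there, corrected
 * above to the handles the functions actually have in scope. Typical
 * queue-set usage, requiring configUSE_QUEUE_SETS == 1, is unchanged:
 */
#include "FreeRTOS.h"
#include "queue.h"

void vExampleQueueSet( void )
{
    uint32_t ulValue = 42;
    QueueHandle_t xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
    QueueSetHandle_t xSet = xQueueCreateSet( 4 );
    QueueSetMemberHandle_t xReady;
    BaseType_t xStatus;

    /* Registration must happen while the queue is empty; on SMP it is now
     * serialised by the member queue's xQueueLock. */
    xStatus = xQueueAddToSet( xQueue, xSet );
    configASSERT( xStatus == pdPASS );

    ( void ) xQueueSend( xQueue, &ulValue, 0 );

    /* The set reports which member is ready; the member is then drained
     * with a zero block time. */
    xReady = xQueueSelectFromSet( xSet, portMAX_DELAY );

    if( xReady == ( QueueSetMemberHandle_t ) xQueue )
    {
        ( void ) xQueueReceive( xQueue, &ulValue, 0 );
    }
}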
diff --git a/stream_buffer.c b/stream_buffer.c
index 47c8a9a652b..2ddcbaad9db 100644
--- a/stream_buffer.c
+++ b/stream_buffer.c
@@ -151,6 +151,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
     #if ( configUSE_TRACE_FACILITY == 1 )
         UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */
     #endif
+
+    portMUX_TYPE xStreamBufferLock; /*< Spinlock protecting this stream buffer's data on SMP builds. */
 } StreamBuffer_t;

 /*
@@ -276,6 +278,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
                                       xTriggerLevelBytes,
                                       ucFlags );

+        /* Initialize the stream buffer's spinlock separately, as
+         * prvInitialiseNewStreamBuffer() is also called from
+         * xStreamBufferReset(). */
+        portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) );
+
         traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
     }
     else
@@ -351,6 +358,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
              * again. */
             pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;

+            /* Initialize the stream buffer's spinlock separately, as
+             * prvInitialiseNewStreamBuffer() is also called from
+             * xStreamBufferReset(). */
+            portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) );
+
             traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );

             xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
@@ -420,7 +432,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
     #endif

     /* Can only reset a message buffer if there are no tasks blocked on it. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
     {
         if( pxStreamBuffer->xTaskWaitingToReceive == NULL )
         {
@@ -443,7 +455,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
             }
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

     return xReturn;
 }
@@ -580,7 +592,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
         {
             /* Wait until the required number of bytes are free in the message
              * buffer. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
             {
                 xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
@@ -595,11 +607,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
                 }
                 else
                 {
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
                     break;
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

             traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
             ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@@ -778,7 +790,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
         {
             /* Checking if there is data and clearing the notification state must be
              * performed atomically. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
             {
                 xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
@@ -801,7 +813,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

             if( xBytesAvailable <= xBytesToStoreMessageLength )
             {
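/*
 * Reviewer note: the lock is deliberately initialised by the two create
 * paths above rather than inside prvInitialiseNewStreamBuffer(), because
 * the reset path re-runs that helper while the lock is actively held, and
 * re-initialising a held spinlock would release it under the owner's feet.
 * Illustration of the call ordering involved (a sketch of kernel
 * internals, shown from the caller's perspective):
 */
#include "FreeRTOS.h"
#include "stream_buffer.h"

void vExampleResetWhileLocked( StreamBufferHandle_t xStreamBuffer )
{
    /* xStreamBufferReset() internally does:
     *
     *     taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
     *         prvInitialiseNewStreamBuffer( ... );   <-- must NOT touch the lock
     *     taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
     */
    if( xStreamBufferReset( xStreamBuffer ) == pdPASS )
    {
        /* The buffer is empty again and the lock survived the reset intact. */
    }
}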
diff --git a/tasks.c b/tasks.c
index 842ef224e9b..4939bdefa31 100644
--- a/tasks.c
+++ b/tasks.c
@@ -339,6 +339,7 @@ PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Del
 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;             /*< Points to the delayed task list currently being used. */
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;     /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
 PRIVILEGED_DATA static List_t xPendingReadyList;                        /*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */
+PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED; /*< Spinlock protecting the kernel's task lists on SMP builds. */

 #if ( INCLUDE_vTaskDelete == 1 )
@@ -1662,7 +1663,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 {
     /* Ensure interrupts don't access the task lists while the lists are being
      * updated. */
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xKernelLock );
     {
         uxCurrentNumberOfTasks++;
@@ -1729,7 +1730,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xKernelLock );
 }
 /*-----------------------------------------------------------*/

@@ -1740,7 +1741,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t * pxTCB;
         TaskRunning_t xTaskRunningOnCore;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* If null is passed in here then it is the calling task that is
              * being deleted. */
@@ -1831,7 +1832,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* INCLUDE_vTaskDelete */
@@ -1980,13 +1981,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )

         configASSERT( pxTCB );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
             pxDelayedList = pxDelayedTaskList;
             pxOverflowedDelayedList = pxOverflowDelayedTaskList;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
         {
@@ -2074,14 +2075,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t const * pxTCB;
         UBaseType_t uxReturn;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* If null is passed in here then it is the priority of the task
              * that called uxTaskPriorityGet() that is being queried. */
             pxTCB = prvGetTCBFromHandle( xTask );
             uxReturn = pxTCB->uxPriority;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return uxReturn;
     }
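/*
 * Reviewer note: unlike queues and event groups, the ready/delayed/pending
 * lists are shared by every core, and a task migrates between lists that
 * belong to no single object -- hence one global xKernelLock in tasks.c
 * rather than per-object locks. A cross-core access this serialises (task
 * handles and timing are illustrative only):
 */
#include "FreeRTOS.h"
#include "task.h"

void vExampleMonitor( void * pvParameters )
{
    TaskHandle_t xPeer = ( TaskHandle_t ) pvParameters; /* A task running on the other core. */

    for( ; ; )
    {
        /* uxTaskPriorityGet() reads the peer's TCB under
         * taskENTER_CRITICAL( &xKernelLock ), so it cannot observe a
         * half-updated priority while the other core is inside
         * vTaskPrioritySet(). */
        UBaseType_t uxPriority = uxTaskPriorityGet( xPeer );
        ( void ) uxPriority;

        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}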
@@ -2152,7 +2153,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             mtCOVERAGE_TEST_MARKER();
         }

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* If null is passed in here then it is the priority of the calling
              * task that is being changed. */
@@ -2289,7 +2290,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 ( void ) uxPriorityUsedOnEntry;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* INCLUDE_vTaskPrioritySet */
@@ -2304,7 +2305,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t * pxTCB;
         BaseType_t xCoreID;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
        {
            pxTCB = prvGetTCBFromHandle( xTask );
@@ -2323,7 +2324,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                }
            }
        }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* configUSE_CORE_AFFINITY */
@@ -2338,12 +2339,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t * pxTCB;
         UBaseType_t uxCoreAffinityMask;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             pxTCB = prvGetTCBFromHandle( xTask );
             uxCoreAffinityMask = pxTCB->uxCoreAffinityMask;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return uxCoreAffinityMask;
     }
@@ -2359,13 +2360,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     {
         TCB_t * pxTCB;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             pxTCB = prvGetTCBFromHandle( xTask );

             pxTCB->xPreemptionDisable = pdTRUE;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
@@ -2378,7 +2379,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t * pxTCB;
         BaseType_t xCoreID;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             pxTCB = prvGetTCBFromHandle( xTask );
@@ -2393,7 +2394,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
@@ -2406,7 +2407,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         TCB_t * pxTCB;
         TaskRunning_t xTaskRunningOnCore;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* If null is passed in here then it is the running task that is
              * being suspended. */
@@ -2481,11 +2482,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                     prvYieldCore( xTaskRunningOnCore );
                 }

-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &xKernelLock );
             }
             else
             {
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &xKernelLock );

                 configASSERT( pxTCB == pxCurrentTCBs[ xTaskRunningOnCore ] );
@@ -2516,7 +2517,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
             else
             {
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &xKernelLock );
             }
         } /* taskEXIT_CRITICAL() - already exited in one of three cases above */
     }
@@ -2585,7 +2586,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
          * and check if it is actually suspended or not below. */
         if( pxTCB != NULL )
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &xKernelLock );
             {
                 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                 {
@@ -2608,7 +2609,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &xKernelLock );
         }
         else
         {
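/*
 * Reviewer note: the affinity getter/setter above only touch one TCB field,
 * yet they still take xKernelLock because a concurrent vTaskPrioritySet()
 * or vTaskSuspend() on the other core may be re-queueing the same TCB.
 * Example usage, assuming the SMP affinity API these hunks patch is named
 * vTaskCoreAffinitySet()/vTaskCoreAffinityGet() (the hunks do not show the
 * signatures, so the names are an assumption) and a two-core build:
 */
#include "FreeRTOS.h"
#include "task.h"

void vExamplePinToCore0( TaskHandle_t xTask )
{
    /* The mask has one bit per core: bit 0 => may run on core 0. */
    vTaskCoreAffinitySet( xTask, ( UBaseType_t ) ( 1u << 0 ) );

    configASSERT( vTaskCoreAffinityGet( xTask ) == ( UBaseType_t ) ( 1u << 0 ) );
}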
@@ -3041,7 +3042,7 @@ BaseType_t xTaskResumeAll( void )
          * removed task will have been added to the xPendingReadyList.  Once the
          * scheduler has been resumed it is safe to move all the pending ready
          * tasks from this list into their appropriate ready list. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             BaseType_t xCoreID;
@@ -3135,7 +3136,7 @@ BaseType_t xTaskResumeAll( void )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }
     else
     {
@@ -3496,7 +3497,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
              * the event list too.  Interrupts can touch the event list item,
              * even though the scheduler is suspended, so a critical section
              * is used. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &xKernelLock );
             {
                 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                 {
@@ -3512,7 +3513,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &xKernelLock );

             /* Place the unblocked task into the appropriate ready list. */
             prvAddTaskToReadyList( pxTCB );
@@ -3521,11 +3522,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
              * switch if preemption is turned off. */
             #if ( configUSE_PREEMPTION == 1 )
             {
-                taskENTER_CRITICAL();
+                taskENTER_CRITICAL( &xKernelLock );
                 {
                     prvYieldForTask( pxTCB, pdFALSE );
                 }
-                taskEXIT_CRITICAL();
+                taskEXIT_CRITICAL( &xKernelLock );
             }
             #endif /* configUSE_PREEMPTION */
         }
@@ -3553,7 +3554,7 @@ BaseType_t xTaskIncrementTick( void )
         BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE };
     #endif /* configUSE_PREEMPTION */

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xKernelLock );
     {
         /* Called by the portable layer each time a tick interrupt occurs.
          * Increments the tick then checks to see if the new tick value will cause any
@@ -3754,7 +3755,7 @@ BaseType_t xTaskIncrementTick( void )
             #endif
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xKernelLock );

     return xSwitchRequired;
 }
@@ -3780,11 +3781,11 @@ BaseType_t xTaskIncrementTick( void )

         /* Save the hook function in the TCB.  A critical section is required as
          * the value can be accessed from an interrupt. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             xTCB->pxTaskTag = pxHookFunction;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

 #endif /* configUSE_APPLICATION_TASK_TAG */
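/*
 * Reviewer note: on a single core, vTaskSuspendAll() alone is enough to
 * freeze the task lists; on SMP the other core keeps running, which is why
 * xTaskResumeAll() and xTaskIncrementTick() above now also wrap their list
 * manipulation in taskENTER_CRITICAL( &xKernelLock ). The caller-facing
 * pattern is unchanged, but the guarantee it gives is narrower:
 */
#include "FreeRTOS.h"
#include "task.h"

static volatile uint32_t ulSharedState = 0;
static uint32_t ulSnapshot;

void vExampleSuspendAll( void )
{
    vTaskSuspendAll();
    {
        /* No context switch can occur on THIS core.  Tasks on the other
         * core, and all ISRs, may still run -- anything they also touch
         * needs a critical section, not just a scheduler suspension. */
        ulSnapshot = ulSharedState;
    }
    ( void ) xTaskResumeAll();
}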
@@ -3802,11 +3803,11 @@ BaseType_t xTaskIncrementTick( void )

         /* Save the hook function in the TCB.  A critical section is required as
          * the value can be accessed from an interrupt. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             xReturn = pxTCB->pxTaskTag;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return xReturn;
     }
@@ -4110,6 +4111,20 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
 }
 /*-----------------------------------------------------------*/

+#if ( configNUM_CORES > 1 )
+    void vTaskTakeKernelLock( void )
+    {
+        /* Take xKernelLock via the tasks.c critical section macro. */
+        taskENTER_CRITICAL( &xKernelLock );
+    }
+
+    void vTaskReleaseKernelLock( void )
+    {
+        /* Release xKernelLock via the tasks.c critical section macro. */
+        taskEXIT_CRITICAL( &xKernelLock );
+    }
+#endif /* configNUM_CORES > 1 */
+
 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                         const TickType_t xItemValue )
 {
@@ -4149,11 +4164,11 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
     prvAddTaskToReadyList( pxUnblockedTCB );

     #if ( configUSE_PREEMPTION == 1 )
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             prvYieldForTask( pxUnblockedTCB, pdFALSE );
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     #endif
 }
 /*-----------------------------------------------------------*/
@@ -4161,12 +4176,12 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
 void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
 {
     configASSERT( pxTimeOut );
-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xKernelLock );
     {
         pxTimeOut->xOverflowCount = xNumOfOverflows;
         pxTimeOut->xTimeOnEntering = xTickCount;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
@@ -4186,7 +4201,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
     configASSERT( pxTimeOut );
     configASSERT( pxTicksToWait );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xKernelLock );
     {
         /* Minor optimisation.  The tick count cannot change in this block. */
         const TickType_t xConstTickCount = xTickCount;
@@ -4237,7 +4252,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
             xReturn = pdTRUE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xKernelLock );

     return xReturn;
 }
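/*
 * Reviewer note: vTaskTakeKernelLock()/vTaskReleaseKernelLock(), defined
 * above, exist so that other kernel files (event_groups.c earlier in this
 * patch) can walk the task lists; they are kernel-private, and the diff
 * does not show the task.h prototypes, which are assumed to exist. The
 * intended pairing mirrors the xEventGroupSetBits() hunks:
 */
#include "FreeRTOS.h"
#include "task.h"

void vExampleWalkTaskList( void )
{
    vTaskSuspendAll();           /* Stop scheduling on this core first... */
    #if ( configNUM_CORES > 1 )
        vTaskTakeKernelLock();   /* ...then lock out the other core and ISRs. */
    #endif
    {
        /* Safe to traverse kernel-owned lists here. */
    }
    #if ( configNUM_CORES > 1 )
        vTaskReleaseKernelLock();
    #endif
    ( void ) xTaskResumeAll();
}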
@@ -4657,7 +4672,7 @@ static void prvCheckTasksWaitingTermination( void )
          * being called too often in the idle task. */
         while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &xKernelLock );
             {
                 /* Since we are SMP, multiple idles can be running simultaneously
                  * and we need to check that other idles did not cleanup while we were
@@ -4678,12 +4693,12 @@ static void prvCheckTasksWaitingTermination( void )
                     /* The TCB to be deleted still has not yet been switched out
                      * by the scheduler, so we will just exit this loop early and
                      * try again next time. */
-                    taskEXIT_CRITICAL();
+                    taskEXIT_CRITICAL( &xKernelLock );
                     break;
                 }
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }
 }
 #endif /* INCLUDE_vTaskDelete */
@@ -5038,7 +5053,7 @@ static void prvResetNextTaskUnblockTime( void )
     }
     else
     {
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
             {
@@ -5049,7 +5064,7 @@ static void prvResetNextTaskUnblockTime( void )
                 xReturn = taskSCHEDULER_SUSPENDED;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );
     }

     return xReturn;
@@ -5751,7 +5766,7 @@ TickType_t uxTaskResetEventItemValue( void )

         configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* Only block if the notification count is not already non-zero. */
             if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL )
@@ -5780,9 +5795,9 @@ TickType_t uxTaskResetEventItemValue( void )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             traceTASK_NOTIFY_TAKE( uxIndexToWait );
             ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
@@ -5805,7 +5820,7 @@ TickType_t uxTaskResetEventItemValue( void )

             pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return ulReturn;
     }
@@ -5825,7 +5840,7 @@ TickType_t uxTaskResetEventItemValue( void )

         configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* Only block if a notification is not already pending. */
             if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
@@ -5859,9 +5874,9 @@ TickType_t uxTaskResetEventItemValue( void )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             traceTASK_NOTIFY_WAIT( uxIndexToWait );
@@ -5891,7 +5906,7 @@ TickType_t uxTaskResetEventItemValue( void )

             pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return xReturn;
     }
@@ -5915,7 +5930,7 @@ TickType_t uxTaskResetEventItemValue( void )
         configASSERT( xTaskToNotify );
         pxTCB = xTaskToNotify;

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -6009,7 +6024,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return xReturn;
     }
@@ -6245,7 +6260,7 @@ TickType_t uxTaskResetEventItemValue( void )
          * its notification state cleared. */
         pxTCB = prvGetTCBFromHandle( xTask );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
             {
@@ -6257,7 +6272,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 xReturn = pdFAIL;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return xReturn;
     }
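/*
 * Reviewer note: the notification hunks above route the notify state
 * machine through xKernelLock, so a give from an ISR on one core and a
 * take on the other cannot interleave mid-update. Standard usage is
 * unaffected:
 */
#include "FreeRTOS.h"
#include "task.h"

static TaskHandle_t xWorker;

void vExampleIsr( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    vTaskNotifyGiveFromISR( xWorker, &xHigherPriorityTaskWoken );

    /* The exact yield-from-ISR form is port specific. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

void vExampleWorker( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Blocks until the ISR gives; the count is consumed atomically
         * under xKernelLock. */
        ( void ) ulTaskNotifyTake( pdTRUE, portMAX_DELAY );
        /* ... handle the event ... */
    }
}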
@@ -6278,14 +6293,14 @@ TickType_t uxTaskResetEventItemValue( void )
          * its notification state cleared. */
         pxTCB = prvGetTCBFromHandle( xTask );

-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xKernelLock );
         {
             /* Return the notification as it was before the bits were cleared,
              * then clear the bit mask. */
             ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
             pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xKernelLock );

         return ulReturn;
     }
diff --git a/timers.c b/timers.c
index 65455cc8da8..9878014ecbf 100644
--- a/timers.c
+++ b/timers.c
@@ -142,6 +142,7 @@
     PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
     PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
+    PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED; /*< Spinlock protecting the timer structures on SMP builds. */

 /*lint -restore */

 /*-----------------------------------------------------------*/
@@ -483,7 +484,7 @@
     Timer_t * pxTimer = xTimer;

     configASSERT( xTimer );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( uxAutoReload != pdFALSE )
         {
@@ -494,7 +495,7 @@
             pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
 }
 /*-----------------------------------------------------------*/

@@ -504,7 +505,7 @@
     UBaseType_t uxReturn;

     configASSERT( xTimer );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
         {
@@ -517,7 +518,7 @@
             uxReturn = ( UBaseType_t ) pdTRUE;
         }
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );

     return uxReturn;
 }
@@ -993,7 +994,7 @@
         /* Check that the list from which active timers are referenced, and the
          * queue used to communicate with the timer service, have been
          * initialised. */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xTimerLock );
         {
             if( xTimerQueue == NULL )
             {
@@ -1035,7 +1036,7 @@
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xTimerLock );
     }
 /*-----------------------------------------------------------*/

@@ -1047,7 +1048,7 @@
         configASSERT( xTimer );

         /* Is the timer in the list of active timers? */
-        taskENTER_CRITICAL();
+        taskENTER_CRITICAL( &xTimerLock );
         {
             if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
             {
@@ -1058,7 +1059,7 @@
                 xReturn = pdTRUE;
             }
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &xTimerLock );

         return xReturn;
     } /*lint !e818 Can't be pointer to const due to the typedef. */
@@ -1071,11 +1072,11 @@
     configASSERT( xTimer );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         pvReturn = pxTimer->pvTimerID;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );

     return pvReturn;
 }
@@ -1088,11 +1089,11 @@
     configASSERT( xTimer );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &xTimerLock );
     {
         pxTimer->pvTimerID = pvNewID;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &xTimerLock );
 }
 /*-----------------------------------------------------------*/
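/*
 * Reviewer note: timers get a single file-scope xTimerLock (declared
 * `static` above for consistency with xKernelLock; the draft omitted the
 * keyword) rather than a per-timer lock, because every timer is mutated by
 * the one timer service task and the lock only has to exclude the
 * occasional direct accessor. Two such accessors patched above:
 */
#include "FreeRTOS.h"
#include "timers.h"

void vExampleReloadToggle( TimerHandle_t xTimer )
{
    /* Both calls take taskENTER_CRITICAL( &xTimerLock ) internally. */
    vTimerSetReloadMode( xTimer, pdTRUE );

    configASSERT( uxTimerGetReloadMode( xTimer ) == ( UBaseType_t ) pdTRUE );
}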