Merge branch 'bugfix/freertos_idle_tick_count_v3.1' into 'release/v3.1'
freertos: use xTaskQueueMutex to protect tick count (backport v3.1) See merge request idf/esp-idf!3728
This commit is contained in:
commit
f00c6d1e84
1 changed file with 12 additions and 42 deletions
|
@@ -302,10 +302,8 @@ when the scheduler is unsuspended. The pending ready list itself can only be
|
||||||
accessed from a critical section. */
|
accessed from a critical section. */
|
||||||
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
|
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
|
||||||
|
|
||||||
/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
|
/* We use just one spinlock for all the critical sections. */
|
||||||
that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
|
|
||||||
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
|
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
|
||||||
PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
|
|
||||||
|
|
||||||
#if ( configGENERATE_RUN_TIME_STATS == 1 )
|
#if ( configGENERATE_RUN_TIME_STATS == 1 )
|
||||||
|
|
||||||
|
@@ -1347,9 +1345,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
|
||||||
{
|
{
|
||||||
/* Minor optimisation. The tick count cannot change in this
|
/* Minor optimisation. The tick count cannot change in this
|
||||||
block. */
|
block. */
|
||||||
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
|
||||||
const TickType_t xConstTickCount = xTickCount;
|
const TickType_t xConstTickCount = xTickCount;
|
||||||
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
|
||||||
|
|
||||||
/* Generate the tick time at which the task wants to wake. */
|
/* Generate the tick time at which the task wants to wake. */
|
||||||
xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
|
xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
|
||||||
|
@@ -1456,9 +1452,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
|
||||||
|
|
||||||
/* Calculate the time to wake - this may overflow but this is
|
/* Calculate the time to wake - this may overflow but this is
|
||||||
not a problem. */
|
not a problem. */
|
||||||
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
|
||||||
xTimeToWake = xTickCount + xTicksToDelay;
|
xTimeToWake = xTickCount + xTicksToDelay;
|
||||||
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
|
||||||
|
|
||||||
/* We must remove ourselves from the ready list before adding
|
/* We must remove ourselves from the ready list before adding
|
||||||
ourselves to the blocked list as the same list item is used for
|
ourselves to the blocked list as the same list item is used for
|
||||||
|
@@ -2198,9 +2192,7 @@ void vTaskSuspendAll( void )
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
|
||||||
xReturn = xNextTaskUnblockTime - xTickCount;
|
xReturn = xNextTaskUnblockTime - xTickCount;
|
||||||
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
|
||||||
}
|
}
|
||||||
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
||||||
|
|
||||||
|
@@ -2306,31 +2298,13 @@ BaseType_t xAlreadyYielded = pdFALSE;
|
||||||
|
|
||||||
TickType_t xTaskGetTickCount( void )
|
TickType_t xTaskGetTickCount( void )
|
||||||
{
|
{
|
||||||
TickType_t xTicks;
|
return xTickCount;
|
||||||
|
|
||||||
/* Critical section required if running on a 16 bit processor. */
|
|
||||||
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
|
||||||
{
|
|
||||||
xTicks = xTickCount;
|
|
||||||
}
|
|
||||||
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
|
||||||
|
|
||||||
return xTicks;
|
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
TickType_t xTaskGetTickCountFromISR( void )
|
TickType_t xTaskGetTickCountFromISR( void )
|
||||||
{
|
{
|
||||||
TickType_t xReturn;
|
return xTickCount;
|
||||||
|
|
||||||
taskENTER_CRITICAL_ISR(&xTickCountMutex);
|
|
||||||
{
|
|
||||||
xReturn = xTickCount;
|
|
||||||
// vPortCPUReleaseMutex( &xTickCountMutex );
|
|
||||||
}
|
|
||||||
taskEXIT_CRITICAL_ISR(&xTickCountMutex);
|
|
||||||
|
|
||||||
return xReturn;
|
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
|
@@ -2465,10 +2439,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
|
||||||
/* Correct the tick count value after a period during which the tick
|
/* Correct the tick count value after a period during which the tick
|
||||||
was suppressed. Note this does *not* call the tick hook function for
|
was suppressed. Note this does *not* call the tick hook function for
|
||||||
each stepped tick. */
|
each stepped tick. */
|
||||||
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
portENTER_CRITICAL( &xTaskQueueMutex );
|
||||||
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
|
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
|
||||||
xTickCount += xTicksToJump;
|
xTickCount += xTicksToJump;
|
||||||
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
portEXIT_CRITICAL( &xTaskQueueMutex );
|
||||||
traceINCREASE_TICK_COUNT( xTicksToJump );
|
traceINCREASE_TICK_COUNT( xTicksToJump );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2508,14 +2482,10 @@ BaseType_t xSwitchRequired = pdFALSE;
|
||||||
|
|
||||||
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
|
taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
|
||||||
/* Increment the RTOS tick, switching the delayed and overflowed
|
/* Increment the RTOS tick, switching the delayed and overflowed
|
||||||
delayed lists if it wraps to 0. */
|
delayed lists if it wraps to 0. */
|
||||||
++xTickCount;
|
++xTickCount;
|
||||||
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
|
|
||||||
|
|
||||||
//The other CPU may decide to mess with the task queues, so this needs a mux.
|
|
||||||
taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
|
|
||||||
{
|
{
|
||||||
/* Minor optimisation. The tick count cannot change in this
|
/* Minor optimisation. The tick count cannot change in this
|
||||||
block. */
|
block. */
|
||||||
|
@@ -3280,7 +3250,7 @@ BaseType_t xReturn;
|
||||||
configASSERT( pxTimeOut );
|
configASSERT( pxTimeOut );
|
||||||
configASSERT( pxTicksToWait );
|
configASSERT( pxTicksToWait );
|
||||||
|
|
||||||
taskENTER_CRITICAL(&xTickCountMutex);
|
taskENTER_CRITICAL(&xTaskQueueMutex);
|
||||||
{
|
{
|
||||||
/* Minor optimisation. The tick count cannot change in this block. */
|
/* Minor optimisation. The tick count cannot change in this block. */
|
||||||
const TickType_t xConstTickCount = xTickCount;
|
const TickType_t xConstTickCount = xTickCount;
|
||||||
|
@@ -3316,7 +3286,7 @@ BaseType_t xReturn;
|
||||||
xReturn = pdTRUE;
|
xReturn = pdTRUE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
taskEXIT_CRITICAL(&xTickCountMutex);
|
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
||||||
|
|
||||||
return xReturn;
|
return xReturn;
|
||||||
}
|
}
|
||||||
|
@@ -4077,7 +4047,7 @@ TCB_t *pxTCB;
|
||||||
{
|
{
|
||||||
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
|
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
|
||||||
|
|
||||||
taskENTER_CRITICAL(&xTickCountMutex);
|
taskENTER_CRITICAL(&xTaskQueueMutex);
|
||||||
/* If the mutex was given back by an interrupt while the queue was
|
/* If the mutex was given back by an interrupt while the queue was
|
||||||
locked then the mutex holder might now be NULL. */
|
locked then the mutex holder might now be NULL. */
|
||||||
if( pxMutexHolder != NULL )
|
if( pxMutexHolder != NULL )
|
||||||
|
@@ -4134,7 +4104,7 @@ TCB_t *pxTCB;
|
||||||
mtCOVERAGE_TEST_MARKER();
|
mtCOVERAGE_TEST_MARKER();
|
||||||
}
|
}
|
||||||
|
|
||||||
taskEXIT_CRITICAL(&xTickCountMutex);
|
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -4147,7 +4117,7 @@ TCB_t *pxTCB;
|
||||||
{
|
{
|
||||||
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
|
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
|
||||||
BaseType_t xReturn = pdFALSE;
|
BaseType_t xReturn = pdFALSE;
|
||||||
taskENTER_CRITICAL(&xTickCountMutex);
|
taskENTER_CRITICAL(&xTaskQueueMutex);
|
||||||
|
|
||||||
if( pxMutexHolder != NULL )
|
if( pxMutexHolder != NULL )
|
||||||
{
|
{
|
||||||
|
@@ -4211,7 +4181,7 @@ TCB_t *pxTCB;
|
||||||
mtCOVERAGE_TEST_MARKER();
|
mtCOVERAGE_TEST_MARKER();
|
||||||
}
|
}
|
||||||
|
|
||||||
taskEXIT_CRITICAL(&xTickCountMutex);
|
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
||||||
return xReturn;
|
return xReturn;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue