freertos: use xTaskQueueMutex to protect tick count

Having two different spinlocks is problematic because different code
paths can take the two locks in different orders. Enforcing a single
consistent lock order would require significant restructuring of
kernel code, which is undesirable.

An additional place where taking xTickCountMutex was needed was in
vApplicationSleep function. Without xTickCountMutex held there, the
other CPU could occasionally advance the tick count while light sleep
entry/exit was in progress. Taking xTickCountMutex in addition to
xTaskQueueMutex has shown a problem that in different code paths,
these two spinlocks could be taken in different order, leading to
(unlikely, but possible) deadlocks.
This commit is contained in:
Ivan Grokhotkov 2018-10-12 14:18:49 +08:00 committed by bot
parent 96c2b34eb9
commit 3b3242cbae

View file

@ -302,10 +302,8 @@ when the scheduler is unsuspended. The pending ready list itself can only be
accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
/* We use just one spinlock for all the critical sections. Using a single
lock avoids lock-ordering deadlocks between the task-queue and tick-count
locks; the separate xTickCountMutex has been removed. */
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
#if ( configGENERATE_RUN_TIME_STATS == 1 )
@ -1346,9 +1344,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
{
/* Minor optimisation. The tick count cannot change in this
block. */
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
const TickType_t xConstTickCount = xTickCount;
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* Generate the tick time at which the task wants to wake. */
xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
@ -1455,9 +1451,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
/* Calculate the time to wake - this may overflow but this is
not a problem. */
// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
xTimeToWake = xTickCount + xTicksToDelay;
// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
/* We must remove ourselves from the ready list before adding
ourselves to the blocked list as the same list item is used for
@ -2203,9 +2197,7 @@ void vTaskSuspendAll( void )
}
else
{
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
xReturn = xNextTaskUnblockTime - xTickCount;
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
}
taskEXIT_CRITICAL(&xTaskQueueMutex);
@ -2311,31 +2303,13 @@ BaseType_t xAlreadyYielded = pdFALSE;
TickType_t xTaskGetTickCount( void )
{
	/* Return the current RTOS tick count. The dedicated xTickCountMutex
	has been removed, so the count is read without a critical section.
	NOTE(review): this assumes a TickType_t read is a single atomic word
	access on this port — confirm for 16-bit tick configurations. */
	return xTickCount;
}
/*-----------------------------------------------------------*/
TickType_t xTaskGetTickCountFromISR( void )
{
	/* ISR-safe variant of xTaskGetTickCount(). With xTickCountMutex
	removed, the tick count is read directly with no ISR critical section.
	NOTE(review): assumes a TickType_t read is atomic on this port —
	confirm for 16-bit tick configurations. */
	return xTickCount;
}
/*-----------------------------------------------------------*/
@ -2470,10 +2444,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
/* Correct the tick count value after a period during which the tick
was suppressed. Note this does *not* call the tick hook function for
each stepped tick. */
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
portENTER_CRITICAL( &xTaskQueueMutex );
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
xTickCount += xTicksToJump;
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
portEXIT_CRITICAL( &xTaskQueueMutex );
traceINCREASE_TICK_COUNT( xTicksToJump );
}
@ -2515,14 +2489,10 @@ BaseType_t xSwitchRequired = pdFALSE;
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
{
portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
/* Increment the RTOS tick, switching the delayed and overflowed
delayed lists if it wraps to 0. */
++xTickCount;
portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
//The other CPU may decide to mess with the task queues, so this needs a mux.
taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
{
/* Minor optimisation. The tick count cannot change in this
block. */
@ -3291,7 +3261,7 @@ BaseType_t xReturn;
configASSERT( pxTimeOut );
configASSERT( pxTicksToWait );
taskENTER_CRITICAL(&xTickCountMutex);
taskENTER_CRITICAL(&xTaskQueueMutex);
{
/* Minor optimisation. The tick count cannot change in this block. */
const TickType_t xConstTickCount = xTickCount;
@ -3327,7 +3297,7 @@ BaseType_t xReturn;
xReturn = pdTRUE;
}
}
taskEXIT_CRITICAL(&xTickCountMutex);
taskEXIT_CRITICAL(&xTaskQueueMutex);
return xReturn;
}
@ -4084,7 +4054,7 @@ TCB_t *pxTCB;
{
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
taskENTER_CRITICAL(&xTickCountMutex);
taskENTER_CRITICAL(&xTaskQueueMutex);
/* If the mutex was given back by an interrupt while the queue was
locked then the mutex holder might now be NULL. */
if( pxMutexHolder != NULL )
@ -4141,7 +4111,7 @@ TCB_t *pxTCB;
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL(&xTickCountMutex);
taskEXIT_CRITICAL(&xTaskQueueMutex);
}
@ -4154,7 +4124,7 @@ TCB_t *pxTCB;
{
TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL(&xTickCountMutex);
taskENTER_CRITICAL(&xTaskQueueMutex);
if( pxMutexHolder != NULL )
{
@ -4218,7 +4188,7 @@ TCB_t *pxTCB;
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL(&xTickCountMutex);
taskEXIT_CRITICAL(&xTaskQueueMutex);
return xReturn;
}