Merge branch 'bugfix/freertos_event_groups' into 'master'

FreeRTOS: Fix cross-core event group sync.

Also includes fixes that allowed removing some semi-hacky bits from the event group unit tests; specifically, higher-priority tasks are now always started immediately, even if they run on the opposite core.

See merge request !535

commit ccbc6183c3
4 changed files with 120 additions and 79 deletions
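For orientation before the diff: the change gives each event group its own spinlock in place of the single file-scope xEventGroupMux, and makes task creation pick a target core and yield the other core when the new task outranks whatever is running there. Below is a minimal, self-contained sketch of that core-selection rule for the dual-core case; `task_t`, `choose_core` and `NO_AFFINITY` are simplified stand-ins of mine, not the real FreeRTOS names.

```c
#include <stddef.h>

/* Sketch of the core-selection rule added to prvAddNewTaskToReadyList
 * (dual-core case). task_t, choose_core and NO_AFFINITY are illustrative
 * stand-ins, not the actual FreeRTOS types. */
#define NO_AFFINITY (-1)

typedef struct { int prio; } task_t;

static int choose_core(int affinity, int new_prio,
                       const task_t *running0, const task_t *running1,
                       int current_core)
{
    if (affinity != NO_AFFINITY) {
        return affinity;            /* pinned task: honour its affinity */
    }
    if (running0 == NULL) {
        return 0;                   /* nothing scheduled on core 0 yet */
    }
    if (running1 == NULL) {
        return 1;                   /* nothing scheduled on core 1 yet */
    }
    /* Otherwise target the core running the lowest-priority task we outrank. */
    if (running0->prio < new_prio && running0->prio < running1->prio) {
        return 0;
    }
    if (running1->prio < new_prio) {
        return 1;
    }
    return current_core;            /* both cores run higher-priority tasks: queue locally */
}
```

Once the core is chosen, the diff yields locally via taskYIELD_IF_USING_PREEMPTION_MUX when that core is the calling one, and via taskYIELD_OTHER_CORE otherwise, which is what lets a higher-priority task start immediately on the opposite core.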
components/freertos/event_groups.c

@@ -119,13 +119,10 @@ typedef struct xEventGroupDefinition
         UBaseType_t uxEventGroupNumber;
     #endif
 
+    portMUX_TYPE eventGroupMux;
 } EventGroup_t;
 
-
-/* Again: one mux for all events. Maybe this can be made more granular. ToDo: look into that. -JD */
-static portMUX_TYPE xEventGroupMux = portMUX_INITIALIZER_UNLOCKED;
-
 
 /*-----------------------------------------------------------*/
 
 /*

@@ -156,6 +153,8 @@ EventGroup_t *pxEventBits;
         traceEVENT_GROUP_CREATE_FAILED();
     }
 
+    vPortCPUInitializeMutex(&pxEventBits->eventGroupMux);
+
     return ( EventGroupHandle_t ) pxEventBits;
 }
 /*-----------------------------------------------------------*/

@@ -176,6 +175,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
     #endif
 
     vTaskSuspendAll();
+    taskENTER_CRITICAL(&pxEventBits->eventGroupMux);
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;
 

@@ -217,6 +217,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
             }
         }
     }
+    taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
     xAlreadyYielded = xTaskResumeAll();
 
     if( xTicksToWait != ( TickType_t ) 0 )

@@ -239,7 +240,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
             /* The task timed out, just return the current event bit value. */
-            taskENTER_CRITICAL( &xEventGroupMux );
+            taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
             {
                 uxReturn = pxEventBits->uxEventBits;
 

@@ -256,7 +257,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL( &xEventGroupMux );
+            taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
             xTimeoutOccurred = pdTRUE;
         }

@@ -295,6 +296,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
     #endif
 
     vTaskSuspendAll();
+    taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
 

@@ -361,6 +363,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
             traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
         }
     }
+    taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
     xAlreadyYielded = xTaskResumeAll();
 
     if( xTicksToWait != ( TickType_t ) 0 )

@@ -382,7 +385,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
-            taskENTER_CRITICAL( &xEventGroupMux );
+            taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
             {
                 /* The task timed out, just return the current event bit value. */
                 uxReturn = pxEventBits->uxEventBits;

@@ -405,7 +408,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL( &xEventGroupMux );
+            taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
             /* Prevent compiler warnings when trace macros are not used. */
             xTimeoutOccurred = pdFALSE;

@@ -434,7 +437,7 @@ EventBits_t uxReturn;
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
 
-    taskENTER_CRITICAL( &xEventGroupMux );
+    taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
 

@@ -445,7 +448,7 @@ EventBits_t uxReturn;
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL( &xEventGroupMux );
+    taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
     return uxReturn;
 }

@@ -498,7 +501,9 @@ BaseType_t xMatchFound = pdFALSE;
 
     pxList = &( pxEventBits->xTasksWaitingForBits );
    pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
 
     vTaskSuspendAll();
+    taskENTER_CRITICAL(&pxEventBits->eventGroupMux);
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
 

@@ -570,6 +575,7 @@ BaseType_t xMatchFound = pdFALSE;
             bit was set in the control word. */
             pxEventBits->uxEventBits &= ~uxBitsToClear;
         }
+    taskEXIT_CRITICAL(&pxEventBits->eventGroupMux);
     ( void ) xTaskResumeAll();
 
     return pxEventBits->uxEventBits;

@@ -578,10 +584,11 @@ BaseType_t xMatchFound = pdFALSE;
 
 void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 {
 EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
 const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
 
     vTaskSuspendAll();
+    taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
 

@@ -593,6 +600,7 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
             ( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
 
+        taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
         vPortFree( pxEventBits );
     }
     ( void ) xTaskResumeAll();
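On the event_groups.c side the public API is untouched; what changes is contention, since unrelated event groups no longer serialise on one global spinlock. A hedged illustration of the user-visible effect (assumes an ESP-IDF application environment; group_a, group_b, the worker names and the app_main wiring are mine):

```c
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"

#define WORK_BIT (1 << 0)

/* Illustration only: two independent event groups driven from the two cores.
 * With the per-group eventGroupMux these calls no longer contend on a single
 * global lock; the API and semantics are otherwise unchanged. */
static EventGroupHandle_t group_a;
static EventGroupHandle_t group_b;

static void core0_worker(void *arg)
{
    for (;;) {
        xEventGroupSetBits(group_a, WORK_BIT);  /* only locks group_a's mux */
        vTaskDelay(1);
    }
}

static void core1_worker(void *arg)
{
    for (;;) {
        xEventGroupSetBits(group_b, WORK_BIT);  /* only locks group_b's mux */
        vTaskDelay(1);
    }
}

void app_main(void)
{
    group_a = xEventGroupCreate();
    group_b = xEventGroupCreate();
    xTaskCreatePinnedToCore(core0_worker, "work0", 2048, NULL, 5, NULL, 0);
    xTaskCreatePinnedToCore(core1_worker, "work1", 2048, NULL, 5, NULL, 1);
}
```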
components/freertos/tasks.c

@@ -1046,25 +1046,59 @@ UBaseType_t x;
 }
 /*-----------------------------------------------------------*/
 
-static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID )
+static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
 {
-TCB_t *curTCB;
-BaseType_t i;
+TCB_t *curTCB, *tcb0, *tcb1;
 
     /* Ensure interrupts don't access the task lists while the lists are being
     updated. */
     taskENTER_CRITICAL(&xTaskQueueMutex);
     {
         uxCurrentNumberOfTasks++;
-        //If the task has no affinity and nothing is scheduled on this core, just throw it this core.
-        //If it has affinity, throw it on the core that needs it if nothing is already scheduled there.
-        BaseType_t xMyCore = xCoreID;
-        if ( xMyCore == tskNO_AFFINITY) xMyCore = xPortGetCoreID();
-        if( pxCurrentTCB[ xMyCore ] == NULL )
+        // Determine which core this task starts on
+        if ( xCoreID == tskNO_AFFINITY )
+        {
+            if ( portNUM_PROCESSORS == 1 )
+            {
+                xCoreID = 0;
+            }
+            else
+            {
+                // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
+                // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
+                // queue it on the currently running core.
+                tcb0 = pxCurrentTCB[0];
+                tcb1 = pxCurrentTCB[1];
+                if ( tcb0 == NULL )
+                {
+                    xCoreID = 0;
+                }
+                else if ( tcb1 == NULL )
+                {
+                    xCoreID = 1;
+                }
+                else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
+                {
+                    xCoreID = 0;
+                }
+                else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
+                {
+                    xCoreID = 1;
+                }
+                else
+                {
+                    xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
+                }
+            }
+        }
 
+        // If nothing is running on this core, put the new task there now
+        if( pxCurrentTCB[ xCoreID ] == NULL )
         {
             /* There are no other tasks, or all the other tasks are in
             the suspended state - make this the current task. */
-            pxCurrentTCB[ xMyCore ] = pxNewTCB;
+            pxCurrentTCB[ xCoreID ] = pxNewTCB;
 
             if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
             {

@@ -1090,19 +1124,11 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
             so far. */
             if( xSchedulerRunning == pdFALSE )
             {
-                /* Scheduler isn't running yet. We need to determine on which CPU to run this task. */
-                for ( i=0; i<portNUM_PROCESSORS; i++ )
+                /* Scheduler isn't running yet. We need to determine on which CPU to run this task.
+                   Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
+                if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
                 {
-                    /* Can we schedule this task on core i? */
-                    if (xCoreID == tskNO_AFFINITY || xCoreID == i)
-                    {
-                        /* Schedule if nothing is scheduled yet, or overwrite a task of lower prio. */
-                        if ( pxCurrentTCB[i] == NULL || pxCurrentTCB[i]->uxPriority <= pxNewTCB->uxPriority )
-                        {
-                            pxCurrentTCB[i] = pxNewTCB;
-                            break;
-                        }
-                    }
+                    pxCurrentTCB[xCoreID] = pxNewTCB;
                 }
             }
             else

@@ -1130,37 +1156,27 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
 
     if( xSchedulerRunning != pdFALSE )
     {
         taskENTER_CRITICAL(&xTaskQueueMutex);
-        curTCB = pxCurrentTCB[ xPortGetCoreID() ];
+        curTCB = pxCurrentTCB[ xCoreID ];
         /* Scheduler is running. If the created task is of a higher priority than an executing task
            then it should run now.
-           ToDo: This only works for the current core. If a task is scheduled on an other processor,
-           the other processor will keep running the task it's working on, and only switch to the newer
-           task on a timer interrupt. */
-        //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
-        if( curTCB->uxPriority < pxNewTCB->uxPriority )
+        */
+        if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
         {
-            /* Scheduler is running. If the created task is of a higher priority than an executing task
-               then it should run now.
-               No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
-            */
-            if( tskCAN_RUN_HERE( xCoreID ) && curTCB->uxPriority < pxNewTCB->uxPriority )
+            if( xCoreID == xPortGetCoreID() )
             {
                 taskYIELD_IF_USING_PREEMPTION_MUX(&xTaskQueueMutex);
             }
-            else if( xCoreID != xPortGetCoreID() ) {
+            else {
                 taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
             }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
         }
         else
         {
             mtCOVERAGE_TEST_MARKER();
         }
         taskEXIT_CRITICAL(&xTaskQueueMutex);
     }
     else
     {
 
freertos unit tests: event groups

@@ -11,9 +11,10 @@
 #define BIT_RESPONSE(TASK) (1 << (TASK+1))
 #define ALL_RESPONSE_BITS (((1 << NUM_TASKS) - 1) << 1)
 
-static const int NUM_TASKS = 4;
-static const int COUNT = 4000;
+static const int NUM_TASKS = 8;
+static const int COUNT = 1000;
 static EventGroupHandle_t eg;
+static SemaphoreHandle_t done_sem;
 
 static void task_event_group_call_response(void *param)
 {

@@ -24,47 +25,51 @@ static void task_event_group_call_response(void *param)
     for (int i = 0; i < COUNT; i++) {
         /* Wait until the common "call" bit is set, starts off all tasks
            (clear on return) */
-        while (!xEventGroupWaitBits(eg, BIT_CALL, true, false, portMAX_DELAY)) {
-        }
+        TEST_ASSERT( xEventGroupWaitBits(eg, BIT_CALL, true, false, portMAX_DELAY) );
 
         /* Set our individual "response" bit */
         xEventGroupSetBits(eg, BIT_RESPONSE(task_num));
     }
 
     printf("Task %d done\n", task_num);
-    /* Delay is due to not-yet-fixed bug with deleting tasks at same time */
-    vTaskDelay(100 / portTICK_PERIOD_MS);
+    xSemaphoreGive(done_sem);
     vTaskDelete(NULL);
 }
 
-TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]")
+TEST_CASE("FreeRTOS Event Groups", "[freertos]")
 {
     eg = xEventGroupCreate();
+    done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
 
-    /* Note: task_event_group_call_response all have higher priority than us, so will block together.
+    /* Note: task_event_group_call_response all have higher priority than this task, so on this core
+       they will always preempt this task.
 
-       This is important because we need to know they'll all have blocked on BIT_CALL each time we
-       signal it, or they get out of sync.
+       This is important because we need to know all tasks have blocked on BIT_CALL each time we signal it,
+       or they get out of sync.
     */
     for (int c = 0; c < NUM_TASKS; c++) {
         xTaskCreatePinnedToCore(task_event_group_call_response, "tsk_call_resp", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
     }
-    /* Scheduler weirdness, if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */
-    vTaskDelay(10);
+    /* Tasks all start instantly, but this task will resume running at the same time as the higher priority tasks on the
+       other processor may still be setting up, so give a tick for them to also block on BIT_CALL... */
+    vTaskDelay(1);
 
     for (int i = 0; i < COUNT; i++) {
-        if (i % 100 == 0) {
-            //printf("Call %d\n", i);
-        }
         /* signal all tasks with "CALL" bit... */
         xEventGroupSetBits(eg, BIT_CALL);
 
-        while (xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, portMAX_DELAY) != ALL_RESPONSE_BITS) {
-        }
+        TEST_ASSERT_EQUAL_HEX16(ALL_RESPONSE_BITS, xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, 100 / portMAX_DELAY));
     }
-}
 
+    /* Ensure all tasks cleaned up correctly */
+    for (int c = 0; c < NUM_TASKS; c++) {
+        TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) );
+    }
+
+    vSemaphoreDelete(done_sem);
+    vEventGroupDelete(eg);
+}
 
 #define BIT_DONE(X) (1<<(NUM_TASKS+1+X))
 

@@ -82,24 +87,32 @@ static void task_test_sync(void *param)
     }
     int after_done = xEventGroupSetBits(eg, BIT_DONE(task_num));
 
-    printf("Done %d = %x\n", task_num, after_done);
+    printf("Done %d = 0x%08x\n", task_num, after_done);
 
-    /* Delay is due to not-yet-fixed bug with deleting tasks at same time */
-    vTaskDelay(100 / portTICK_PERIOD_MS);
+    xSemaphoreGive(done_sem);
     vTaskDelete(NULL);
 }
 
-TEST_CASE("FreeRTOS Event Group Sync", "[freertos][ignore]")
+TEST_CASE("FreeRTOS Event Group Sync", "[freertos]")
 {
     eg = xEventGroupCreate();
+    done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
 
     for (int c = 0; c < NUM_TASKS; c++) {
         xTaskCreatePinnedToCore(task_test_sync, "task_test_sync", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
     }
 
     for (int c = 0; c < NUM_TASKS; c++) {
-        printf("Waiting on %d (%x)\n", c, BIT_DONE(c));
-        xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY);
+        printf("Waiting on %d (0x%08x)\n", c, BIT_DONE(c));
+        TEST_ASSERT( xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY) );
     }
 
+    /* Ensure all tasks cleaned up correctly */
+    for (int c = 0; c < NUM_TASKS; c++) {
+        TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) );
+    }
+
+    vSemaphoreDelete(done_sem);
+    vEventGroupDelete(eg);
 }
 
freertos unit tests: task deletion

@@ -10,13 +10,17 @@
 static void task_delete_self(void *param)
 {
     printf("Task %p running on core %d. Deleting shortly...\n", xTaskGetCurrentTaskHandle(), xPortGetCoreID());
+    vTaskDelay(5);
     vTaskDelete(NULL);
 }
 
-TEST_CASE("FreeRTOS Delete Tasks", "[freertos][ignore]")
+TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
 {
+    uint32_t before_count = uxTaskGetNumberOfTasks();
+
     xTaskCreatePinnedToCore(task_delete_self, "tsk_self_a", 4096, NULL, configMAX_PRIORITIES - 1, NULL, 0);
     xTaskCreatePinnedToCore(task_delete_self, "tsk_self_a", 4096, NULL, configMAX_PRIORITIES - 1, NULL, 0);
+    TEST_ASSERT_EQUAL(before_count + 2, uxTaskGetNumberOfTasks());
     vTaskDelay(200 / portTICK_PERIOD_MS);
-    printf("Done?\n");
+    TEST_ASSERT_EQUAL(before_count, uxTaskGetNumberOfTasks());
 }