Merge branch 'bugfix/xTaskIncrementTick_v3.2' into 'release/v3.2'

freertos: Fix xTaskIncrementTick to unwind pended ticks on CPU1 (v3.2)

See merge request idf/esp-idf!5035
Jiang Jiang Jian 2019-06-30 18:01:45 +08:00
commit c6c1d089cc
2 changed files with 202 additions and 40 deletions


@@ -2464,12 +2464,11 @@ BaseType_t xSwitchRequired = pdFALSE;
 	Increments the tick then checks to see if the new tick value will cause any
 	tasks to be unblocked. */
 
-	/* Only let core 0 increase the tick count, to keep accurate track of time. */
-	/* ToDo: This doesn't really play nice with the logic below: it means when core 1 is
-	running a low-priority task, it will keep running it until there is a context
-	switch, even when this routine (running on core 0) unblocks a bunch of high-priority
-	tasks... this is less than optimal -- JD. */
-	if ( xPortGetCoreID()!=0 ) {
+	/* Only allow core 0 to increase the tick count, in the case of xPortSysTickHandler processing. */
+	/* And allow core 0 and core 1 to unwind uxPendedTicks during xTaskResumeAll. */
+
+	if ( xPortInIsrContext() )
+	{
 		#if ( configUSE_TICK_HOOK == 1 )
 		vApplicationTickHook();
 		#endif /* configUSE_TICK_HOOK */
@@ -2477,11 +2476,10 @@ BaseType_t xSwitchRequired = pdFALSE;
 		esp_vApplicationTickHook();
 		#endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
 
-		/*
-		We can't really calculate what we need, that's done on core 0... just assume we need a switch.
-		ToDo: Make this more intelligent? -- JD
-		*/
-		return pdTRUE;
+		if (xPortGetCoreID() == 1 )
+		{
+			return pdTRUE;
+		}
 	}
@@ -2606,39 +2604,11 @@ BaseType_t xSwitchRequired = pdFALSE;
 		}
 		#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
 
-		{
-			/* Guard against the tick hook being called when the pended tick
-			count is being unwound (when the scheduler is being unlocked). */
-			if( uxPendedTicks == ( UBaseType_t ) 0U )
-			{
-				#if ( configUSE_TICK_HOOK == 1 )
-				vApplicationTickHook();
-				#endif /* configUSE_TICK_HOOK */
-
-				#if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
-				esp_vApplicationTickHook();
-				#endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
-			}
-			else
-			{
-				mtCOVERAGE_TEST_MARKER();
-			}
-		}
 		taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
 	}
 	else
 	{
 		++uxPendedTicks;
-
-		/* The tick hook gets called at regular intervals, even if the
-		scheduler is locked. */
-		#if ( configUSE_TICK_HOOK == 1 )
-		{
-			vApplicationTickHook();
-		}
-		#endif
-
-		#if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
-		esp_vApplicationTickHook();
-		#endif /* CONFIG_FREERTOS_LEGACY_HOOKS */
 	}
 
 	#if ( configUSE_PREEMPTION == 1 )
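
For context on the new comments: ticks that arrive while the scheduler is suspended are not lost. The `else` branch above counts them in uxPendedTicks, and xTaskResumeAll() replays them through xTaskIncrementTick() when the scheduler is unlocked. That replay runs in task context on whichever core resumes the scheduler, which is exactly the path this fix now lets both cores take. Below is a self-contained toy model of that unwind loop, patterned on the stock FreeRTOS implementation; the names mirror the kernel's, but this is an illustration, not the ESP-IDF source.

/* toy_unwind.c - standalone model of the uxPendedTicks unwind performed by
 * xTaskResumeAll().  Compiles with any C compiler; no FreeRTOS needed, the
 * "kernel" side is simulated. */
#include <stdio.h>

typedef unsigned int UBaseType_t;
typedef int BaseType_t;
#define pdFALSE ( ( BaseType_t ) 0 )
#define pdTRUE  ( ( BaseType_t ) 1 )

static UBaseType_t xTickCount    = 0;  /* global tick, normally advanced by core 0's tick ISR */
static UBaseType_t uxPendedTicks = 0;  /* ticks missed while the scheduler was suspended */

/* Stand-in for xTaskIncrementTick() called in task context during the unwind:
 * it advances the tick and reports whether an unblocked task needs a switch. */
static BaseType_t prvIncrementTick( void )
{
    ++xTickCount;
    return ( xTickCount == 5 ) ? pdTRUE : pdFALSE;  /* pretend a delay expires at tick 5 */
}

int main( void )
{
    BaseType_t xYieldRequired = pdFALSE;

    uxPendedTicks = 8;  /* eight tick interrupts arrived while suspended */

    /* The unwind loop from xTaskResumeAll(): replay each pended tick so that
     * tasks whose timeout expired during the suspension become ready at once.
     * With the fix above it behaves the same whether core 0 or core 1 runs it. */
    while( uxPendedTicks > ( UBaseType_t ) 0U )
    {
        if( prvIncrementTick() != pdFALSE )
        {
            xYieldRequired = pdTRUE;
        }
        --uxPendedTicks;
    }

    printf( "xTickCount=%u, yield required=%d\n", xTickCount, xYieldRequired );
    return 0;
}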


@@ -1,9 +1,11 @@
 /* Tests for FreeRTOS task suspend & resume */
 #include <stdio.h>
+#include <string.h>
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
 #include "freertos/semphr.h"
+#include "freertos/timers.h"
 #include "freertos/queue.h"
 #include "freertos/xtensa_api.h"
 #include "unity.h"
@@ -11,6 +13,11 @@
 #include "driver/timer.h"
+#include "esp_ipc.h"
+#include "esp_freertos_hooks.h"
+#include "sdkconfig.h"
+
 /* Counter task counts a target variable forever */
 static void task_count(void *vp_counter)
 {
@@ -170,4 +177,189 @@ TEST_CASE("Resume task from ISR (other core)", "[freertos]")
 {
     test_resume_task_from_isr(!UNITY_FREERTOS_CPU);
 }
-#endif
+
+static volatile bool block;
+static bool suspend_both_cpus;
+
+static void IRAM_ATTR suspend_scheduler_while_block_set(void* arg)
+{
+    vTaskSuspendAll();
+    while (block) { };
+    ets_delay_us(1);
+    xTaskResumeAll();
+}
+
+static void IRAM_ATTR suspend_scheduler_on_both_cpus(void)
+{
+    block = true;
+    if (suspend_both_cpus) {
+        TEST_ESP_OK(esp_ipc_call((xPortGetCoreID() == 0) ? 1 : 0, &suspend_scheduler_while_block_set, NULL));
+    }
+    vTaskSuspendAll();
+}
+
+static void IRAM_ATTR resume_scheduler_on_both_cpus(void)
+{
+    block = false;
+    xTaskResumeAll();
+}
+
+static const int waiting_ms = 2000;
+static const int delta_ms = 100;
+
+static int duration_wait_task_ms;
+static int duration_ctrl_task_ms;
+
+static void waiting_task(void *pvParameters)
+{
+    int cpu_id = xPortGetCoreID();
+    int64_t start_time = esp_timer_get_time();
+    printf("Start waiting_task cpu=%d\n", cpu_id);
+    vTaskDelay(waiting_ms / portTICK_PERIOD_MS);
+    duration_wait_task_ms = (esp_timer_get_time() - start_time) / 1000;
+    printf("Finish waiting_task cpu=%d, time=%d ms\n", cpu_id, duration_wait_task_ms);
+    vTaskDelete(NULL);
+}
+
+static void control_task(void *pvParameters)
+{
+    int cpu_id = xPortGetCoreID();
+    ets_delay_us(2000); // let the waiting_task start first
+    printf("Start control_task cpu=%d\n", cpu_id);
+    int64_t start_time = esp_timer_get_time();
+    suspend_scheduler_on_both_cpus();
+    ets_delay_us(waiting_ms * 1000 + delta_ms * 1000);
+    resume_scheduler_on_both_cpus();
+    duration_ctrl_task_ms = (esp_timer_get_time() - start_time) / 1000;
+    printf("Finish control_task cpu=%d, time=%d ms\n", cpu_id, duration_ctrl_task_ms);
+    vTaskDelete(NULL);
+}
+
+static void test_scheduler_suspend1(int cpu)
+{
+    /* This test covers the case where both CPUs are suspended and then resumed.
+     * A task whose delay elapses while the scheduler is suspended must become
+     * ready to run as soon as the scheduler resumes.
+     * (With the old xTaskIncrementTick implementation, the tick counting for
+     * waiting_task() would continue, excluding the time spent suspended, only
+     * after control_task() finished.)
+     */
+    duration_wait_task_ms = 0;
+    duration_ctrl_task_ms = 0;
+
+    printf("Test for CPU%d\n", cpu);
+    int other_cpu = (cpu == 0) ? 1 : 0;
+    xTaskCreatePinnedToCore(&waiting_task, "waiting_task", 8192, NULL, 5, NULL, other_cpu);
+    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
+    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
+
+    TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_ctrl_task_ms);
+    if (suspend_both_cpus == false && cpu == 1) {
+        // CPU0 keeps incrementing the tick count, so waiting_task does not
+        // depend on the suspended scheduler of CPU1.
+        TEST_ASSERT_INT_WITHIN(2, waiting_ms, duration_wait_task_ms);
+    } else {
+        TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_wait_task_ms);
+    }
+    printf("\n");
+}
+
+TEST_CASE("Test the waiting task not missed due to scheduler suspension on both CPUs", "[freertos]")
+{
+    printf("Suspend both CPUs:\n");
+    suspend_both_cpus = true;
+    test_scheduler_suspend1(0);
+    test_scheduler_suspend1(1);
+}
+
+TEST_CASE("Test the waiting task not missed due to scheduler suspension on one CPU", "[freertos]")
+{
+    printf("Suspend only one CPU:\n");
+    suspend_both_cpus = false;
+    test_scheduler_suspend1(0);
+    test_scheduler_suspend1(1);
+}
+
+static uint32_t count_tick[2];
+
+static void IRAM_ATTR tick_hook(void)
+{
+    ++count_tick[xPortGetCoreID()];
+}
+
+static void test_scheduler_suspend2(int cpu)
+{
+    esp_register_freertos_tick_hook_for_cpu(tick_hook, 0);
+    esp_register_freertos_tick_hook_for_cpu(tick_hook, 1);
+    memset(count_tick, 0, sizeof(count_tick));
+
+    printf("Test for CPU%d\n", cpu);
+    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
+    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
+    esp_deregister_freertos_tick_hook(tick_hook);
+
+    printf("count_tick[cpu0] = %d, count_tick[cpu1] = %d\n", count_tick[0], count_tick[1]);
+    TEST_ASSERT_INT_WITHIN(1, waiting_ms * 2, count_tick[0]);
+    TEST_ASSERT_INT_WITHIN(1, waiting_ms * 2, count_tick[1]);
+    printf("\n");
+}
+
+TEST_CASE("Test suspend-resume CPU. The number of tick_hook calls should be the same for both CPUs", "[freertos]")
+{
+    printf("Suspend both CPUs:\n");
+    suspend_both_cpus = true;
+    test_scheduler_suspend2(0);
+    test_scheduler_suspend2(1);
+
+    printf("Suspend only one CPU:\n");
+    suspend_both_cpus = false;
+    test_scheduler_suspend2(0);
+    test_scheduler_suspend2(1);
+}
+
+static int duration_timer_ms;
+
+static void timer_callback(void *arg)
+{
+    ++duration_timer_ms;
+}
+
+static void test_scheduler_suspend3(int cpu)
+{
+    duration_timer_ms = 0;
+    duration_ctrl_task_ms = 0;
+
+    printf("Test for CPU%d\n", cpu);
+    TimerHandle_t count_time = xTimerCreate("count_time", 1, pdTRUE, NULL, timer_callback);
+    xTimerStart(count_time, portMAX_DELAY);
+    xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
+    vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
+    xTimerDelete(count_time, portMAX_DELAY);
+
+    printf("Finish duration_timer_ms=%d ms\n", duration_timer_ms);
+    TEST_ASSERT_INT_WITHIN(2, waiting_ms * 2, duration_timer_ms);
+    TEST_ASSERT_INT_WITHIN(5, waiting_ms + delta_ms, duration_ctrl_task_ms);
+    printf("\n");
+}
+
+TEST_CASE("Test suspend-resume CPU works with xTimer", "[freertos]")
+{
+    printf("Suspend both CPUs:\n");
+    suspend_both_cpus = true;
+    test_scheduler_suspend3(0);
+    test_scheduler_suspend3(1);
+
+    printf("Suspend only one CPU:\n");
+    suspend_both_cpus = false;
+    test_scheduler_suspend3(0);
+    test_scheduler_suspend3(1);
+}
+#endif // CONFIG_FREERTOS_UNICORE
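
A note on the assertion used throughout these tests: Unity's TEST_ASSERT_INT_WITHIN(delta, expected, actual) passes when actual lies within plus or minus delta of expected, which is how the tests above tolerate scheduling jitter around the expected durations. A minimal standalone illustration with made-up values (not part of this commit):

#include "unity.h"

/* Hypothetical example of Unity's windowed integer comparison. */
TEST_CASE("TEST_ASSERT_INT_WITHIN illustration", "[example]")
{
    int measured_ms = 2098;  /* e.g. a duration measured via esp_timer_get_time() */
    /* Passes because |2100 - 2098| = 2 <= 100.  The suspend/resume tests above
     * use the same pattern: expected = waiting_ms + delta_ms, with a small
     * window to absorb scheduling jitter. */
    TEST_ASSERT_INT_WITHIN(100, 2100, measured_ms);
}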