Merge branch 'feature/ipc_runs_with_caller_priority' into 'master'

esp_common: IPC works with the priority of the caller's task

Closes IDF-78

See merge request espressif/esp-idf!6191
This commit is contained in:
Angus Gratton 2019-11-04 18:29:14 +08:00
commit 13ff57f133
3 changed files with 149 additions and 21 deletions

View file

@ -1,8 +1,10 @@
#include <stdio.h> #include <stdio.h>
#include "freertos/FreeRTOS.h" #include "freertos/FreeRTOS.h"
#include "freertos/task.h" #include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h" #include "unity.h"
#include "esp_ipc.h" #include "esp_ipc.h"
#include "esp_log.h"
#include "sdkconfig.h" #include "sdkconfig.h"
#if !CONFIG_FREERTOS_UNICORE #if !CONFIG_FREERTOS_UNICORE
@ -19,4 +21,105 @@ TEST_CASE("Test blocking IPC function call", "[ipc]")
esp_ipc_call_blocking(!xPortGetCoreID(), test_func_ipc_cb, &val); esp_ipc_call_blocking(!xPortGetCoreID(), test_func_ipc_cb, &val);
TEST_ASSERT_EQUAL_HEX(val, 0xa5a5); TEST_ASSERT_EQUAL_HEX(val, 0xa5a5);
} }
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
static volatile bool exit_flag;
// Busy-spinner pinned (by the caller) to core 1 at a priority above the test
// task.  It hogs the CPU for ~3 s, yields for one tick, then spins without
// yielding until the test raises exit_flag.  The 'sema' parameter is unused;
// it exists only to satisfy the TaskFunction_t signature.
static void task1(void *sema)
{
    ESP_LOGI("task1", "start");
    ets_delay_us(3000000);      // hold the CPU so lower-priority work cannot run
    vTaskDelay(1);              // yield exactly one tick
    while (!exit_flag) {
        // spin — intentionally no yield, to starve a lower-priority IPC task
    }
    ESP_LOGI("task1", "finish");
    vTaskDelete(NULL);
}
static UBaseType_t func_ipc_priority;
static void test_func_ipc(void *sema)
{
ets_delay_us(1000000 + xPortGetCoreID() * 100);
func_ipc_priority = uxTaskPriorityGet(NULL);
xSemaphoreGive(*(xSemaphoreHandle *)sema);
ets_printf("test_func_ipc: [%d, %d]\n", func_ipc_priority, xPortGetCoreID());
}
// Checks that the IPC task services esp_ipc_call() at the *caller's* priority
// (CONFIG_ESP_IPC_USES_CALLERS_PRIORITY) instead of configMAX_PRIORITIES - 1.
//
// task1 (priority + 2, pinned to core 1) hogs core 1 without yielding, so an
// IPC request running at the caller's priority (18) is starved: the first
// semaphore take below is expected to TIME OUT.  Once exit_flag lets task1
// exit, the IPC callback runs, records its priority, and gives the semaphore,
// so the second take succeeds.  Had the IPC task kept the legacy maximum
// priority, it would have preempted task1 and recorded a different priority.
// NOTE(review): the interleaving depends on tick timing and the 3 s busy
// delay in task1 — confirm on target before tightening the 4000 ms timeout.
TEST_CASE("Test ipc_task works with the priority of the caller's task", "[ipc]")
{
    UBaseType_t priority = 18;
    func_ipc_priority = 0;
    vTaskPrioritySet(NULL, priority);   // pin this test task to a known priority
    xSemaphoreHandle sema_ipc_done = xSemaphoreCreateBinary();
    exit_flag = false;
    // Higher-priority busy-spinner on core 1; it starves the caller-priority IPC work.
    xTaskCreatePinnedToCore(task1, "task1", 4096, NULL, priority + 2, NULL, 1);
    vTaskDelay(100 / portTICK_PERIOD_MS);
    ESP_LOGI("test", "Start IPC call in IPC_WAIT_FOR_START mode");
    esp_ipc_call(1, test_func_ipc, &sema_ipc_done);
    ESP_LOGI("test", "Waiting for IPC finish");
    // Expected to time out while task1 keeps core 1 busy.
    xSemaphoreTake(sema_ipc_done, 4000 / portTICK_PERIOD_MS);
    ESP_LOGI("test", "Stop task1");
    exit_flag = true;
    // task1 exits, the IPC callback completes and gives the semaphore.
    xSemaphoreTake(sema_ipc_done, portMAX_DELAY);
    vSemaphoreDelete(sema_ipc_done);
    ESP_LOGI("test", "Check ipc_priority with priority caller's task. Should be the same");
    vTaskPrioritySet(NULL, 5);          // drop back to a low priority before asserting
    TEST_ASSERT_EQUAL(priority, func_ipc_priority);
}
// IPC callback used by the multiple-calls test: prints the caller's priority
// (passed by pointer through 'arg') next to the priority this IPC task is
// actually running at, so the two can be compared in the log.
static void test_func2_ipc(void *arg)
{
    const int callers_priority = *(int *)arg;
    ets_delay_us(1000000 + xPortGetCoreID() * 100);   // busy-wait; per-core offset disambiguates output
    ets_printf("test_func2_ipc: [callers_priority = %d, priority = %d, cpu = %d]\n",
               callers_priority, uxTaskPriorityGet(NULL), xPortGetCoreID());
}
static void task(void *sema)
{
int priority = uxTaskPriorityGet(NULL);
ESP_LOGI("task", "start [priority = %d, cpu = %d]", priority, xPortGetCoreID());
xSemaphoreTake(*(xSemaphoreHandle *)sema, portMAX_DELAY);
esp_ipc_call_blocking(!xPortGetCoreID(), test_func2_ipc, &priority);
xSemaphoreGive(*(xSemaphoreHandle *)sema);
ESP_LOGI("task", "finish [priority = %d, cpu = %d]", priority, xPortGetCoreID());
vTaskDelete(NULL);
}
// Spawns max_tasks worker tasks per CPU at increasing priorities.  Each
// worker waits on its own binary semaphore, performs a blocking IPC call to
// the other core, then gives the semaphore back; the test releases all
// workers and then collects every semaphore before deleting it.
//
// Fix: the semaphore array is sized max_tasks * portNUM_PROCESSORS, but the
// original indexed it with a hard-coded "task_num * 2 + cpu_num", silently
// assuming a dual-core chip.  Index with portNUM_PROCESSORS so the indexing
// always matches the array size.
TEST_CASE("Test multiple ipc_calls", "[ipc]")
{
    const int max_tasks = 5;
    UBaseType_t priority = uxTaskPriorityGet(NULL);
    ESP_LOGI("test", "priority = %d, cpu = %d", priority, xPortGetCoreID());
    xSemaphoreHandle sema_ipc_done[max_tasks * portNUM_PROCESSORS];
    for (int task_num = 0; task_num < max_tasks; ++task_num) {
        ++priority;    // each batch of workers runs one priority level higher
        ESP_LOGI("test", "task prio = %d", priority);
        for (int cpu_num = 0; cpu_num < portNUM_PROCESSORS; ++cpu_num) {
            const int idx = task_num * portNUM_PROCESSORS + cpu_num;
            sema_ipc_done[idx] = xSemaphoreCreateBinary();
            xTaskCreatePinnedToCore(task, "task", 4096, &sema_ipc_done[idx], priority, NULL, cpu_num);
        }
    }
    // Release every worker: each give unblocks one task's initial take.
    for (int task_num = 0; task_num < max_tasks; ++task_num) {
        for (int cpu_num = 0; cpu_num < portNUM_PROCESSORS; ++cpu_num) {
            xSemaphoreGive(sema_ipc_done[task_num * portNUM_PROCESSORS + cpu_num]);
        }
    }
    // Wait for every worker to give its semaphore back, then clean up.
    for (int task_num = 0; task_num < max_tasks; ++task_num) {
        for (int cpu_num = 0; cpu_num < portNUM_PROCESSORS; ++cpu_num) {
            const int idx = task_num * portNUM_PROCESSORS + cpu_num;
            xSemaphoreTake(sema_ipc_done[idx], portMAX_DELAY);
            vSemaphoreDelete(sema_ipc_done[idx]);
        }
    }
}
#endif /* !CONFIG_FREERTOS_UNICORE */ #endif /* !CONFIG_FREERTOS_UNICORE */
#endif // CONFIG_ESP_IPC_USES_CALLERS_PRIORITY

View file

@ -54,6 +54,15 @@ menu "Common ESP-related"
It can be shrunk if you are sure that you do not use any custom It can be shrunk if you are sure that you do not use any custom
IPC functionality. IPC functionality.
config ESP_IPC_USES_CALLERS_PRIORITY
bool "IPC runs at caller's priority"
default y
depends on !FREERTOS_UNICORE
help
If this option is not enabled, the IPC task keeps the same behavior
as before ESP-IDF v4.0 and runs at the highest possible priority
(configMAX_PRIORITIES - 1) instead of the caller's priority.
config ESP_TIMER_TASK_STACK_SIZE config ESP_TIMER_TASK_STACK_SIZE
int "High-resolution timer task stack size" int "High-resolution timer task stack size"
default 3584 default 3584

View file

@ -24,19 +24,19 @@
#include "freertos/task.h" #include "freertos/task.h"
#include "freertos/semphr.h" #include "freertos/semphr.h"
static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
static SemaphoreHandle_t s_ipc_mutex; // This mutex is used as a global lock for esp_ipc_* APIs static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS]; // This mutex is used as a global lock for esp_ipc_* APIs
static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS]; // Two semaphores used to wake each of ipc tasks static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS]; // Two semaphores used to wake each of ipc tasks
static SemaphoreHandle_t s_ipc_ack; // Semaphore used to acknowledge that task was woken up, static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS]; // Semaphore used to acknowledge that task was woken up,
// or function has finished running // or function has finished running
static volatile esp_ipc_func_t s_func; // Function which should be called by high priority task static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS]; // Function which should be called by high priority task
static void * volatile s_func_arg; // Argument to pass into s_func static void * volatile s_func_arg[portNUM_PROCESSORS]; // Argument to pass into s_func
typedef enum { typedef enum {
IPC_WAIT_FOR_START, IPC_WAIT_FOR_START,
IPC_WAIT_FOR_END IPC_WAIT_FOR_END
} esp_ipc_wait_t; } esp_ipc_wait_t;
static volatile esp_ipc_wait_t s_ipc_wait; // This variable tells high priority task when it should give static volatile esp_ipc_wait_t s_ipc_wait[portNUM_PROCESSORS];// This variable tells high priority task when it should give
// s_ipc_ack semaphore: before s_func is called, or // s_ipc_ack semaphore: before s_func is called, or
// after it returns // after it returns
@ -53,15 +53,15 @@ static void IRAM_ATTR ipc_task(void* arg)
abort(); abort();
} }
esp_ipc_func_t func = s_func; esp_ipc_func_t func = s_func[cpuid];
void* arg = s_func_arg; void* arg = s_func_arg[cpuid];
if (s_ipc_wait == IPC_WAIT_FOR_START) { if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_START) {
xSemaphoreGive(s_ipc_ack); xSemaphoreGive(s_ipc_ack[cpuid]);
} }
(*func)(arg); (*func)(arg);
if (s_ipc_wait == IPC_WAIT_FOR_END) { if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
xSemaphoreGive(s_ipc_ack); xSemaphoreGive(s_ipc_ack[cpuid]);
} }
} }
// TODO: currently this is unreachable code. Introduce esp_ipc_uninit // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
@ -86,14 +86,14 @@ static void esp_ipc_init(void) __attribute__((constructor));
static void esp_ipc_init(void) static void esp_ipc_init(void)
{ {
s_ipc_mutex = xSemaphoreCreateMutex();
s_ipc_ack = xSemaphoreCreateBinary();
char task_name[15]; char task_name[15];
for (int i = 0; i < portNUM_PROCESSORS; ++i) { for (int i = 0; i < portNUM_PROCESSORS; ++i) {
snprintf(task_name, sizeof(task_name), "ipc%d", i); snprintf(task_name, sizeof(task_name), "ipc%d", i);
s_ipc_mutex[i] = xSemaphoreCreateMutex();
s_ipc_ack[i] = xSemaphoreCreateBinary();
s_ipc_sem[i] = xSemaphoreCreateBinary(); s_ipc_sem[i] = xSemaphoreCreateBinary();
portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i, portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i,
configMAX_PRIORITIES - 1, NULL, i); configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
assert(res == pdTRUE); assert(res == pdTRUE);
} }
} }
@ -107,14 +107,30 @@ static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, voi
return ESP_ERR_INVALID_STATE; return ESP_ERR_INVALID_STATE;
} }
xSemaphoreTake(s_ipc_mutex, portMAX_DELAY); #ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
if (priority_of_running_ipc_task < priority_of_current_task) {
vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
}
s_func = func; xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
s_func_arg = arg; vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
s_ipc_wait = wait_for; #else
xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
#endif
s_func[cpu_id] = func;
s_func_arg[cpu_id] = arg;
s_ipc_wait[cpu_id] = wait_for;
xSemaphoreGive(s_ipc_sem[cpu_id]); xSemaphoreGive(s_ipc_sem[cpu_id]);
xSemaphoreTake(s_ipc_ack, portMAX_DELAY); xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
xSemaphoreGive(s_ipc_mutex); #ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
xSemaphoreGive(s_ipc_mutex[0]);
#endif
return ESP_OK; return ESP_OK;
} }