esp_common: IPC refactor

- esp_ipc_call_and_wait() can now run simultaneously on both CPUs.
- ipc_task now runs at the priority of the calling task;
  its priority is raised before the call if the caller's priority is higher.
- Added the ESP_IPC_USES_CALLERS_PRIORITY option;
  disabling it restores the old IPC behaviour.
KonstantinKondrashov 2019-09-25 02:13:18 +08:00
parent b125bb50ea
commit 6071e2f3c7
2 changed files with 42 additions and 22 deletions
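
For reference, a minimal usage sketch (not part of this commit) of the public IPC API that funnels into esp_ipc_call_and_wait(): esp_ipc_call_blocking() and esp_ipc_call() are the existing esp_ipc.h entry points, while the callback and flag below are made up for illustration.

// Illustrative only: run a function on the other CPU via the public IPC API.
#include "esp_ipc.h"
#include "esp_err.h"

// Hypothetical callback; IPC callbacks take a single void* argument and
// execute in the ipc task pinned to the target CPU.
static void set_flag(void *arg)
{
    *(volatile int *)arg = 1;
}

void example_ipc_call(void)
{
    static volatile int flag = 0;
    // Block until set_flag() has finished on CPU 1 (the IPC_WAIT_FOR_END path);
    // esp_ipc_call() would only wait until the function has started
    // (the IPC_WAIT_FOR_START path).
    ESP_ERROR_CHECK(esp_ipc_call_blocking(1, set_flag, (void *)&flag));
}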


@@ -53,6 +53,15 @@ menu "Common ESP-related"
             It can be shrunk if you are sure that you do not use any custom
             IPC functionality.
 
+    config ESP_IPC_USES_CALLERS_PRIORITY
+        bool "IPC runs at caller's priority"
+        default y
+        depends on !FREERTOS_UNICORE
+        help
+            If this option is not enabled then the IPC task will keep behavior
+            same as prior to that of ESP-IDF v4.0, and hence IPC task will run
+            at (configMAX_PRIORITIES - 1) priority.
+
     config ESP_TIMER_TASK_STACK_SIZE
         int "High-resolution timer task stack size"
         default 3584
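
Not part of the diff: assuming the option lands as shown above, the pre-v4.0 fixed-priority behaviour could be restored from a project's sdkconfig.defaults (or via menuconfig) with an entry along these lines:

# sdkconfig.defaults (illustrative): restore the fixed-priority IPC task
# CONFIG_ESP_IPC_USES_CALLERS_PRIORITY is not set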


@@ -25,18 +25,18 @@
 #include "freertos/semphr.h"
 
 static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
-static SemaphoreHandle_t s_ipc_mutex;                          // This mutex is used as a global lock for esp_ipc_* APIs
+static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS];      // This mutex is used as a global lock for esp_ipc_* APIs
 static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS];        // Two semaphores used to wake each of ipc tasks
-static SemaphoreHandle_t s_ipc_ack;                            // Semaphore used to acknowledge that task was woken up,
+static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS];        // Semaphore used to acknowledge that task was woken up,
                                                                // or function has finished running
-static volatile esp_ipc_func_t s_func;                         // Function which should be called by high priority task
-static void * volatile s_func_arg;                             // Argument to pass into s_func
+static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS];     // Function which should be called by high priority task
+static void * volatile s_func_arg[portNUM_PROCESSORS];         // Argument to pass into s_func
 
 typedef enum {
     IPC_WAIT_FOR_START,
     IPC_WAIT_FOR_END
 } esp_ipc_wait_t;
 
-static volatile esp_ipc_wait_t s_ipc_wait;                     // This variable tells high priority task when it should give
+static volatile esp_ipc_wait_t s_ipc_wait[portNUM_PROCESSORS]; // This variable tells high priority task when it should give
                                                                // s_ipc_ack semaphore: before s_func is called, or
                                                                // after it returns
@@ -53,15 +53,15 @@ static void IRAM_ATTR ipc_task(void* arg)
             abort();
         }
 
-        esp_ipc_func_t func = s_func;
-        void* arg = s_func_arg;
-        if (s_ipc_wait == IPC_WAIT_FOR_START) {
-            xSemaphoreGive(s_ipc_ack);
+        esp_ipc_func_t func = s_func[cpuid];
+        void* arg = s_func_arg[cpuid];
+        if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_START) {
+            xSemaphoreGive(s_ipc_ack[cpuid]);
         }
         (*func)(arg);
-        if (s_ipc_wait == IPC_WAIT_FOR_END) {
-            xSemaphoreGive(s_ipc_ack);
+        if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
+            xSemaphoreGive(s_ipc_ack[cpuid]);
         }
     }
 
     // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
@@ -86,11 +86,11 @@ static void esp_ipc_init(void) __attribute__((constructor));
 
 static void esp_ipc_init(void)
 {
-    s_ipc_mutex = xSemaphoreCreateMutex();
-    s_ipc_ack = xSemaphoreCreateBinary();
     char task_name[15];
     for (int i = 0; i < portNUM_PROCESSORS; ++i) {
         snprintf(task_name, sizeof(task_name), "ipc%d", i);
+        s_ipc_mutex[i] = xSemaphoreCreateMutex();
+        s_ipc_ack[i] = xSemaphoreCreateBinary();
         s_ipc_sem[i] = xSemaphoreCreateBinary();
         portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i,
                                                     configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
@@ -107,19 +107,30 @@ static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, voi
         return ESP_ERR_INVALID_STATE;
     }
 
-    xSemaphoreTake(s_ipc_mutex, portMAX_DELAY);
-
+#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
     TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
     UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
-    // ipc_task will work with the priority of the caller's task.
-    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+    UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
+    if (priority_of_running_ipc_task < priority_of_current_task) {
+        vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+    }
 
-    s_func = func;
-    s_func_arg = arg;
-    s_ipc_wait = wait_for;
+    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
+    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+#else
+    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
+#endif
+
+    s_func[cpu_id] = func;
+    s_func_arg[cpu_id] = arg;
+    s_ipc_wait[cpu_id] = wait_for;
     xSemaphoreGive(s_ipc_sem[cpu_id]);
-    xSemaphoreTake(s_ipc_ack, portMAX_DELAY);
-    xSemaphoreGive(s_ipc_mutex);
+    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
+#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
+    xSemaphoreGive(s_ipc_mutex[cpu_id]);
+#else
+    xSemaphoreGive(s_ipc_mutex[0]);
+#endif
     return ESP_OK;
 }
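
A rough sketch (not from this commit) of what the per-CPU state makes possible: a task pinned to each core can issue a blocking IPC call to the opposite core at the same time. Task names, stack sizes, priorities and the do_work() callback below are invented; only esp_ipc_call_blocking() and the FreeRTOS calls are existing APIs.

// Illustrative only: concurrent blocking IPC calls from both cores.
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_ipc.h"

static void do_work(void *arg)
{
    (void)arg;   // placeholder work executed by the remote core's ipc task
}

static void caller_task(void *arg)
{
    const uint32_t other_cpu = (uint32_t)(uintptr_t)arg;
    while (1) {
        // With CONFIG_ESP_IPC_USES_CALLERS_PRIORITY, ipcN runs at this task's
        // priority; both cores can be inside this call concurrently because
        // the mutex/ack/func state is now kept per target CPU.
        esp_ipc_call_blocking(other_cpu, do_work, NULL);
        vTaskDelay(pdMS_TO_TICKS(100));
    }
}

void start_ipc_demo(void)
{
    xTaskCreatePinnedToCore(caller_task, "ipc_from_cpu0", 2048, (void *)1, 5, NULL, 0);
    xTaskCreatePinnedToCore(caller_task, "ipc_from_cpu1", 2048, (void *)0, 5, NULL, 1);
}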