pthread: Fixes memory leaks and stack overflow in tests

This commit also replaces the FreeRTOS list used for the pthread internals
with the simple singly-linked list from rom/queue.h.
Alexey Gerenkov 2017-11-14 11:43:32 +03:00
parent b83792f504
commit 7ce945a9de
3 changed files with 52 additions and 49 deletions
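
For context on the data-structure swap: rom/queue.h ships the classic BSD sys/queue.h macros, so the list head lives in a single static variable and the link field is embedded in each element. Below is a minimal sketch of the pattern the new pthread code follows (illustrative names, not part of the commit itself):

    #include "rom/queue.h"

    struct item {
        int value;
        SLIST_ENTRY(item) next;   // embedded link, like esp_pthread_entry's list_node
    };

    // List head, statically initialized, like s_threads_list below
    static SLIST_HEAD(item_list_head, item) s_items = SLIST_HEAD_INITIALIZER(s_items);

    static struct item *find_item(int value)
    {
        struct item *it;
        SLIST_FOREACH(it, &s_items, next) {      // linear walk, like pthread_list_find_item()
            if (it->value == value) {
                return it;
            }
        }
        return NULL;
    }

    static void add_item(struct item *it)
    {
        SLIST_INSERT_HEAD(&s_items, it, next);   // like the insert in pthread_create()
    }

    static void remove_item(struct item *it)
    {
        SLIST_REMOVE(&s_items, it, item, next);  // like pthread_delete()
    }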


@@ -9,7 +9,7 @@ config ESP32_PTHREAD_TASK_PRIO_DEFAULT
 config ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT
     int "Default task stack size"
-    default 2048
+    default 3072
     help
         Stack size used to create new tasks with default pthread parameters.


@@ -23,16 +23,16 @@
 #include <string.h>
 #include "esp_err.h"
 #include "esp_attr.h"
+#include "rom/queue.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
 #include "freertos/semphr.h"
-#include "freertos/list.h"
 #include "pthread_internal.h"
 #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
 #include "esp_log.h"
 
-const static char *TAG = "esp_pthread";
+const static char *TAG = "pthread";
 
 /** task state */
 enum esp_pthread_task_state {
@@ -41,11 +41,12 @@ enum esp_pthread_task_state {
 };
 
 /** pthread thread FreeRTOS wrapper */
-typedef struct {
-    ListItem_t                  list_item;  ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue
-    TaskHandle_t                join_task;  ///< Handle of the task waiting to join
-    enum esp_pthread_task_state state;      ///< pthread task state
-    bool                        detached;   ///< True if pthread is detached
+typedef struct esp_pthread_entry {
+    SLIST_ENTRY(esp_pthread_entry)  list_node;  ///< Tasks list node struct
+    TaskHandle_t                    handle;     ///< FreeRTOS task handle
+    TaskHandle_t                    join_task;  ///< Handle of the task waiting to join
+    enum esp_pthread_task_state     state;      ///< pthread task state
+    bool                            detached;   ///< True if pthread is detached
 } esp_pthread_t;
 
 /** pthread wrapper task arg */
@@ -56,23 +57,21 @@ typedef struct {
 
 /** pthread mutex FreeRTOS wrapper */
 typedef struct {
-    ListItem_t          list_item;  ///< mutexes list node struct
     SemaphoreHandle_t   sem;        ///< Handle of the underlying semaphore
     int                 type;       ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
 } esp_pthread_mutex_t;
 
-static SemaphoreHandle_t s_threads_mux = NULL;
-static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
-static List_t s_threads_list;
+static SemaphoreHandle_t s_threads_mux = NULL;
+static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
+static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
+        = SLIST_HEAD_INITIALIZER(s_threads_list);
 
 static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
 
 esp_err_t esp_pthread_init(void)
 {
-    vListInitialise((List_t *)&s_threads_list);
     s_threads_mux = xSemaphoreCreateMutex();
     if (s_threads_mux == NULL) {
         return ESP_ERR_NO_MEM;
@@ -80,50 +79,47 @@ esp_err_t esp_pthread_init(void)
     return ESP_OK;
 }
 
-static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg)
+static void *pthread_list_find_item(void *(*item_check)(esp_pthread_t *, void *arg), void *check_arg)
 {
-    ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list);
-    ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list);
-    while (list_item != list_end) {
-        void *val = item_check(list_item, check_arg);
+    esp_pthread_t *it;
+    SLIST_FOREACH(it, &s_threads_list, list_node) {
+        void *val = item_check(it, check_arg);
         if (val) {
             return val;
        }
-        list_item = listGET_NEXT(list_item);
     }
     return NULL;
 }
 
-static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg)
+static void *pthread_get_handle_by_desc(esp_pthread_t *item, void *desc)
 {
-    esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item);
-    if (pthread == arg) {
-        return (void *)listGET_LIST_ITEM_VALUE(item);
+    if (item == desc) {
+        return item->handle;
     }
     return NULL;
 }
 
+static void *pthread_get_desc_by_handle(esp_pthread_t *item, void *hnd)
+{
+    if (hnd == item->handle) {
+        return item;
+    }
+    return NULL;
+}
+
 static inline TaskHandle_t pthread_find_handle(pthread_t thread)
 {
-    return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread);
+    return pthread_list_find_item(pthread_get_handle_by_desc, (void *)thread);
 }
 
-static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg)
-{
-    TaskHandle_t task_handle = arg;
-    TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item);
-    if (task_handle == cur_handle) {
-        return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item);
-    }
-    return NULL;
-}
-
 static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
 {
-    return pthread_find_list_item(pthread_get_desc_by_handle, task_handle);
+    return pthread_list_find_item(pthread_get_desc_by_handle, task_handle);
 }
 
 static void pthread_delete(esp_pthread_t *pthread)
 {
-    uxListRemove(&pthread->list_item);
+    SLIST_REMOVE(&s_threads_list, pthread, esp_pthread_entry, list_node);
     free(pthread);
 }
@@ -166,6 +162,7 @@ static void pthread_task_func(void *arg)
     }
     xSemaphoreGive(s_threads_mux);
 
+    ESP_LOGD(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
     vTaskDelete(NULL);
 
     ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
@@ -208,14 +205,12 @@ int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
             return EAGAIN;
         }
     }
-    vListInitialiseItem((ListItem_t *)&pthread->list_item);
-    listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread);
-    listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle);
+    pthread->handle = xHandle;
     if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
         assert(false && "Failed to lock threads list!");
     }
-    vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item);
+    SLIST_INSERT_HEAD(&s_threads_list, pthread, list_node);
     xSemaphoreGive(s_threads_mux);
 
     // start task


@@ -22,6 +22,7 @@ static void thread_main()
 {
     int i = 0;
     std::cout << "thread_main CXX " << std::hex << std::this_thread::get_id() << std::endl;
+    std::chrono::milliseconds dur = std::chrono::milliseconds(300);
 
     while (i < 3) {
         int old_val, new_val;
@@ -38,7 +39,6 @@ static void thread_main()
         TEST_ASSERT_TRUE(new_val == old_val + 1);
 
         // sleep_for test
-        std::chrono::milliseconds dur(300);
         std::this_thread::sleep_for(dur);
 
         // recursive mux test
@@ -59,7 +59,7 @@ static void thread_main()
         std::time_t tt = system_clock::to_time_t(system_clock::now());
         struct std::tm *ptm = std::localtime(&tt);
         ptm->tm_sec++;
-        std::this_thread::sleep_until(system_clock::from_time_t (mktime(ptm)));
+        std::this_thread::sleep_until(system_clock::from_time_t(mktime(ptm)));
     }
 }
@@ -89,17 +89,25 @@ TEST_CASE("pthread C++", "[pthread]")
     global_sp.reset(); // avoid reported leak
 }
 
-static void task_test_sandbox(void *arg)
+static void task_test_sandbox()
 {
-    bool *running = (bool *)arg;
-
-    std::stringstream ss;
     ESP_LOGI(TAG, "About to create a string stream");
+    std::stringstream ss;
     ESP_LOGI(TAG, "About to write to string stream");
     ss << "Hello World!";
     ESP_LOGI(TAG, "About to extract from stringstream");
     ESP_LOGI(TAG, "Text: %s", ss.str().c_str());
+}
+
+static void task_test_sandbox_c(void *arg)
+{
+    bool *running = (bool *)arg;
+
+    // wrap thread func to ensure that all C++ stack objects are cleaned up by their destructors
+    task_test_sandbox();
+
+    ESP_LOGI(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
     if (running) {
         *running = false;
         vTaskDelete(NULL);
@@ -108,11 +116,11 @@ static void task_test_sandbox(void *arg)
 
 TEST_CASE("pthread mix C/C++", "[pthread]")
 {
-    bool running = true;
+    bool c_running = true;
 
-    std::thread t1(task_test_sandbox, (void *)NULL);
-    xTaskCreatePinnedToCore((TaskFunction_t)&task_test_sandbox, "task_test_sandbox", 2048, &running, 5, NULL, 0);
-    while (running) {
+    std::thread t1(task_test_sandbox);
+    xTaskCreatePinnedToCore((TaskFunction_t)&task_test_sandbox_c, "task_test_sandbox", 3072, &c_running, 5, NULL, 0);
+    while (c_running) {
         vTaskDelay(1);
     }
 
     if (t1.joinable()) {
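
A closing note on the test change above: the comment in task_test_sandbox_c is the heart of the leak fix. A FreeRTOS task that ends with vTaskDelete(NULL) never returns, so destructors of C++ objects living in that same stack frame are skipped and their heap allocations leak. Keeping the C++ objects in an inner function guarantees their destructors run before the task deletes itself. A hedged sketch of the pattern (illustrative names, assuming the standard FreeRTOS task API):

    #include <sstream>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    // All C++ objects with destructors live in this inner function...
    static void do_cxx_work()
    {
        std::stringstream ss;
        ss << "Hello World!";
    }   // ...so ~stringstream() runs here, when the frame unwinds normally

    // Plain C-style entry point handed to xTaskCreate()/xTaskCreatePinnedToCore()
    static void cxx_task_entry(void *arg)
    {
        do_cxx_work();       // destructors have already run by this point
        vTaskDelete(NULL);   // safe: no live C++ objects remain in this task
    }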