diff --git a/components/esp32/cpu_start.c b/components/esp32/cpu_start.c index facc27e8b..33139b4f1 100644 --- a/components/esp32/cpu_start.c +++ b/components/esp32/cpu_start.c @@ -83,6 +83,7 @@ static bool app_cpu_started = false; static void do_global_ctors(void); static void main_task(void* args); extern void app_main(void); +extern esp_err_t esp_pthread_init(void); extern int _bss_start; extern int _bss_end; @@ -252,6 +253,7 @@ static void intr_matrix_clear(void) void start_cpu0_default(void) { + esp_err_t err; esp_setup_syscall_table(); //Enable trace memory and immediately start trace. #if CONFIG_ESP32_TRAX @@ -290,7 +292,7 @@ void start_cpu0_default(void) esp_timer_init(); esp_set_time_from_rtc(); #if CONFIG_ESP32_APPTRACE_ENABLE - esp_err_t err = esp_apptrace_init(); + err = esp_apptrace_init(); if (err != ESP_OK) { ESP_EARLY_LOGE(TAG, "Failed to init apptrace module on CPU0 (%d)!", err); } @@ -298,6 +300,11 @@ void start_cpu0_default(void) #if CONFIG_SYSVIEW_ENABLE SEGGER_SYSVIEW_Conf(); #endif + err = esp_pthread_init(); + if (err != ESP_OK) { + ESP_EARLY_LOGE(TAG, "Failed to init pthread module (%d)!", err); + } + do_global_ctors(); #if CONFIG_INT_WDT esp_int_wdt_init(); diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index 396ad0cd7..0433deb2a 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -4623,7 +4623,6 @@ TickType_t uxReturn; TickType_t xTimeToWake; BaseType_t xReturn; - UNTESTED_FUNCTION(); taskENTER_CRITICAL(&xTaskQueueMutex); { /* Only block if a notification is not already pending. 
*/ @@ -4747,7 +4746,6 @@ TickType_t uxReturn; eNotifyValue eOriginalNotifyState; BaseType_t xReturn = pdPASS; - UNTESTED_FUNCTION(); configASSERT( xTaskToNotify ); pxTCB = ( TCB_t * ) xTaskToNotify; diff --git a/components/newlib/include/sys/features.h b/components/newlib/include/sys/features.h index 1d90921af..87f3314fd 100644 --- a/components/newlib/include/sys/features.h +++ b/components/newlib/include/sys/features.h @@ -210,6 +210,12 @@ extern "C" { #endif /* __CYGWIN__ */ +/* ESP-IDF-specific: enable pthreads support */ +#ifdef __XTENSA__ +#define _POSIX_THREADS 1 +#define _UNIX98_THREAD_MUTEX_ATTRIBUTES 1 +#endif + /* Per the permission given in POSIX.1-2008 section 2.2.1, define * _POSIX_C_SOURCE if _XOPEN_SOURCE is defined and _POSIX_C_SOURCE is not. * (_XOPEN_SOURCE indicates that XSI extensions are desired by an application.) diff --git a/components/newlib/include/sys/sched.h b/components/newlib/include/sys/sched.h index 58f99d682..8554fc2b9 100644 --- a/components/newlib/include/sys/sched.h +++ b/components/newlib/include/sys/sched.h @@ -58,6 +58,8 @@ struct sched_param { #endif }; +int sched_yield( void ); + #ifdef __cplusplus } #endif diff --git a/components/newlib/time.c b/components/newlib/time.c index c6a1e7024..1427dcfde 100644 --- a/components/newlib/time.c +++ b/components/newlib/time.c @@ -216,6 +216,12 @@ int usleep(useconds_t us) return 0; } +unsigned int sleep(unsigned int seconds) +{ + usleep(seconds*1000000UL); + return 0; +} + uint32_t system_get_time(void) { #if defined( WITH_FRC1 ) || defined( WITH_RTC ) diff --git a/components/pthread/Kconfig b/components/pthread/Kconfig new file mode 100644 index 000000000..aa4c35b78 --- /dev/null +++ b/components/pthread/Kconfig @@ -0,0 +1,16 @@ +menu "PThreads" + +config ESP32_PTHREAD_TASK_PRIO_DEFAULT + int "Default task priority" + range 0 255 + default 5 + help + Priority used to create new tasks with default pthread parameters. 
+ +config ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT + int "Default task stack size" + default 2048 + help + Stack size used to create new tasks with default pthread parameters. + +endmenu diff --git a/components/pthread/component.mk b/components/pthread/component.mk new file mode 100644 index 000000000..cd69bb330 --- /dev/null +++ b/components/pthread/component.mk @@ -0,0 +1,9 @@ +# +# Component Makefile +# + +COMPONENT_SRCDIRS := . + +#COMPONENT_ADD_INCLUDEDIRS := include + +COMPONENT_ADD_LDFLAGS := -lpthread diff --git a/components/pthread/pthread.c b/components/pthread/pthread.c new file mode 100644 index 000000000..73f23a00d --- /dev/null +++ b/components/pthread/pthread.c @@ -0,0 +1,574 @@ +// Copyright 2017 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing +// libstdcxx threading framework to operate correctly. So not all original pthread routines are supported. +// Moreover some implemented functions do not provide full functionality, e.g. pthread_create does not support +// thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default +// behavior use native FreeRTOS API. 
+// +#include <errno.h> +#include <pthread.h> +#include <string.h> +#include "esp_err.h" +#include "esp_attr.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "freertos/semphr.h" +#include "freertos/list.h" + +#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL +#include "esp_log.h" +const static char *TAG = "esp_pthread"; + +/** task state */ +enum esp_pthread_task_state { + PTHREAD_TASK_STATE_RUN, + PTHREAD_TASK_STATE_EXIT +}; + +/** pthread thread FreeRTOS wrapper */ +typedef struct { + ListItem_t list_item; ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue + TaskHandle_t join_task; ///< Handle of the task waiting to join + enum esp_pthread_task_state state; ///< pthread task state + bool detached; ///< True if pthread is detached +} esp_pthread_t; + +/** pthread wrapper task arg */ +typedef struct { + void *(*func)(void *); ///< user task entry + void *arg; ///< user task argument +} esp_pthread_task_arg_t; + +/** pthread mutex FreeRTOS wrapper */ +typedef struct { + ListItem_t list_item; ///< mutexes list node struct + SemaphoreHandle_t sem; ///< Mutex semaphore handle + int type; ///< Mutex type.
Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE +} esp_pthread_mutex_t; + + +static SemaphoreHandle_t s_once_mux = NULL; +static SemaphoreHandle_t s_threads_mux = NULL; +static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED; + +static List_t s_threads_list; + + +static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo); + +esp_err_t esp_pthread_init(void) +{ + vListInitialise((List_t *)&s_threads_list); + s_once_mux = xSemaphoreCreateMutex(); + if (s_once_mux == NULL) { + return ESP_ERR_NO_MEM; + } + s_threads_mux = xSemaphoreCreateMutex(); + if (s_threads_mux == NULL) { + vSemaphoreDelete(s_once_mux); + return ESP_ERR_NO_MEM; + } + return ESP_OK; +} + +static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg) +{ + ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list); + ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list); + while (list_item != list_end) { + void *val = item_check(list_item, check_arg); + if (val) { + return val; + } + list_item = listGET_NEXT(list_item); + } + return NULL; +} + +static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg) +{ + esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item); + if (pthread == arg) { + return (void *)listGET_LIST_ITEM_VALUE(item); + } + return NULL; +} +static inline TaskHandle_t pthread_find_handle(pthread_t thread) +{ + return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread); +} + +static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg) +{ + TaskHandle_t task_handle = arg; + TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item); + if (task_handle == cur_handle) { + return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item); + } + return NULL; +} +static esp_pthread_t *pthread_find(TaskHandle_t task_handle) +{ + return pthread_find_list_item(pthread_get_desc_by_handle, task_handle); +} + +static void pthread_delete(esp_pthread_t 
*pthread) +{ + uxListRemove(&pthread->list_item); + free(pthread); +} + +static void pthread_task_func(void *arg) +{ + esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg; + + ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func); + + // wait for start + xTaskNotifyWait(0, 0, NULL, portMAX_DELAY); + + ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func); + task_arg->func(task_arg->arg); + ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func); + free(task_arg); + + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle()); + if (!pthread) { + assert(false && "Failed to find pthread for current task!"); + } + if (pthread->detached) { + // auto-free for detached threads + pthread_delete(pthread); + } else { + // Remove from list, it indicates that task has exited + if (pthread->join_task) { + // notify join + xTaskNotify(pthread->join_task, 0, eNoAction); + } else { + pthread->state = PTHREAD_TASK_STATE_EXIT; + } + } + xSemaphoreGive(s_threads_mux); + + vTaskDelete(NULL); + + ESP_LOGV(TAG, "%s EXIT", __FUNCTION__); +} + +int pthread_create(pthread_t *thread, const pthread_attr_t *attr, + void *(*start_routine) (void *), void *arg) +{ + TaskHandle_t xHandle = NULL; + + ESP_LOGV(TAG, "%s", __FUNCTION__); + if (attr) { + ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__); + return ENOSYS; + } + esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t)); + if (task_arg == NULL) { + ESP_LOGE(TAG, "Failed to allocate task args!"); + return ENOMEM; + } + memset(task_arg, 0, sizeof(esp_pthread_task_arg_t)); + esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t)); + if (pthread == NULL) { + ESP_LOGE(TAG, "Failed to allocate pthread data!"); + free(task_arg); + return ENOMEM; + } + memset(pthread, 0, sizeof(esp_pthread_t)); + task_arg->func = start_routine; + task_arg->arg = arg; + BaseType_t res = 
xTaskCreate(&pthread_task_func, "pthread", CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT, + task_arg, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT, &xHandle); + if(res != pdPASS) { + ESP_LOGE(TAG, "Failed to create task!"); + free(pthread); + free(task_arg); + if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) { + return ENOMEM; + } else { + return EAGAIN; + } + } + vListInitialiseItem((ListItem_t *)&pthread->list_item); + listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread); + listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle); + + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item); + xSemaphoreGive(s_threads_mux); + + // start task + xTaskNotify(xHandle, 0, eNoAction); + + *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t) + + ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle); + + return 0; +} + +int pthread_join(pthread_t thread, void **retval) +{ + esp_pthread_t *pthread = (esp_pthread_t *)thread; + int ret = 0; + + ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread); + + // find task + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + TaskHandle_t handle = pthread_find_handle(thread); + if (!handle) { + // not found + ret = ESRCH; + } else if (pthread->join_task) { + // already have waiting task to join + ret = EINVAL; + } else if (handle == xTaskGetCurrentTaskHandle()) { + // join to self not allowed + ret = EDEADLK; + } else { + esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle()); + if (cur_pthread && cur_pthread->join_task == handle) { + // join to each other not allowed + ret = EDEADLK; + } else { + if (pthread->state == PTHREAD_TASK_STATE_RUN) { + pthread->join_task = xTaskGetCurrentTaskHandle(); + } else { + pthread_delete(pthread); + } + } + } + 
xSemaphoreGive(s_threads_mux); + + if (ret == 0 && pthread->join_task) { + xTaskNotifyWait(0, 0, NULL, portMAX_DELAY); + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + pthread_delete(pthread); + xSemaphoreGive(s_threads_mux); + } + + if (retval) { + *retval = 0; // no exit code in FreeRTOS + } + + ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret); + return ret; +} + +int pthread_detach(pthread_t thread) +{ + esp_pthread_t *pthread = (esp_pthread_t *)thread; + int ret = 0; + + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + TaskHandle_t handle = pthread_find_handle(thread); + if (!handle) { + ret = ESRCH; + } else { + pthread->detached = true; + } + xSemaphoreGive(s_threads_mux); + ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret); + return ret; +} + +int pthread_cancel(pthread_t thread) +{ + ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); + return ENOSYS; +} + +int sched_yield( void ) +{ + vTaskDelay(0); + return 0; +} + +pthread_t pthread_self(void) +{ + if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) { + assert(false && "Failed to lock threads list!"); + } + esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle()); + if (!pthread) { + assert(false && "Failed to find current thread ID!"); + } + xSemaphoreGive(s_threads_mux); + return (pthread_t)pthread; +} + +int pthread_equal(pthread_t t1, pthread_t t2) +{ + return t1 == t2 ? 1 : 0; +} + +/***************** KEY ******************/ +int pthread_key_create(pthread_key_t *key, void (*destructor)(void*)) +{ + static int s_created; + + //TODO: Key destructors not supported! 
+ if (s_created) { + // key API supports just one key necessary by libstdcxx threading implementation + ESP_LOGE(TAG, "%s: multiple keys not supported!", __FUNCTION__); + return ENOSYS; + } + *key = 1; + s_created = 1; + return 0; +} + +int pthread_key_delete(pthread_key_t key) +{ + ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); + return ENOSYS; +} + +void *pthread_getspecific(pthread_key_t key) +{ + ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); + return NULL; +} + +int pthread_setspecific(pthread_key_t key, const void *value) +{ + ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); + return ENOSYS; +} + +/***************** ONCE ******************/ +int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) +{ + if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) { + ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__); + return EINVAL; + } + + TaskHandle_t cur_task = xTaskGetCurrentTaskHandle(); + // do not take mutex if OS is not running yet + if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED || + !cur_task || xSemaphoreTake(s_once_mux, portMAX_DELAY) == pdTRUE) + { + if (!once_control->init_executed) { + ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control); + init_routine(); + once_control->init_executed = 1; + } + if (cur_task) { + xSemaphoreGive(s_once_mux); + } + } + else + { + ESP_LOGE(TAG, "%s: Failed to lock!", __FUNCTION__); + return EBUSY; + } + + return 0; +} + +/***************** MUTEX ******************/ +static int mutexattr_check(const pthread_mutexattr_t *attr) +{ + if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) { + return EINVAL; + } + return 0; +} + +int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) +{ + int type = PTHREAD_MUTEX_NORMAL; + + if (!mutex) { + return EINVAL; + } + + if (attr) { + if (!attr->is_initialized) { + return EINVAL; + } + int res = mutexattr_check(attr); + if (res) { + return res; + } + 
type = attr->type; + } + + esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t)); + if (!mux) { + return ENOMEM; + } + mux->type = type; + + if (mux->type == PTHREAD_MUTEX_RECURSIVE) { + mux->sem = xSemaphoreCreateRecursiveMutex(); + } else { + mux->sem = xSemaphoreCreateMutex(); + } + if (!mux->sem) { + free(mux); + return EAGAIN; + } + + *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t) + + return 0; +} + +int pthread_mutex_destroy(pthread_mutex_t *mutex) +{ + esp_pthread_mutex_t *mux; + + ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex); + + if (!mutex) { + return EINVAL; + } + mux = (esp_pthread_mutex_t *)*mutex; + + // check if mux is busy + int res = pthread_mutex_lock_internal(mux, 0); + if (res == EBUSY) { + return EBUSY; + } + + vSemaphoreDelete(mux->sem); + free(mux); + + return 0; +} + +static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo) +{ + if (mux->type == PTHREAD_MUTEX_RECURSIVE) { + if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) { + return EBUSY; + } + } else { + if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) { + return EBUSY; + } + } + + return 0; +} + +static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) { + int res = 0; + if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) { + portENTER_CRITICAL(&s_mutex_init_lock); + if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) { + res = pthread_mutex_init(mutex, NULL); + } + portEXIT_CRITICAL(&s_mutex_init_lock); + } + return res; +} + +int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex) +{ + if (!mutex) { + return EINVAL; + } + int res = pthread_mutex_init_if_static(mutex); + if (res != 0) { + return res; + } + return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY); +} + +int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex) +{ + if (!mutex) { + return EINVAL; + } + int res = pthread_mutex_init_if_static(mutex); + if (res != 0) { + return res; + } + 
return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0); +} + +int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + esp_pthread_mutex_t *mux; + + if (!mutex) { + return EINVAL; + } + mux = (esp_pthread_mutex_t *)*mutex; + + if (mux->type == PTHREAD_MUTEX_RECURSIVE) { + xSemaphoreGiveRecursive(mux->sem); + } else { + xSemaphoreGive(mux->sem); + } + return 0; +} + +int pthread_mutexattr_init(pthread_mutexattr_t *attr) +{ + if (!attr) { + return EINVAL; + } + attr->type = PTHREAD_MUTEX_NORMAL; + attr->is_initialized = 1; + return 0; +} + +int pthread_mutexattr_destroy(pthread_mutexattr_t *attr) +{ + if (!attr) { + return EINVAL; + } + attr->is_initialized = 0; + return 0; +} + +int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type) +{ + ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__); + return ENOSYS; +} + +int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) +{ + if (!attr) { + return EINVAL; + } + pthread_mutexattr_t tmp_attr = {.type = type}; + int res = mutexattr_check(&tmp_attr); + if (!res) { + attr->type = type; + } + return res; +} diff --git a/components/pthread/test/component.mk b/components/pthread/test/component.mk new file mode 100644 index 000000000..af9fbb837 --- /dev/null +++ b/components/pthread/test/component.mk @@ -0,0 +1,9 @@ +# +#Component Makefile +# + +COMPONENT_SRCDIRS := . 
+ +#CXXFLAGS += -H + +COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive diff --git a/components/pthread/test/test_pthread_cxx.cpp b/components/pthread/test/test_pthread_cxx.cpp new file mode 100644 index 000000000..287a1da1b --- /dev/null +++ b/components/pthread/test/test_pthread_cxx.cpp @@ -0,0 +1,83 @@ +#include <iostream> +#include <memory> +#include <thread> +#include "unity.h" + +#if __GTHREADS && __GTHREADS_CXX0X + +static std::shared_ptr<int> global_sp; +static std::mutex mtx; +static std::recursive_mutex recur_mtx; + +static void thread_do_nothing() {} + +static void thread_main() +{ + int i = 0; + std::cout << "thread_main CXX " << std::hex << std::this_thread::get_id() << std::endl; + + while (i < 3) { + int old_val, new_val; + + // mux test + mtx.lock(); + old_val = *global_sp; + std::this_thread::yield(); + (*global_sp)++; + std::this_thread::yield(); + new_val = *global_sp; + mtx.unlock(); + std::cout << "thread " << std::hex << std::this_thread::get_id() << ": " << i++ << " val= " << *global_sp << std::endl; + TEST_ASSERT_TRUE(new_val == old_val + 1); + + // sleep_for test + std::chrono::milliseconds dur(300); + std::this_thread::sleep_for(dur); + + // recursive mux test + recur_mtx.lock(); + recur_mtx.lock(); + old_val = *global_sp; + std::this_thread::yield(); + (*global_sp)++; + std::this_thread::yield(); + new_val = *global_sp; + recur_mtx.unlock(); + recur_mtx.unlock(); + std::cout << "thread " << std::hex << std::this_thread::get_id() << ": " << i++ << " val= " << *global_sp << std::endl; + TEST_ASSERT_TRUE(new_val == old_val + 1); + + // sleep_until test + using std::chrono::system_clock; + std::time_t tt = system_clock::to_time_t(system_clock::now()); + struct std::tm *ptm = std::localtime(&tt); + ptm->tm_sec++; + std::this_thread::sleep_until(system_clock::from_time_t (mktime(ptm))); + } +} + +TEST_CASE("pthread CXX", "[pthread]") +{ + global_sp.reset(new int(1)); + + std::thread t1(thread_do_nothing); + t1.join(); + + std::thread 
t2(thread_main); + std::cout << "Detach thread " << std::hex << t2.get_id() << std::endl; + t2.detach(); + TEST_ASSERT_FALSE(t2.joinable()); + + std::thread t3(thread_main); + std::thread t4(thread_main); + if (t3.joinable()) { + std::cout << "Join thread " << std::hex << t3.get_id() << std::endl; + t3.join(); + } + if (t4.joinable()) { + std::cout << "Join thread " << std::hex << t4.get_id() << std::endl; + t4.join(); + } +} + +#endif