apptrace lock acquire function was redesigned to minimize waiting time with IRQs disabled
parent b3c6748a0b
commit cf29dd47a9

2 changed files with 25 additions and 17 deletions
@@ -46,29 +46,37 @@ esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
 
 esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
 {
-    lock->int_state = portENTER_CRITICAL_NESTED();
-    unsigned now = ESP_APPTRACE_CPUTICKS2US(portGET_RUN_TIME_COUNTER_VALUE()); // us
-    unsigned end = tmo->start + tmo->tmo;
-    if (now > end) {
-        goto timeout;
-    }
-    unsigned remaining = end - now; // us
-    bool success = vPortCPUAcquireMutexTimeout(&lock->mux, ESP_APPTRACE_US2CPUTICKS(remaining));
-    if (success) {
-        return ESP_OK;
-    }
-
-timeout:
-    portEXIT_CRITICAL_NESTED(lock->int_state);
-    return ESP_ERR_TIMEOUT;
+    int res;
+
+    while (1) {
+        // do not overwrite lock->int_state before we actually acquired the mux
+        unsigned int_state = portENTER_CRITICAL_NESTED();
+        // FIXME: if mux is busy it is not good idea to loop during the whole tmo with disabled IRQs.
+        // So we check mux state using zero tmo, restore IRQs and let others tasks/IRQs to run on this CPU
+        // while we are doing our own tmo check.
+        bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
+        if (success) {
+            lock->int_state = int_state;
+            return ESP_OK;
+        }
+        portEXIT_CRITICAL_NESTED(int_state);
+        // we can be preempted from this place till the next call (above) to portENTER_CRITICAL_NESTED()
+        res = esp_apptrace_tmo_check(tmo);
+        if (res != ESP_OK) {
+            break;
+        }
+    }
+    return res;
 }
 
 esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
 {
+    // save lock's irq state value for this CPU
+    unsigned int_state = lock->int_state;
+    // after call to the following func we can not be sure that lock->int_state
+    // is not overwritten by other CPU who has acquired the mux just after we released it. See esp_apptrace_lock_take().
     vPortCPUReleaseMutex(&lock->mux);
-    portEXIT_CRITICAL_NESTED(lock->int_state);
+    portEXIT_CRITICAL_NESTED(int_state);
     return ESP_OK;
 }
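For context, here is a minimal caller-side sketch (not part of the commit) of how the reworked lock is meant to be used together with the apptrace timeout helper. The header name and the esp_apptrace_tmo_init() call are assumptions based on the surrounding apptrace sources; only esp_apptrace_lock_take()/esp_apptrace_lock_give() and the .mux/.int_state fields actually appear in this diff.

    #include "esp_err.h"
    #include "esp_app_trace_util.h"   // assumed header exposing the lock/tmo helpers

    // Illustrative lock instance; field names match the second hunk of this commit.
    static esp_apptrace_lock_t s_example_lock = {
        .mux = portMUX_INITIALIZER_UNLOCKED,
        .int_state = 0,
    };

    static esp_err_t example_touch_shared_state(void)
    {
        esp_apptrace_tmo_t tmo;
        // Assumed helper: arms the software timeout later checked by esp_apptrace_tmo_check().
        esp_apptrace_tmo_init(&tmo, 10000 /* us */);

        // With this commit the take path spins in short bursts: IRQs are disabled only
        // for a zero-timeout mux probe and re-enabled while the timeout is re-checked.
        esp_err_t res = esp_apptrace_lock_take(&s_example_lock, &tmo);
        if (res != ESP_OK) {
            return res; // timeout expired before the mux became free
        }

        // ... access state shared with the apptrace/SystemView code ...

        return esp_apptrace_lock_give(&s_example_lock);
    }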
@@ -115,7 +115,7 @@ static timer_group_t s_ts_timer_group;
 // everything is fine, so for multi-core env we have to wait on underlying lock forever
 #define SEGGER_LOCK_WAIT_TMO ESP_APPTRACE_TMO_INFINITE
 
-static esp_apptrace_lock_t s_sys_view_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
+static esp_apptrace_lock_t s_sys_view_lock = {.mux = portMUX_INITIALIZER_UNLOCKED, .int_state = 0};
 
 static const char * const s_isr_names[] = {
     [0] = "WIFI_MAC",
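The initializer change in the hunk above implies the lock structure now exposes .mux and .int_state members. Below is a sketch of what the structure presumably looks like, with field types inferred from how they are used in the first hunk; the real definition lives in the apptrace headers and may differ.

    // Assumed shape of esp_apptrace_lock_t after this commit (illustrative only).
    typedef struct {
        portMUX_TYPE mux;       // CPU spinlock passed to vPortCPUAcquireMutexTimeout()/vPortCPUReleaseMutex()
        unsigned int int_state; // saved return value of portENTER_CRITICAL_NESTED() on the owning CPU
    } esp_apptrace_lock_t;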