Merge branch 'bugfix/backports_v3.2' into 'release/v3.2'

Multiple v3.2 backports

See merge request idf/esp-idf!3727
commit 79401934a1
Author: Jiang Jiang Jian
Date:   2018-11-23 13:43:24 +08:00
3 changed files with 61 additions and 51 deletions


@@ -81,11 +81,20 @@ static uint32_t s_ccount_div;
 static uint32_t s_ccount_mul;
 
 #if CONFIG_FREERTOS_USE_TICKLESS_IDLE
-/* Indicates if light sleep entry was successful for given CPU.
+/* Indicates if light sleep entry was skipped in vApplicationSleep for given CPU.
  * This in turn gets used in IDLE hook to decide if `waiti` needs
  * to be invoked or not.
  */
-static bool s_entered_light_sleep[portNUM_PROCESSORS];
+static bool s_skipped_light_sleep[portNUM_PROCESSORS];
+
+#if portNUM_PROCESSORS == 2
+/* When light sleep is finished on one CPU, it is possible that the other CPU
+ * will enter light sleep again very soon, before interrupts on the first CPU
+ * get a chance to run. To avoid such situation, set a flag for the other CPU to
+ * skip light sleep attempt.
+ */
+static bool s_skip_light_sleep[portNUM_PROCESSORS];
+#endif // portNUM_PROCESSORS == 2
 #endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE
 
 /* Indicates to the ISR hook that CCOMPARE needs to be updated on the given CPU.
@@ -465,10 +474,14 @@ void esp_pm_impl_waiti()
 {
 #if CONFIG_FREERTOS_USE_TICKLESS_IDLE
     int core_id = xPortGetCoreID();
-    if (!s_entered_light_sleep[core_id]) {
+    if (s_skipped_light_sleep[core_id]) {
         asm("waiti 0");
-    } else {
-        s_entered_light_sleep[core_id] = false;
+        /* Interrupt took the CPU out of waiti and s_rtos_lock_handle[core_id]
+         * is now taken. However since we are back to idle task, we can release
+         * the lock so that vApplicationSleep can attempt to enter light sleep.
+         */
+        esp_pm_impl_idle_hook();
+        s_skipped_light_sleep[core_id] = false;
     }
 #else
     asm("waiti 0");
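Note on the hunk above: the flag check is inverted relative to the old code. The Xtensa waiti (wait-for-interrupt) instruction now runs only when the preceding vApplicationSleep call skipped light sleep; once an interrupt wakes the CPU, esp_pm_impl_idle_hook() releases the pm lock again so a later pass may attempt light sleep. A compressed sketch of how the two calls relate, assuming the usual ESP-IDF idle path in which the idle hook ends up calling esp_pm_impl_waiti (the wrapper function is illustrative, not from the source, and the two calls actually alternate inside the FreeRTOS idle loop):

    /* One pass of the idle task on a given core, greatly simplified. */
    static void idle_pass(TickType_t xExpectedIdleTime)
    {
        /* Tries to enter light sleep; on failure records the fact in
         * s_skipped_light_sleep[core]. */
        vApplicationSleep(xExpectedIdleTime);
        /* If sleep was skipped: halt on 'waiti 0' until an interrupt,
         * then release the lock via esp_pm_impl_idle_hook(). */
        esp_pm_impl_waiti();
    }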
@@ -477,10 +490,35 @@ void esp_pm_impl_waiti()
 
 #if CONFIG_FREERTOS_USE_TICKLESS_IDLE
+static inline bool IRAM_ATTR should_skip_light_sleep(int core_id)
+{
+#if portNUM_PROCESSORS == 2
+    if (s_skip_light_sleep[core_id]) {
+        s_skip_light_sleep[core_id] = false;
+        s_skipped_light_sleep[core_id] = true;
+        return true;
+    }
+#endif // portNUM_PROCESSORS == 2
+    if (s_mode != PM_MODE_LIGHT_SLEEP || s_is_switching) {
+        s_skipped_light_sleep[core_id] = true;
+    } else {
+        s_skipped_light_sleep[core_id] = false;
+    }
+    return s_skipped_light_sleep[core_id];
+}
+
+static inline void IRAM_ATTR other_core_should_skip_light_sleep(int core_id)
+{
+#if portNUM_PROCESSORS == 2
+    s_skip_light_sleep[!core_id] = true;
+#endif
+}
+
 void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
 {
     portENTER_CRITICAL(&s_switch_lock);
-    if (s_mode == PM_MODE_LIGHT_SLEEP && !s_is_switching) {
+    int core_id = xPortGetCoreID();
+    if (!should_skip_light_sleep(core_id)) {
         /* Calculate how much we can sleep */
         int64_t next_esp_timer_alarm = esp_timer_get_next_alarm();
         int64_t now = esp_timer_get_time();
@@ -494,7 +532,6 @@ void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
             esp_sleep_pd_config(ESP_PD_DOMAIN_RTC_PERIPH, ESP_PD_OPTION_ON);
 #endif
             /* Enter sleep */
-            int core_id = xPortGetCoreID();
             ESP_PM_TRACE_ENTER(SLEEP, core_id);
             int64_t sleep_start = esp_timer_get_time();
             esp_light_sleep_start();
@@ -516,7 +553,7 @@ void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
                     ;
                 }
             }
-            s_entered_light_sleep[core_id] = true;
+            other_core_should_skip_light_sleep(core_id);
         }
     }
     portEXIT_CRITICAL(&s_switch_lock);
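Taken together, should_skip_light_sleep() and other_core_should_skip_light_sleep() form a small cross-core handshake: a CPU that has just finished light sleep raises a flag for its sibling, and the sibling consumes the flag on its next idle pass and backs off exactly once, giving pending interrupts on the first CPU a chance to run before the chip sleeps again. A minimal standalone model of that handshake (hypothetical names standing in for the static flags; in the real code all accesses happen under s_switch_lock, which is what makes the plain bool operations safe across cores):

    #include <stdbool.h>

    #define NUM_CORES 2

    static bool skip_next_attempt[NUM_CORES];   /* models s_skip_light_sleep */
    static bool skipped_last[NUM_CORES];        /* models s_skipped_light_sleep */

    /* Core that just woke from light sleep warns the sibling off.
     * On a two-core part, !core flips 0 <-> 1. */
    static void warn_other_core(int core)
    {
        skip_next_attempt[!core] = true;
    }

    /* Called on each idle pass: true if this core may try to sleep now. */
    static bool may_try_light_sleep(int core)
    {
        if (skip_next_attempt[core]) {
            skip_next_attempt[core] = false;  /* consume the one-shot flag */
            skipped_last[core] = true;        /* remember that we backed off */
            return false;
        }
        skipped_last[core] = false;
        return true;
    }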


@@ -302,10 +302,8 @@ when the scheduler is unsuspended. The pending ready list itself can only be
 accessed from a critical section. */
 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
 
-/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
-that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
+/* We use just one spinlock for all the critical sections. */
 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
-PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
 
 #if ( configGENERATE_RUN_TIME_STATS == 1 )
#if ( configGENERATE_RUN_TIME_STATS == 1 )
@@ -1346,9 +1344,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
     {
         /* Minor optimisation. The tick count cannot change in this
         block. */
-        // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
         const TickType_t xConstTickCount = xTickCount;
-        // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
         /* Generate the tick time at which the task wants to wake. */
         xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
@@ -1455,9 +1451,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
         /* Calculate the time to wake - this may overflow but this is
         not a problem. */
-        // portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
         xTimeToWake = xTickCount + xTicksToDelay;
-        // portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
         /* We must remove ourselves from the ready list before adding
         ourselves to the blocked list as the same list item is used for
@@ -2153,6 +2147,8 @@ void vTaskSuspendAll( void )
 
 #if ( configUSE_TICKLESS_IDLE != 0 )
 
+#if ( portNUM_PROCESSORS > 1 )
+
 static BaseType_t xHaveReadyTasks()
 {
     for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
@@ -2169,6 +2165,7 @@ void vTaskSuspendAll( void )
     return pdFALSE;
 }
+#endif // portNUM_PROCESSORS > 1
 
 static TickType_t prvGetExpectedIdleTime( void )
 {
@@ -2200,9 +2197,7 @@ void vTaskSuspendAll( void )
     }
     else
     {
-        portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
         xReturn = xNextTaskUnblockTime - xTickCount;
-        portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
     }
 
     taskEXIT_CRITICAL(&xTaskQueueMutex);
@@ -2308,31 +2303,13 @@ BaseType_t xAlreadyYielded = pdFALSE;
 TickType_t xTaskGetTickCount( void )
 {
-TickType_t xTicks;
-
-    /* Critical section required if running on a 16 bit processor. */
-    portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
-    {
-        xTicks = xTickCount;
-    }
-    portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-    return xTicks;
+    return xTickCount;
 }
 /*-----------------------------------------------------------*/
 
 TickType_t xTaskGetTickCountFromISR( void )
 {
-TickType_t xReturn;
-
-    taskENTER_CRITICAL_ISR(&xTickCountMutex);
-    {
-        xReturn = xTickCount;
-        // vPortCPUReleaseMutex( &xTickCountMutex );
-    }
-    taskEXIT_CRITICAL_ISR(&xTickCountMutex);
-
-    return xReturn;
+    return xTickCount;
 }
 /*-----------------------------------------------------------*/
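Dropping the critical sections from the two tick-count getters above is safe on this port because, with configUSE_16_BIT_TICKS left at 0, TickType_t is a 32-bit type, and an aligned 32-bit load is atomic on the Xtensa cores; the "16 bit processor" caveat in the deleted comment does not apply here. A compile-time guard could make the assumption explicit (the assertion below is an illustrative sketch, not part of the commit):

    #include <assert.h>
    #include <stdint.h>
    #include "FreeRTOS.h"   /* for TickType_t */

    /* Lock-free xTaskGetTickCount()/xTaskGetTickCountFromISR() rely on the
     * tick count being readable in a single, untearable 32-bit load. */
    static_assert(sizeof(TickType_t) == sizeof(uint32_t),
                  "lock-free tick reads assume an atomic 32-bit load");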
@@ -2467,10 +2444,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
     /* Correct the tick count value after a period during which the tick
     was suppressed. Note this does *not* call the tick hook function for
     each stepped tick. */
-    portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+    portENTER_CRITICAL( &xTaskQueueMutex );
     configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
     xTickCount += xTicksToJump;
-    portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
+    portEXIT_CRITICAL( &xTaskQueueMutex );
     traceINCREASE_TICK_COUNT( xTicksToJump );
 }
@@ -2512,14 +2489,10 @@ BaseType_t xSwitchRequired = pdFALSE;
     if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
     {
-        portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+        taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
         /* Increment the RTOS tick, switching the delayed and overflowed
         delayed lists if it wraps to 0. */
         ++xTickCount;
-        portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-        //The other CPU may decide to mess with the task queues, so this needs a mux.
-        taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
         {
             /* Minor optimisation. The tick count cannot change in this
             block. */
@@ -3288,7 +3261,7 @@ BaseType_t xReturn;
     configASSERT( pxTimeOut );
     configASSERT( pxTicksToWait );
 
-    taskENTER_CRITICAL(&xTickCountMutex);
+    taskENTER_CRITICAL(&xTaskQueueMutex);
     {
         /* Minor optimisation. The tick count cannot change in this block. */
         const TickType_t xConstTickCount = xTickCount;
@@ -3324,7 +3297,7 @@ BaseType_t xReturn;
             xReturn = pdTRUE;
         }
     }
-    taskEXIT_CRITICAL(&xTickCountMutex);
+    taskEXIT_CRITICAL(&xTaskQueueMutex);
 
     return xReturn;
 }
@@ -4081,7 +4054,7 @@ TCB_t *pxTCB;
     {
         TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
 
-        taskENTER_CRITICAL(&xTickCountMutex);
+        taskENTER_CRITICAL(&xTaskQueueMutex);
         /* If the mutex was given back by an interrupt while the queue was
         locked then the mutex holder might now be NULL. */
         if( pxMutexHolder != NULL )
@@ -4138,7 +4111,7 @@ TCB_t *pxTCB;
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL(&xTickCountMutex);
+        taskEXIT_CRITICAL(&xTaskQueueMutex);
     }
@@ -4151,7 +4124,7 @@ TCB_t *pxTCB;
     {
         TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
         BaseType_t xReturn = pdFALSE;
 
-        taskENTER_CRITICAL(&xTickCountMutex);
+        taskENTER_CRITICAL(&xTaskQueueMutex);
         if( pxMutexHolder != NULL )
         {
@@ -4215,7 +4188,7 @@ TCB_t *pxTCB;
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL(&xTickCountMutex);
+        taskEXIT_CRITICAL(&xTaskQueueMutex);
 
         return xReturn;
     }
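The tasks.c changes above retire xTickCountMutex altogether: code that only reads the 32-bit tick count now takes no lock, while code that updates the tick count together with the task lists (tick stepping after tickless idle, the tick ISR, the priority-inheritance paths) is folded under the one surviving xTaskQueueMutex, so the tick count and the lists can no longer be observed out of sync. The resulting single-lock discipline, sketched with the ESP-IDF critical-section API (the two wrapper functions are hypothetical, for illustration only):

    /* One lock now protects both the tick count and the task lists. */
    static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;

    static void update_from_task(void)   /* task context */
    {
        taskENTER_CRITICAL(&xTaskQueueMutex);
        /* ... modify xTickCount and/or the delayed lists atomically ... */
        taskEXIT_CRITICAL(&xTaskQueueMutex);
    }

    static void update_from_isr(void)    /* interrupt context */
    {
        taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
        /* ... same data, via the ISR-safe variant of the same lock ... */
        taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
    }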


@@ -595,7 +595,7 @@ TEST_CASE("opendir, readdir, rewinddir, seekdir work as expected", "[spiffs]")
     test_teardown();
 }
 
-TEST_CASE("readdir with large number of files", "[spiffs][timeout=15]")
+TEST_CASE("readdir with large number of files", "[spiffs][timeout=30]")
 {
     test_setup();
     test_spiffs_readdir_many_files("/spiffs/dir2");