Merge branch 'master' into feature/init_refactoring

* master:
  components/spi_flash: remove stray level of indentation
  components/nvs: fix broken sentences in comment blocks
  Roll back submodule version
  Spinlocks already come initialized. Remove the code that would essentially re-initialize them at runtime
  Remove all references to prvLockQueue / prvUnlockQueue
  components/esp32: clean up unused function warnings in single core mode
  clean up warnings
  components/nvs: fix build, use log library instead of printf
  components/spi_flash: add flash operation counters option to Kconfig
  components/nvs: add erase function
  components/nvs: fix formatting
  components/nvs: batch writes when possible
  components/spi_flash: add performance counters
  components/nvs: maintain item hash list at page level
  components/nvs: avoid reading just-erased page

# Conflicts:
#	components/esp32/cpu_start.c
#	components/esp32/event_default_handlers.c
Ivan Grokhotkov 2016-09-26 14:35:09 +08:00
commit dabe53f082
23 changed files with 869 additions and 479 deletions

View file

@ -44,10 +44,14 @@
#include "esp_log.h"
void start_cpu0(void) __attribute__((weak, alias("start_cpu0_default")));
void start_cpu1(void) __attribute__((weak, alias("start_cpu1_default")));
void start_cpu0_default(void) IRAM_ATTR;
void start_cpu1_default(void) IRAM_ATTR;
#if !CONFIG_FREERTOS_UNICORE
static void IRAM_ATTR call_start_cpu1();
void start_cpu1(void) __attribute__((weak, alias("start_cpu1_default")));
void start_cpu1_default(void) IRAM_ATTR;
static bool app_cpu_started = false;
#endif //!CONFIG_FREERTOS_UNICORE
static void do_global_ctors(void);
static void main_task(void* args);
extern void ets_setup_syscalls(void);
@ -61,7 +65,6 @@ extern void (*__init_array_end)(void);
extern volatile int port_xSchedulerRunning[2];
static const char* TAG = "cpu_start";
static bool app_cpu_started = false;
/*
* We arrive here after the bootloader finished loading the program from flash. The hardware is mostly uninitialized,
@ -91,7 +94,7 @@ void IRAM_ATTR call_start_cpu0()
ESP_EARLY_LOGI(TAG, "Pro cpu up.");
#ifndef CONFIG_FREERTOS_UNICORE
#if !CONFIG_FREERTOS_UNICORE
ESP_EARLY_LOGI(TAG, "Starting app cpu, entry point is %p", call_start_cpu1);
SET_PERI_REG_MASK(DPORT_APPCPU_CTRL_B_REG, DPORT_APPCPU_CLKGATE_EN);
@ -111,6 +114,7 @@ void IRAM_ATTR call_start_cpu0()
start_cpu0();
}
#if !CONFIG_FREERTOS_UNICORE
void IRAM_ATTR call_start_cpu1()
{
asm volatile (\
@ -123,6 +127,7 @@ void IRAM_ATTR call_start_cpu1()
app_cpu_started = 1;
start_cpu1();
}
#endif //!CONFIG_FREERTOS_UNICORE
void start_cpu0_default(void)
{
@ -139,6 +144,7 @@ void start_cpu0_default(void)
vTaskStartScheduler();
}
#if !CONFIG_FREERTOS_UNICORE
void start_cpu1_default(void)
{
// Wait for FreeRTOS initialization to finish on PRO CPU
@ -148,6 +154,7 @@ void start_cpu1_default(void)
ESP_LOGI(TAG, "Starting scheduler on APP CPU.");
xPortStartScheduler();
}
#endif //!CONFIG_FREERTOS_UNICORE
static void do_global_ctors(void)
{

View file

@ -100,11 +100,6 @@ header files above, but not in this file, in order to generate the correct
privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
/* Constants used with the xRxLock and xTxLock structure members. */
#define queueUNLOCKED ( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED ( ( BaseType_t ) 0 )
/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area. When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
@ -163,9 +158,6 @@ typedef struct QueueDefinition
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
UBaseType_t uxItemSize; /*< The size of each item that the queue will hold. */
volatile BaseType_t xRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
volatile BaseType_t xTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#if ( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxQueueNumber;
uint8_t ucQueueType;
@ -212,15 +204,6 @@ typedef xQUEUE Queue_t;
#endif /* configQUEUE_REGISTRY_SIZE */
/*
* Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
* prevent an ISR from adding or removing items to the queue, but does prevent
* an ISR from removing tasks from the queue event lists. If an ISR finds a
* queue is locked it will instead increment the appropriate queue lock count
* to indicate that a task may require unblocking. When the queue is unlocked
* these lock counts are inspected, and the appropriate action taken.
*/
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
/*
* Uses a critical section to determine if there is any data in a queue.
@ -255,27 +238,6 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/
/*
* Macro to mark a queue as locked. Locking a queue prevents an ISR from
* accessing the queue event lists.
*/
#define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL(&pxQueue->mux); \
{ \
if( ( pxQueue )->xRxLock == queueUNLOCKED ) \
{ \
( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED; \
} \
if( ( pxQueue )->xTxLock == queueUNLOCKED ) \
{ \
( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
taskEXIT_CRITICAL(&pxQueue->mux)
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
@ -292,8 +254,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxQueue->pcWriteTo = pxQueue->pcHead;
pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
pxQueue->xRxLock = queueUNLOCKED;
pxQueue->xTxLock = queueUNLOCKED;
if( xNewQueue == pdFALSE )
{
@ -441,8 +401,6 @@ int8_t *pcAllocatedBuffer;
pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxNewQueue->uxLength = ( UBaseType_t ) 1U;
pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
pxNewQueue->xRxLock = queueUNLOCKED;
pxNewQueue->xTxLock = queueUNLOCKED;
#if ( configUSE_TRACE_FACILITY == 1 )
{
@ -787,7 +745,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
now the critical section has been exited. */
taskENTER_CRITICAL(&pxQueue->mux);
// prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -797,13 +754,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can affect the
event list. It is possible that interrupts occurring now
remove this task from the event list again - but as the
scheduler is suspended the task will go onto the pending
ready list instead of the actual ready list. */
// prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
ready list into the ready list - so it is feasible that this
@ -816,14 +766,12 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
else
{
/* Try again. */
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
}
}
else
{
/* The timeout has expired. */
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
/* Return to the original privilege level before exiting the
@ -1129,27 +1077,18 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
disinheritance here or to clear the mutex holder TCB member. */
( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( pxQueue->xTxLock == queueUNLOCKED )
#if ( configUSE_QUEUE_SETS == 1 )
{
#if ( configUSE_QUEUE_SETS == 1 )
if( pxQueue->pxQueueSetContainer != NULL )
{
if( pxQueue->pxQueueSetContainer != NULL )
if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
{
if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
/* The queue is a member of a queue set, and posting
to the queue set caused a higher priority task to
unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
@ -1158,40 +1097,17 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
}
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
mtCOVERAGE_TEST_MARKER();
}
}
#else /* configUSE_QUEUE_SETS */
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
@ -1211,16 +1127,35 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
}
else
#else /* configUSE_QUEUE_SETS */
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
++( pxQueue->xTxLock );
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
xReturn = pdPASS;
#endif /* configUSE_QUEUE_SETS */
}
else
{
@ -1285,27 +1220,18 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
++( pxQueue->uxMessagesWaiting );
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
if( pxQueue->xTxLock == queueUNLOCKED )
#if ( configUSE_QUEUE_SETS == 1 )
{
#if ( configUSE_QUEUE_SETS == 1 )
if( pxQueue->pxQueueSetContainer != NULL )
{
if( pxQueue->pxQueueSetContainer != NULL )
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
{
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
/* The semaphore is a member of a queue set, and
posting to the queue set caused a higher priority
task to unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
/* The semaphore is a member of a queue set, and
posting to the queue set caused a higher priority
task to unblock. A context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
@ -1314,40 +1240,17 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
}
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
mtCOVERAGE_TEST_MARKER();
}
}
#else /* configUSE_QUEUE_SETS */
else
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
/* The task waiting has a higher priority so
record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
@ -1367,14 +1270,35 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
}
else
#else /* configUSE_QUEUE_SETS */
{
/* Increment the lock count so the task that unlocks the queue
knows that data was posted while it was locked. */
++( pxQueue->xTxLock );
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
xReturn = pdPASS;
}
@ -1525,7 +1449,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
now the critical section has been exited. */
taskENTER_CRITICAL(&pxQueue->mux);
// prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
@ -1548,20 +1471,17 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
#endif
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
portYIELD_WITHIN_API();
}
else
{
/* Try again. */
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
}
}
else
{
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
@ -1606,26 +1526,15 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
prvCopyDataFromQueue( pxQueue, pvBuffer );
--( pxQueue->uxMessagesWaiting );
/* If the queue is locked the event list will not be modified.
Instead update the lock count so the task that unlocks the queue
will know that an ISR has removed data while the queue was
locked. */
if( pxQueue->xRxLock == queueUNLOCKED )
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
/* The task waiting has a higher priority than us so
force a context switch. */
if( pxHigherPriorityTaskWoken != NULL )
{
/* The task waiting has a higher priority than us so
force a context switch. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
*pxHigherPriorityTaskWoken = pdTRUE;
}
else
{
@ -1639,9 +1548,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
}
else
{
/* Increment the lock count so the task that unlocks the queue
knows that data was removed while it was locked. */
++( pxQueue->xRxLock );
mtCOVERAGE_TEST_MARKER();
}
xReturn = pdPASS;
@ -1902,129 +1809,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
}
}
/*-----------------------------------------------------------*/
static void prvUnlockQueue( Queue_t * const pxQueue )
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
/* The lock counts contain the number of extra data items placed or
removed from the queue while the queue was locked. When a queue is
locked items can be added or removed, but the event lists cannot be
updated. */
taskENTER_CRITICAL(&pxQueue->mux);
{
/* See if data was added to the queue while it was locked. */
while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
{
/* Data was posted while the queue was locked. Are any tasks
blocked waiting for data to become available? */
#if ( configUSE_QUEUE_SETS == 1 )
{
if( pxQueue->pxQueueSetContainer != NULL )
{
if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
{
/* The queue is a member of a queue set, and posting to
the queue set caused a higher priority task to unblock.
A context switch is required. */
taskEXIT_CRITICAL(&pxQueue->mux); //ToDo: Is acquire/release needed around any of the vTaskMissedYield calls?
vTaskMissedYield();
taskENTER_CRITICAL(&pxQueue->mux);
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* Tasks that are removed from the event list will get added to
the pending ready list as the scheduler is still suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
taskEXIT_CRITICAL(&pxQueue->mux);
vTaskMissedYield();
taskENTER_CRITICAL(&pxQueue->mux);
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
}
#else /* configUSE_QUEUE_SETS */
{
/* Tasks that are removed from the event list will get added to
the pending ready list as the scheduler is still suspended. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority so record that a
context switch is required. */
taskEXIT_CRITICAL(&pxQueue->mux);
vTaskMissedYield();
taskENTER_CRITICAL(&pxQueue->mux);
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
break;
}
}
#endif /* configUSE_QUEUE_SETS */
--( pxQueue->xTxLock );
}
pxQueue->xTxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL(&pxQueue->mux);
/* Do the same for the Rx lock. */
taskENTER_CRITICAL(&pxQueue->mux);
{
while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
{
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
{
taskEXIT_CRITICAL(&pxQueue->mux);
vTaskMissedYield();
taskENTER_CRITICAL(&pxQueue->mux);
}
else
{
mtCOVERAGE_TEST_MARKER();
}
--( pxQueue->xRxLock );
}
else
{
break;
}
}
pxQueue->xRxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL(&pxQueue->mux);
}
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( Queue_t *pxQueue )
@ -2458,10 +2243,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
/* Only do anything if there are no messages in the queue. This function
will not actually cause the task to block, just place it on a blocked
list. It will not block until the scheduler is unlocked - at which
time a yield will be performed. If an item is added to the queue while
the queue is locked, and the calling task blocks on the queue, then the
calling task will be immediately unblocked when the queue is unlocked. */
// prvLockQueue( pxQueue );
time a yield will be performed. */
taskENTER_CRITICAL(&pxQueue->mux);
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{
/* There is nothing in the queue, block for the specified period. */
@ -2471,7 +2254,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
mtCOVERAGE_TEST_MARKER();
}
// prvUnlockQueue( pxQueue );
taskEXIT_CRITICAL(&pxQueue->mux);
}
#endif /* configUSE_TIMERS */

View file

@ -275,9 +275,7 @@ when the scheduler is unsuspended. The pending ready list itself can only be
accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
/* Muxes used in the task code */
PRIVILEGED_DATA static portBASE_TYPE xMutexesInitialised = pdFALSE;
/* For now, we use just one mux for all the critical sections. ToDo: give evrything a bit more granularity;
/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
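/* statically initialized spinlocks -- the runtime mutex-init path removed below is no longer needed */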
@ -577,15 +575,6 @@ static void prvResetNextTaskUnblockTime( void );
#endif
/*-----------------------------------------------------------*/
static void vTaskInitializeLocalMuxes( void )
{
vPortCPUInitializeMutex(&xTaskQueueMutex);
vPortCPUInitializeMutex(&xTickCountMutex);
xMutexesInitialised = pdTRUE;
}
/*-----------------------------------------------------------*/
@ -596,9 +585,6 @@ TCB_t * pxNewTCB;
StackType_t *pxTopOfStack;
BaseType_t i;
/* Initialize mutexes, if they're not already initialized. */
if (xMutexesInitialised == pdFALSE) vTaskInitializeLocalMuxes();
configASSERT( pxTaskCode );
configASSERT( ( ( uxPriority & ( ~portPRIVILEGE_BIT ) ) < configMAX_PRIORITIES ) );
configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );
@ -1725,10 +1711,6 @@ BaseType_t xAlreadyYielded = pdFALSE;
scheduler has been resumed it is safe to move all the pending ready
tasks from this list into their appropriate ready list. */
//This uses a mux, but can be called before tasks are scheduled. Make sure muxes are inited.
/* Initialize mutexes, if they're not already initialized. */
if (xMutexesInitialised == pdFALSE) vTaskInitializeLocalMuxes();
taskENTER_CRITICAL(&xTaskQueueMutex);
{
--uxSchedulerSuspended[ xPortGetCoreID() ];

View file

@ -217,3 +217,11 @@ As mentioned above, each key-value pair belongs to one of the namespaces. Namesp
+-------------------------------------------+
Item hash list
~~~~~~~~~~~~~~
To reduce the number of reads from flash memory, each instance of the Page class maintains a list of (item index; item hash) pairs. This list makes searches much quicker: instead of iterating over all entries and reading them from flash one at a time, ``Page::findItem`` first searches for the item hash in the hash list. This yields the index of the item within the page, if such an item exists. Due to hash collisions it is possible that a different item is found; this is handled by falling back to iteration over the items in flash.
Each node in the hash list contains a 24-bit hash and an 8-bit item index. The hash is calculated from the item's namespace and key name using CRC32, with the result truncated to 24 bits. To reduce the overhead of storing 32-bit entries in a linked list, the list is implemented as a doubly-linked list of arrays. Each array holds 29 entries, for a total size of 128 bytes including the linked-list pointers and a 32-bit count field. The minimum extra RAM usage per page is therefore 128 bytes, and the maximum is 640 bytes.
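For illustration, a minimal sketch of this node layout (field names modeled on the ``nvs_item_hash_list.hpp`` header added in this commit; sizes assume a 32-bit target):

#include <stdint.h>

/* one hash list node: an 8-bit item index packed with a 24-bit truncated CRC32 */
typedef struct {
    uint32_t index : 8;   /* entry index within the page; 0xff marks a vacated slot */
    uint32_t hash  : 24;  /* CRC32 of namespace index and key name, truncated to 24 bits */
} hash_list_node_t;

/* one 128-byte block of the doubly-linked list of arrays:
   two list pointers (8 bytes) + 32-bit count (4 bytes) + 29 nodes (116 bytes) = 128 bytes */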

View file

@ -17,7 +17,7 @@
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <esp_err.h>
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
@ -78,9 +78,8 @@ esp_err_t nvs_open(const char* name, nvs_open_mode open_mode, nvs_handle *out_ha
* This family of functions sets the value for a key, given its name. Note that
* actual storage will not be updated until the nvs_commit function is called.
*
* @param[in] handle Handle obtained from nvs_open function. If the handle was
* opened with read_only set to true, nvs_set_X functions will
* fail with ESP_ERR_NVS_READONLY.
* @param[in] handle Handle obtained from nvs_open function.
* Handles that were opened read only cannot be used.
* @param[in] key Key name. Maximal length is determined by the underlying
* implementation, but is guaranteed to be at least
* 16 characters. Shouldn't be empty.
@ -180,6 +179,41 @@ esp_err_t nvs_get_u64 (nvs_handle handle, const char* key, uint64_t* out_value);
esp_err_t nvs_get_str (nvs_handle handle, const char* key, char* out_value, size_t* length);
esp_err_t nvs_get_blob(nvs_handle handle, const char* key, void* out_value, size_t* length);
/**
* @brief Erase key-value pair with given key name.
*
* Note that actual storage may not be updated until the nvs_commit function is called.
*
* @param[in] handle Storage handle obtained with nvs_open.
* Handles that were opened read only cannot be used.
*
* @param[in] key Key name. Maximal length is determined by the underlying
* implementation, but is guaranteed to be at least
* 16 characters. Shouldn't be empty.
*
* @return - ESP_OK if erase operation was successful
* - ESP_ERR_NVS_INVALID_HANDLE if handle has been closed or is NULL
* - ESP_ERR_NVS_READ_ONLY if handle was opened as read only
* - ESP_ERR_NVS_NOT_FOUND if the requested key doesn't exist
* - other error codes from the underlying storage driver
*/
esp_err_t nvs_erase_key(nvs_handle handle, const char* key);
/**
* @brief Erase all key-value pairs in a namespace
*
* Note that actual storage may not be updated until the nvs_commit function is called.
*
* @param[in] handle Storage handle obtained with nvs_open.
* Handles that were opened read only cannot be used.
*
* @return - ESP_OK if erase operation was successful
* - ESP_ERR_NVS_INVALID_HANDLE if handle has been closed or is NULL
* - ESP_ERR_NVS_READ_ONLY if handle was opened as read only
* - other error codes from the underlying storage driver
*/
esp_err_t nvs_erase_all(nvs_handle handle);
/**
* @brief Write any pending changes to non-volatile storage
*
@ -187,8 +221,8 @@ esp_err_t nvs_get_blob(nvs_handle handle, const char* key, void* out_value, size
* to non-volatile storage. Individual implementations may write to storage at other times,
* but this is not guaranteed.
*
* @param[in] handle Storage handle obtained with nvs_open. If handle has to be
* opened as not read only for this call to succeed.
* @param[in] handle Storage handle obtained with nvs_open.
* Handles that were opened read only cannot be used.
*
* @return - ESP_OK if the changes have been written successfully
* - ESP_ERR_NVS_INVALID_HANDLE if handle has been closed or is NULL
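For illustration, a hedged usage sketch of the new erase functions together with nvs_commit (the namespace name and key below are hypothetical; error handling is elided):

#include "nvs.h"

void erase_example(void)
{
    nvs_handle handle;
    // the handle must be opened NVS_READWRITE; read-only handles fail with ESP_ERR_NVS_READ_ONLY
    if (nvs_open("storage", NVS_READWRITE, &handle) != ESP_OK) {  // "storage" is an example namespace
        return;
    }
    nvs_erase_key(handle, "wifi.ssid");   // remove a single key-value pair ("wifi.ssid" is an example key)
    nvs_erase_all(handle);                // or remove every key in the namespace
    nvs_commit(handle);                   // storage may not be updated until commit
    nvs_close(handle);
}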

View file

@ -17,18 +17,27 @@
#include "intrusive_list.h"
#include "nvs_platform.hpp"
#ifdef ESP_PLATFORM
// Uncomment this line to force output from this module
// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include "esp_log.h"
static const char* TAG = "nvs";
#else
#define ESP_LOGD(...)
#endif
class HandleEntry : public intrusive_list_node<HandleEntry>
{
public:
HandleEntry(){}
HandleEntry() {}
HandleEntry(nvs_handle handle, bool readOnly, uint8_t nsIndex) :
mHandle(handle),
mReadOnly(readOnly),
mNsIndex(nsIndex)
mHandle(handle),
mReadOnly(readOnly),
mNsIndex(nsIndex)
{
}
nvs_handle mHandle;
uint8_t mReadOnly;
uint8_t mNsIndex;
@ -55,7 +64,7 @@ extern "C" esp_err_t nvs_flash_init(uint32_t baseSector, uint32_t sectorCount)
{
Lock::init();
Lock lock;
NVS_DEBUGV("%s %d %d\r\n", __func__, baseSector, sectorCount);
ESP_LOGD(TAG, "init start=%d count=%d", baseSector, sectorCount);
s_nvs_handles.clear();
return s_nvs_storage.init(baseSector, sectorCount);
}
@ -75,7 +84,7 @@ static esp_err_t nvs_find_ns_handle(nvs_handle handle, HandleEntry& entry)
extern "C" esp_err_t nvs_open(const char* name, nvs_open_mode open_mode, nvs_handle *out_handle)
{
Lock lock;
NVS_DEBUGV("%s %s %d\r\n", __func__, name, open_mode);
ESP_LOGD(TAG, "%s %s %d", __func__, name, open_mode);
uint8_t nsIndex;
esp_err_t err = s_nvs_storage.createOrOpenNamespace(name, open_mode == NVS_READWRITE, nsIndex);
if (err != ESP_OK) {
@ -93,7 +102,7 @@ extern "C" esp_err_t nvs_open(const char* name, nvs_open_mode open_mode, nvs_han
extern "C" void nvs_close(nvs_handle handle)
{
Lock lock;
NVS_DEBUGV("%s %d\r\n", __func__, handle);
ESP_LOGD(TAG, "%s %d", __func__, handle);
auto it = find_if(begin(s_nvs_handles), end(s_nvs_handles), [=](HandleEntry& e) -> bool {
return e.mHandle == handle;
});
@ -103,11 +112,41 @@ extern "C" void nvs_close(nvs_handle handle)
s_nvs_handles.erase(it);
}
extern "C" esp_err_t nvs_erase_key(nvs_handle handle, const char* key)
{
Lock lock;
ESP_LOGD(TAG, "%s %s\r\n", __func__, key);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
return err;
}
if (entry.mReadOnly) {
return ESP_ERR_NVS_READ_ONLY;
}
return s_nvs_storage.eraseItem(entry.mNsIndex, key);
}
extern "C" esp_err_t nvs_erase_all(nvs_handle handle)
{
Lock lock;
ESP_LOGD(TAG, "%s\r\n", __func__);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
return err;
}
if (entry.mReadOnly) {
return ESP_ERR_NVS_READ_ONLY;
}
return s_nvs_storage.eraseNamespace(entry.mNsIndex);
}
template<typename T>
static esp_err_t nvs_set(nvs_handle handle, const char* key, T value)
{
Lock lock;
NVS_DEBUGV("%s %s %d %d\r\n", __func__, key, sizeof(T), (uint32_t) value);
ESP_LOGD(TAG, "%s %s %d %d", __func__, key, sizeof(T), (uint32_t) value);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
@ -170,7 +209,7 @@ extern "C" esp_err_t nvs_commit(nvs_handle handle)
extern "C" esp_err_t nvs_set_str(nvs_handle handle, const char* key, const char* value)
{
Lock lock;
NVS_DEBUGV("%s %s %s\r\n", __func__, key, value);
ESP_LOGD(TAG, "%s %s %s", __func__, key, value);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
@ -182,7 +221,7 @@ extern "C" esp_err_t nvs_set_str(nvs_handle handle, const char* key, const char*
extern "C" esp_err_t nvs_set_blob(nvs_handle handle, const char* key, const void* value, size_t length)
{
Lock lock;
NVS_DEBUGV("%s %s %d\r\n", __func__, key, length);
ESP_LOGD(TAG, "%s %s %d", __func__, key, length);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
@ -196,7 +235,7 @@ template<typename T>
static esp_err_t nvs_get(nvs_handle handle, const char* key, T* out_value)
{
Lock lock;
NVS_DEBUGV("%s %s %d\r\n", __func__, key, sizeof(T));
ESP_LOGD(TAG, "%s %s %d", __func__, key, sizeof(T));
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
@ -248,7 +287,7 @@ extern "C" esp_err_t nvs_get_u64 (nvs_handle handle, const char* key, uint64_t*
static esp_err_t nvs_get_str_or_blob(nvs_handle handle, nvs::ItemType type, const char* key, void* out_value, size_t* length)
{
Lock lock;
NVS_DEBUGV("%s %s\r\n", __func__, key);
ESP_LOGD(TAG, "%s %s", __func__, key);
HandleEntry entry;
auto err = nvs_find_ns_handle(handle, entry);
if (err != ESP_OK) {
@ -263,12 +302,10 @@ static esp_err_t nvs_get_str_or_blob(nvs_handle handle, nvs::ItemType type, cons
if (length == nullptr) {
return ESP_ERR_NVS_INVALID_LENGTH;
}
else if (out_value == nullptr) {
} else if (out_value == nullptr) {
*length = dataSize;
return ESP_OK;
}
else if (*length < dataSize) {
} else if (*length < dataSize) {
*length = dataSize;
return ESP_ERR_NVS_INVALID_LENGTH;
}

View file

@ -0,0 +1,96 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "nvs_item_hash_list.hpp"
namespace nvs
{
HashList::~HashList()
{
for (auto it = mBlockList.begin(); it != mBlockList.end();) {
auto tmp = it;
++it;
mBlockList.erase(tmp);
delete static_cast<HashListBlock*>(tmp);
}
}
HashList::HashListBlock::HashListBlock()
{
static_assert(sizeof(HashListBlock) == HashListBlock::BYTE_SIZE,
"cache block size calculation incorrect");
}
void HashList::insert(const Item& item, size_t index)
{
const uint32_t hash_24 = item.calculateCrc32WithoutValue() & 0xffffff;
// add entry to the end of last block if possible
if (mBlockList.size()) {
auto& block = mBlockList.back();
if (block.mCount < HashListBlock::ENTRY_COUNT) {
block.mNodes[block.mCount++] = HashListNode(hash_24, index);
return;
}
}
// if the above failed, create a new block and add entry to it
HashListBlock* newBlock = new HashListBlock;
mBlockList.push_back(newBlock);
newBlock->mNodes[0] = HashListNode(hash_24, index);
newBlock->mCount++;
}
void HashList::erase(size_t index)
{
for (auto it = std::begin(mBlockList); it != std::end(mBlockList);) {
bool haveEntries = false;
for (size_t i = 0; i < it->mCount; ++i) {
if (it->mNodes[i].mIndex == index) {
it->mNodes[i].mIndex = 0xff;
return;
}
if (it->mNodes[i].mIndex != 0xff) {
haveEntries = true;
}
}
if (!haveEntries) {
auto tmp = it;
++it;
mBlockList.erase(tmp);
delete static_cast<HashListBlock*>(tmp);
} else {
++it;
}
}
assert(false && "item should have been present in cache");
}
size_t HashList::find(size_t start, const Item& item)
{
const uint32_t hash_24 = item.calculateCrc32WithoutValue() & 0xffffff;
for (auto it = std::begin(mBlockList); it != std::end(mBlockList); ++it) {
for (size_t index = 0; index < it->mCount; ++index) {
HashListNode& e = it->mNodes[index];
if (e.mIndex >= start &&
e.mHash == hash_24 &&
e.mIndex != 0xff) {
return e.mIndex;
}
}
}
return SIZE_MAX;
}
} // namespace nvs

View file

@ -0,0 +1,68 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef nvs_item_hash_list_h
#define nvs_item_hash_list_h
#include "nvs.h"
#include "nvs_types.hpp"
#include "intrusive_list.h"
namespace nvs
{
class HashList
{
public:
~HashList();
void insert(const Item& item, size_t index);
void erase(const size_t index);
size_t find(size_t start, const Item& item);
protected:
struct HashListNode {
HashListNode() :
mIndex(0xff), mHash(0)
{
}
HashListNode(uint32_t hash, size_t index) :
mIndex((uint32_t) index), mHash(hash)
{
}
uint32_t mIndex : 8;
uint32_t mHash : 24;
};
struct HashListBlock : public intrusive_list_node<HashList::HashListBlock> {
HashListBlock();
static const size_t BYTE_SIZE = 128;
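// 29 entries per block on a 32-bit target: (128 - 8 bytes of list pointers - 4 bytes of count) / 4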
static const size_t ENTRY_COUNT = (BYTE_SIZE - sizeof(intrusive_list_node<HashListBlock>) - sizeof(size_t)) / 4;
size_t mCount = 0;
HashListNode mNodes[ENTRY_COUNT];
};
typedef intrusive_list<HashListBlock> TBlockList;
TBlockList mBlockList;
}; // class HashList
} // namespace nvs
#endif /* nvs_item_hash_list_h */

View file

@ -29,7 +29,7 @@ uint32_t Page::Header::calculateCrc32()
reinterpret_cast<uint8_t*>(this) + offsetof(Header, mSeqNumber),
offsetof(Header, mCrc32) - offsetof(Header, mSeqNumber));
}
esp_err_t Page::load(uint32_t sectorNumber)
{
mBaseAddress = sectorNumber * SEC_SIZE;
@ -59,15 +59,13 @@ esp_err_t Page::load(uint32_t sectorNumber)
break;
}
}
}
else if (header.mCrc32 != header.calculateCrc32()) {
} else if (header.mCrc32 != header.calculateCrc32()) {
header.mState = PageState::CORRUPT;
}
else {
} else {
mState = header.mState;
mSeqNumber = header.mSeqNumber;
}
switch (mState) {
case PageState::UNINITIALIZED:
break;
@ -108,6 +106,27 @@ esp_err_t Page::writeEntry(const Item& item)
return ESP_OK;
}
esp_err_t Page::writeEntryData(const uint8_t* data, size_t size)
{
assert(size % ENTRY_SIZE == 0);
assert(mNextFreeEntry != INVALID_ENTRY);
assert(mFirstUsedEntry != INVALID_ENTRY);
const uint16_t count = size / ENTRY_SIZE;
auto rc = spi_flash_write(getEntryAddress(mNextFreeEntry), reinterpret_cast<const uint32_t*>(data), static_cast<uint32_t>(size));
if (rc != ESP_OK) {
mState = PageState::INVALID;
return rc;
}
auto err = alterEntryRangeState(mNextFreeEntry, mNextFreeEntry + count, EntryState::WRITTEN);
if (err != ESP_OK) {
return err;
}
mUsedEntryCount += count;
mNextFreeEntry += count;
return ESP_OK;
}
esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize)
{
@ -148,16 +167,9 @@ esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, c
// write first item
item.nsIndex = nsIndex;
item.datatype = datatype;
item.span = (totalSize + ENTRY_SIZE - 1) / ENTRY_SIZE;
item.reserved = 0xff;
std::fill_n(reinterpret_cast<uint32_t*>(item.key), sizeof(item.key) / 4, 0xffffffff);
std::fill_n(reinterpret_cast<uint32_t*>(item.data), sizeof(item.data) / 4, 0xffffffff);
strncpy(item.key, key, sizeof(item.key) - 1);
item.key[sizeof(item.key) - 1] = 0;
size_t span = (totalSize + ENTRY_SIZE - 1) / ENTRY_SIZE;
item = Item(nsIndex, datatype, span, key);
mHashList.insert(item, mNextFreeEntry);
if (datatype != ItemType::SZ && datatype != ItemType::BLOB) {
memcpy(item.data, data, dataSize);
@ -177,18 +189,24 @@ esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, c
return err;
}
size_t left = dataSize;
while (left != 0) {
size_t willWrite = Page::ENTRY_SIZE;
willWrite = (left < willWrite)?left:willWrite;
memcpy(item.rawData, src, willWrite);
src += willWrite;
left -= willWrite;
size_t left = dataSize / ENTRY_SIZE * ENTRY_SIZE;
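// batched write: all whole ENTRY_SIZE chunks are written to flash in one
// spi_flash_write call; a sub-entry tail, if any, is handled separately below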
if (left > 0) {
err = writeEntryData(static_cast<const uint8_t*>(data), left);
if (err != ESP_OK) {
return err;
}
}
size_t tail = dataSize - left;
if (tail > 0) {
std::fill_n(item.rawData, ENTRY_SIZE / 4, 0xffffffff);
memcpy(item.rawData, static_cast<const uint8_t*>(data) + left, tail);
err = writeEntry(item);
if (err != ESP_OK) {
return err;
}
}
}
return ESP_OK;
}
@ -260,23 +278,11 @@ esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key)
return findItem(nsIndex, datatype, key, index, item);
}
esp_err_t Page::eraseEntry(size_t index)
{
auto state = mEntryTable.get(index);
assert(state == EntryState::WRITTEN || state == EntryState::EMPTY);
auto rc = alterEntryState(index, EntryState::ERASED);
if (rc != ESP_OK) {
return rc;
}
return ESP_OK;
}
esp_err_t Page::eraseEntryAndSpan(size_t index)
{
auto state = mEntryTable.get(index);
assert(state == EntryState::WRITTEN || state == EntryState::EMPTY);
mHashList.erase(index);
size_t span = 1;
if (state == EntryState::WRITTEN) {
@ -296,15 +302,18 @@ esp_err_t Page::eraseEntryAndSpan(size_t index)
if (mEntryTable.get(i) == EntryState::WRITTEN) {
--mUsedEntryCount;
}
rc = alterEntryState(i, EntryState::ERASED);
if (rc != ESP_OK) {
return rc;
}
++mErasedEntryCount;
}
if (span == 1) {
rc = alterEntryState(index, EntryState::ERASED);
} else {
rc = alterEntryRangeState(index, index + span, EntryState::ERASED);
}
if (rc != ESP_OK) {
return rc;
}
}
}
else {
} else {
auto rc = alterEntryState(index, EntryState::ERASED);
if (rc != ESP_OK) {
return rc;
@ -314,7 +323,7 @@ esp_err_t Page::eraseEntryAndSpan(size_t index)
if (index == mFirstUsedEntry) {
updateFirstUsedEntry(index, span);
}
if (index + span > mNextFreeEntry) {
mNextFreeEntry = index + span;
}
@ -337,7 +346,7 @@ void Page::updateFirstUsedEntry(size_t index, size_t span)
}
}
}
esp_err_t Page::moveItem(Page& other)
{
if (mFirstUsedEntry == INVALID_ENTRY) {
@ -347,7 +356,7 @@ esp_err_t Page::moveItem(Page& other)
if (mFindInfo.itemIndex() == mFirstUsedEntry) {
invalidateCache();
}
if (other.mState == PageState::UNINITIALIZED) {
auto err = other.initialize();
if (err != ESP_OK) {
@ -360,6 +369,7 @@ esp_err_t Page::moveItem(Page& other)
if (err != ESP_OK) {
return err;
}
other.mHashList.insert(entry, other.mNextFreeEntry);
err = other.writeEntry(entry);
if (err != ESP_OK) {
return err;
@ -367,9 +377,9 @@ esp_err_t Page::moveItem(Page& other)
size_t span = entry.span;
size_t end = mFirstUsedEntry + span;
assert(mFirstUsedEntry != INVALID_ENTRY || span == 1);
for (size_t i = mFirstUsedEntry + 1; i < end; ++i) {
readEntry(i, entry);
err = other.writeEntry(entry);
@ -377,17 +387,7 @@ esp_err_t Page::moveItem(Page& other)
return err;
}
}
for (size_t i = mFirstUsedEntry; i < end; ++i) {
err = eraseEntry(i);
if (err != ESP_OK) {
return err;
}
}
updateFirstUsedEntry(mFirstUsedEntry, span);
mErasedEntryCount += span;
mUsedEntryCount -= span;
return ESP_OK;
return eraseEntryAndSpan(mFirstUsedEntry);
}
esp_err_t Page::mLoadEntryTable()
@ -432,7 +432,7 @@ esp_err_t Page::mLoadEntryTable()
// but before the entry state table was altered, the entry located via
// entry state table may actually be half-written.
// this is easy to check by reading EntryHeader (i.e. first word)
if (mNextFreeEntry != INVALID_ENTRY) {
while (mNextFreeEntry < ENTRY_COUNT) {
uint32_t entryAddress = getEntryAddress(mNextFreeEntry);
uint32_t header;
auto rc = spi_flash_read(entryAddress, &header, sizeof(header));
@ -441,12 +441,20 @@ esp_err_t Page::mLoadEntryTable()
return rc;
}
if (header != 0xffffffff) {
auto oldState = mEntryTable.get(mNextFreeEntry);
auto err = alterEntryState(mNextFreeEntry, EntryState::ERASED);
if (err != ESP_OK) {
mState = PageState::INVALID;
return err;
}
++mNextFreeEntry;
if (oldState == EntryState::WRITTEN) {
--mUsedEntryCount;
}
++mErasedEntryCount;
}
else {
break;
}
}
@ -462,7 +470,7 @@ esp_err_t Page::mLoadEntryTable()
lastItemIndex = INVALID_ENTRY;
continue;
}
lastItemIndex = i;
auto err = readEntry(i, item);
@ -480,6 +488,8 @@ esp_err_t Page::mLoadEntryTable()
continue;
}
mHashList.insert(item, i);
if (item.datatype != ItemType::BLOB && item.datatype != ItemType::SZ) {
continue;
}
@ -498,7 +508,7 @@ esp_err_t Page::mLoadEntryTable()
}
i += span - 1;
}
// check that the last item is not a duplicate
if (lastItemIndex != INVALID_ENTRY) {
size_t findItemIndex = 0;
@ -513,6 +523,27 @@ esp_err_t Page::mLoadEntryTable()
}
}
}
} else if (mState == PageState::FULL || mState == PageState::FREEING) {
// We have already filled mHashList for a page in the active state.
// Do the same when the page is in the full or freeing state.
Item item;
for (size_t i = mFirstUsedEntry; i < ENTRY_COUNT; ++i) {
if (mEntryTable.get(i) != EntryState::WRITTEN) {
continue;
}
auto err = readEntry(i, item);
if (err != ESP_OK) {
mState = PageState::INVALID;
return err;
}
mHashList.insert(item, i);
size_t span = item.span;
i += span - 1;
}
}
return ESP_OK;
@ -527,7 +558,7 @@ esp_err_t Page::initialize()
header.mState = mState;
header.mSeqNumber = mSeqNumber;
header.mCrc32 = header.calculateCrc32();
auto rc = spi_flash_write(mBaseAddress, reinterpret_cast<uint32_t*>(&header), sizeof(header));
if (rc != ESP_OK) {
mState = PageState::INVALID;
@ -554,6 +585,31 @@ esp_err_t Page::alterEntryState(size_t index, EntryState state)
return ESP_OK;
}
esp_err_t Page::alterEntryRangeState(size_t begin, size_t end, EntryState state)
{
assert(end <= ENTRY_COUNT);
assert(end > begin);
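// Walk entries from last to first so that each modified word of the
// entry state table is flushed to flash exactly once, when the loop
// is about to leave that word.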
size_t wordIndex = mEntryTable.getWordIndex(end - 1);
for (ptrdiff_t i = end - 1; i >= static_cast<ptrdiff_t>(begin); --i) {
mEntryTable.set(i, state);
size_t nextWordIndex;
if (i == static_cast<ptrdiff_t>(begin)) {
nextWordIndex = (size_t) -1;
} else {
nextWordIndex = mEntryTable.getWordIndex(i - 1);
}
if (nextWordIndex != wordIndex) {
uint32_t word = mEntryTable.data()[wordIndex];
auto rc = spi_flash_write(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordIndex) * 4, &word, 4);
if (rc != ESP_OK) {
return rc;
}
}
wordIndex = nextWordIndex;
}
return ESP_OK;
}
esp_err_t Page::alterPageState(PageState state)
{
auto rc = spi_flash_write(mBaseAddress, reinterpret_cast<uint32_t*>(&state), sizeof(state));
@ -579,7 +635,7 @@ esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, si
if (mState == PageState::CORRUPT || mState == PageState::INVALID || mState == PageState::UNINITIALIZED) {
return ESP_ERR_NVS_NOT_FOUND;
}
if (itemIndex >= ENTRY_COUNT) {
return ESP_ERR_NVS_NOT_FOUND;
}
@ -593,12 +649,21 @@ esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, si
if (itemIndex > mFirstUsedEntry && itemIndex < ENTRY_COUNT) {
start = itemIndex;
}
size_t end = mNextFreeEntry;
if (end > ENTRY_COUNT) {
end = ENTRY_COUNT;
}
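// Fast path: when namespace, type and key are all specified, consult the
// in-RAM hash list first; a miss there means the item is not on this page.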
if (nsIndex != NS_ANY && datatype != ItemType::ANY && key != NULL) {
size_t cachedIndex = mHashList.find(start, Item(nsIndex, datatype, 0, key));
if (cachedIndex < ENTRY_COUNT) {
start = cachedIndex;
} else {
return ESP_ERR_NVS_NOT_FOUND;
}
}
size_t next;
for (size_t i = start; i < end; i = next) {
next = i + 1;
@ -671,7 +736,13 @@ esp_err_t Page::erase()
mState = PageState::INVALID;
return rc;
}
return load(sector);
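// reset in-memory state directly instead of calling load() on the sector
// that was just erased (cf. "avoid reading just-erased page" above)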
mUsedEntryCount = 0;
mErasedEntryCount = 0;
mFirstUsedEntry = INVALID_ENTRY;
mNextFreeEntry = INVALID_ENTRY;
mState = PageState::UNINITIALIZED;
mHashList = HashList();
return ESP_OK;
}
esp_err_t Page::markFreeing()
@ -695,7 +766,7 @@ void Page::invalidateCache()
{
mFindInfo = CachedFindInfo();
}
void Page::debugDump() const
{
printf("state=%x addr=%x seq=%d\nfirstUsed=%d nextFree=%d used=%d erased=%d\n", mState, mBaseAddress, mSeqNumber, static_cast<int>(mFirstUsedEntry), static_cast<int>(mNextFreeEntry), mUsedEntryCount, mErasedEntryCount);
@ -705,18 +776,15 @@ void Page::debugDump() const
EntryState state = mEntryTable.get(i);
if (state == EntryState::EMPTY) {
printf("E\n");
}
else if (state == EntryState::ERASED) {
} else if (state == EntryState::ERASED) {
printf("X\n");
}
else if (state == EntryState::WRITTEN) {
} else if (state == EntryState::WRITTEN) {
Item item;
readEntry(i, item);
if (skip == 0) {
printf("W ns=%2u type=%2u span=%3u key=\"%s\"\n", item.nsIndex, static_cast<unsigned>(item.datatype), item.span, item.key);
skip = item.span - 1;
}
else {
} else {
printf("D\n");
skip--;
}

View file

@ -23,6 +23,7 @@
#include "esp_spi_flash.h"
#include "compressed_enum_table.hpp"
#include "intrusive_list.h"
#include "nvs_item_hash_list.hpp"
namespace nvs
{
@ -161,25 +162,27 @@ public:
esp_err_t erase();
void invalidateCache();
void debugDump() const;
protected:
class Header {
class Header
{
public:
Header() {
Header()
{
std::fill_n(mReserved, sizeof(mReserved)/sizeof(mReserved[0]), UINT32_MAX);
}
PageState mState; // page state
uint32_t mSeqNumber; // sequence number of this page
uint32_t mReserved[5]; // unused, must be 0xffffffff
uint32_t mCrc32; // crc of everything except mState
uint32_t calculateCrc32();
};
enum class EntryState {
EMPTY = 0x3, // 0b11, default state after flash erase
WRITTEN = EMPTY & ~ESB_WRITTEN, // entry was written
@ -193,16 +196,18 @@ protected:
esp_err_t alterEntryState(size_t index, EntryState state);
esp_err_t alterEntryRangeState(size_t begin, size_t end, EntryState state);
esp_err_t alterPageState(PageState state);
esp_err_t readEntry(size_t index, Item& dst) const;
esp_err_t writeEntry(const Item& item);
esp_err_t writeEntryData(const uint8_t* data, size_t size);
esp_err_t eraseEntry(size_t index);
esp_err_t eraseEntryAndSpan(size_t index);
void updateFirstUsedEntry(size_t index, size_t span);
static constexpr size_t getAlignmentForType(ItemType type)
@ -229,6 +234,7 @@ protected:
uint16_t mErasedEntryCount = 0;
CachedFindInfo mFindInfo;
HashList mHashList;
static const uint32_t HEADER_OFFSET = 0;
static const uint32_t ENTRY_TABLE_OFFSET = HEADER_OFFSET + 32;

View file

@ -47,13 +47,12 @@ esp_err_t PageManager::load(uint32_t baseSector, uint32_t sectorCount)
if (mPageList.empty()) {
mSeqNumber = 0;
return activatePage();
}
else {
} else {
uint32_t lastSeqNo;
assert(mPageList.back().getSeqNumber(lastSeqNo) == ESP_OK);
mSeqNumber = lastSeqNo + 1;
}
// if power went out after a new item for the given key was written,
// but before the old one was erased, we end up with a duplicate item
Page& lastPage = back();
@ -64,7 +63,7 @@ esp_err_t PageManager::load(uint32_t baseSector, uint32_t sectorCount)
itemIndex += item.span;
lastItemIndex = itemIndex;
}
if (lastItemIndex != SIZE_MAX) {
auto last = PageManager::TPageListIterator(&lastPage);
for (auto it = begin(); it != last; ++it) {
@ -78,7 +77,7 @@ esp_err_t PageManager::load(uint32_t baseSector, uint32_t sectorCount)
for (auto it = begin(); it!= end(); ++it) {
if (it->state() == Page::PageState::FREEING) {
Page* newPage = &mPageList.back();
if(newPage->state() != Page::PageState::ACTIVE) {
if (newPage->state() != Page::PageState::ACTIVE) {
auto err = activatePage();
if (err != ESP_OK) {
return err;
@ -93,12 +92,12 @@ esp_err_t PageManager::load(uint32_t baseSector, uint32_t sectorCount)
return err;
}
}
auto err = it->erase();
if (err != ESP_OK) {
return err;
}
Page* p = static_cast<Page*>(it);
mPageList.erase(it);
mFreePageList.push_back(p);
@ -139,7 +138,7 @@ esp_err_t PageManager::requestNewPage()
if (err != ESP_OK) {
return err;
}
Page* newPage = &mPageList.back();
Page* erasedPage = maxErasedItemsPageIt;
@ -161,7 +160,7 @@ esp_err_t PageManager::requestNewPage()
if (err != ESP_OK) {
return err;
}
assert(usedEntries == newPage->getUsedEntryCount());
mPageList.erase(maxErasedItemsPageIt);
@ -188,5 +187,5 @@ esp_err_t PageManager::activatePage()
++mSeqNumber;
return ESP_OK;
}
} // namespace nvs

View file

@ -52,7 +52,7 @@ public:
protected:
friend class Iterator;
esp_err_t activatePage();
TPageList mPageList;

View file

@ -61,7 +61,6 @@ public:
} // namespace nvs
#else // ESP_PLATFORM
#define NVS_DEBUGV(...) printf(__VA_ARGS__)
namespace nvs
{
class Lock
@ -75,10 +74,5 @@ public:
} // namespace nvs
#endif // ESP_PLATFORM
#ifndef CONFIG_NVS_DEBUG
#undef NVS_DEBUGV
#define NVS_DEBUGV(...)
#endif
#endif /* nvs_platform_h */

View file

@ -51,7 +51,7 @@ esp_err_t Storage::init(uint32_t baseSector, uint32_t sectorCount)
Page& p = *it;
size_t itemIndex = 0;
Item item;
while(p.findItem(Page::NS_INDEX, ItemType::U8, nullptr, itemIndex, item) == ESP_OK) {
while (p.findItem(Page::NS_INDEX, ItemType::U8, nullptr, itemIndex, item) == ESP_OK) {
NamespaceEntry* entry = new NamespaceEntry;
item.getKey(entry->mName, sizeof(entry->mName) - 1);
item.getValue(entry->mIndex);
@ -69,6 +69,19 @@ esp_err_t Storage::init(uint32_t baseSector, uint32_t sectorCount)
return ESP_OK;
}
esp_err_t Storage::findItem(uint8_t nsIndex, ItemType datatype, const char* key, Page* &page, Item& item)
{
size_t itemIndex = 0;
for (auto it = std::begin(mPageManager); it != std::end(mPageManager); ++it) {
auto err = it->findItem(nsIndex, datatype, key, itemIndex, item);
if (err == ESP_OK) {
page = it;
return ESP_OK;
}
}
return ESP_ERR_NVS_NOT_FOUND;
}
esp_err_t Storage::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize)
{
if (mState != StorageState::ACTIVE) {
@ -103,14 +116,13 @@ esp_err_t Storage::writeItem(uint8_t nsIndex, ItemType datatype, const char* key
if (err != ESP_OK) {
return err;
}
}
else if (err != ESP_OK) {
} else if (err != ESP_OK) {
return err;
}
if (findPage) {
if (findPage->state() == Page::PageState::UNINITIALIZED ||
findPage->state() == Page::PageState::INVALID) {
findPage->state() == Page::PageState::INVALID) {
auto err = findItem(nsIndex, datatype, key, findPage, item);
assert(err == ESP_OK);
}
@ -158,7 +170,7 @@ esp_err_t Storage::createOrOpenNamespace(const char* nsName, bool canCreate, uin
}
mNamespaceUsage.set(ns, true);
nsIndex = ns;
NamespaceEntry* entry = new NamespaceEntry;
entry->mIndex = ns;
strncpy(entry->mName, nsName, sizeof(entry->mName) - 1);
@ -203,6 +215,27 @@ esp_err_t Storage::eraseItem(uint8_t nsIndex, ItemType datatype, const char* key
return findPage->eraseItem(nsIndex, datatype, key);
}
esp_err_t Storage::eraseNamespace(uint8_t nsIndex)
{
if (mState != StorageState::ACTIVE) {
return ESP_ERR_NVS_NOT_INITIALIZED;
}
for (auto it = std::begin(mPageManager); it != std::end(mPageManager); ++it) {
while (true) {
auto err = it->eraseItem(nsIndex, ItemType::ANY, nullptr);
if (err == ESP_ERR_NVS_NOT_FOUND) {
break;
}
else if (err != ESP_OK) {
return err;
}
}
}
return ESP_OK;
}
esp_err_t Storage::getItemDataSize(uint8_t nsIndex, ItemType datatype, const char* key, size_t& dataSize)
{
if (mState != StorageState::ACTIVE) {
@ -234,6 +267,7 @@ void Storage::debugCheck()
for (auto p = mPageManager.begin(); p != mPageManager.end(); ++p) {
size_t itemIndex = 0;
size_t usedCount = 0;
Item item;
while (p->findItem(Page::NS_ANY, ItemType::ANY, nullptr, itemIndex, item) == ESP_OK) {
std::stringstream keyrepr;
@ -246,7 +280,9 @@ void Storage::debugCheck()
}
keys.insert(std::make_pair(keystr, static_cast<Page*>(p)));
itemIndex += item.span;
usedCount += item.span;
}
assert(usedCount == p->getUsedEntryCount());
}
}
#endif //ESP_PLATFORM

View file

@ -69,13 +69,15 @@ public:
return readItem(nsIndex, itemTypeOf(value), key, &value, sizeof(value));
}
template<typename T>
esp_err_t eraseItem(uint8_t nsIndex, const char* key)
{
return eraseItem(nsIndex, itemTypeOf<T>(), key);
return eraseItem(nsIndex, ItemType::ANY, key);
}
esp_err_t eraseNamespace(uint8_t nsIndex);
void debugDump();
void debugCheck();
@ -88,19 +90,7 @@ protected:
void clearNamespaces();
esp_err_t findItem(uint8_t nsIndex, ItemType datatype, const char* key, Page* &page, Item& item)
{
size_t itemIndex = 0;
for (auto it = std::begin(mPageManager); it != std::end(mPageManager); ++it) {
auto err = it->findItem(nsIndex, datatype, key, itemIndex, item);
if (err == ESP_OK) {
page = it;
return ESP_OK;
}
}
return ESP_ERR_NVS_NOT_FOUND;
}
esp_err_t findItem(uint8_t nsIndex, ItemType datatype, const char* key, Page* &page, Item& item);
protected:
size_t mPageCount;

View file

@ -21,7 +21,7 @@
namespace nvs
{
uint32_t Item::calculateCrc32()
uint32_t Item::calculateCrc32() const
{
uint32_t result = 0xffffffff;
const uint8_t* p = reinterpret_cast<const uint8_t*>(this);
@ -32,6 +32,16 @@ uint32_t Item::calculateCrc32()
return result;
}
uint32_t Item::calculateCrc32WithoutValue() const
{
uint32_t result = 0xffffffff;
const uint8_t* p = reinterpret_cast<const uint8_t*>(this);
result = crc32_le(result, p + offsetof(Item, nsIndex),
offsetof(Item, datatype) - offsetof(Item, nsIndex));
result = crc32_le(result, p + offsetof(Item, key), sizeof(key));
return result;
}
uint32_t Item::calculateCrc32(const uint8_t* data, size_t size)
{
uint32_t result = 0xffffffff;

View file

@ -77,7 +77,25 @@ public:
static const size_t MAX_KEY_LENGTH = sizeof(key) - 1;
uint32_t calculateCrc32();
Item(uint8_t nsIndex, ItemType datatype, uint8_t span, const char* key_)
: nsIndex(nsIndex), datatype(datatype), span(span), reserved(0xff)
{
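// pre-fill key and data with the flash erased-byte pattern (0xff) so
// unwritten bytes have a deterministic value for CRC calculations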
std::fill_n(reinterpret_cast<uint32_t*>(key), sizeof(key) / 4, 0xffffffff);
std::fill_n(reinterpret_cast<uint32_t*>(data), sizeof(data) / 4, 0xffffffff);
if (key_) {
strncpy(key, key_, sizeof(key) - 1);
key[sizeof(key) - 1] = 0;
} else {
key[0] = 0;
}
}
Item()
{
}
uint32_t calculateCrc32() const;
uint32_t calculateCrc32WithoutValue() const;
static uint32_t calculateCrc32(const uint8_t* data, size_t size);
void getKey(char* dst, size_t dstSize)

View file

@ -8,6 +8,7 @@ SOURCE_FILES = \
nvs_page.cpp \
nvs_pagemanager.cpp \
nvs_storage.cpp \
nvs_item_hash_list.cpp \
) \
spi_flash_emulation.cpp \
test_compressed_enum_table.cpp \

View file

@ -386,6 +386,27 @@ TEST_CASE("can modify an item on a page which will be erased", "[nvs]")
}
TEST_CASE("can erase items", "[nvs]")
{
SpiFlashEmulator emu(3);
Storage storage;
CHECK(storage.init(0, 3) == ESP_OK);
for (size_t i = 0; i < Page::ENTRY_COUNT * 2 - 3; ++i) {
char name[Item::MAX_KEY_LENGTH + 1];
snprintf(name, sizeof(name), "key%05d", static_cast<int>(i));
REQUIRE(storage.writeItem(3, name, static_cast<int>(i)) == ESP_OK);
}
CHECK(storage.writeItem(1, "foo", 32) == ESP_OK);
CHECK(storage.writeItem(2, "foo", 64) == ESP_OK);
CHECK(storage.eraseItem(2, "foo") == ESP_OK);
int val;
CHECK(storage.readItem(1, "foo", val) == ESP_OK);
CHECK(val == 32);
CHECK(storage.eraseNamespace(3) == ESP_OK);
CHECK(storage.readItem(2, "foo", val) == ESP_ERR_NVS_NOT_FOUND);
CHECK(storage.readItem(3, "key00222", val) == ESP_ERR_NVS_NOT_FOUND);
}
#define TEST_ESP_ERR(rc, res) CHECK((rc) == (res))
#define TEST_ESP_OK(rc) CHECK((rc) == ESP_OK)
@ -443,18 +464,140 @@ TEST_CASE("wifi test", "[nvs]")
SpiFlashEmulator emu(10);
emu.randomize(10);
nvs_handle handle;
const uint32_t NVS_FLASH_SECTOR = 5;
const uint32_t NVS_FLASH_SECTOR_COUNT_MIN = 3;
emu.setBounds(NVS_FLASH_SECTOR, NVS_FLASH_SECTOR + NVS_FLASH_SECTOR_COUNT_MIN);
TEST_ESP_OK(nvs_flash_init(NVS_FLASH_SECTOR, NVS_FLASH_SECTOR_COUNT_MIN));
TEST_ESP_OK(nvs_open("nvs.net80211", NVS_READWRITE, &handle));
nvs_handle misc_handle;
TEST_ESP_OK(nvs_open("nvs.net80211", NVS_READWRITE, &misc_handle));
char log[33];
size_t log_size = sizeof(log);
TEST_ESP_ERR(nvs_get_str(misc_handle, "log", log, &log_size), ESP_ERR_NVS_NOT_FOUND);
strcpy(log, "foobarbazfizzz");
TEST_ESP_OK(nvs_set_str(misc_handle, "log", log));
nvs_handle net80211_handle;
TEST_ESP_OK(nvs_open("nvs.net80211", NVS_READWRITE, &net80211_handle));
uint8_t opmode = 2;
if (nvs_get_u8(handle, "wifi.opmode", &opmode) != ESP_OK) {
TEST_ESP_OK(nvs_set_u8(handle, "wifi.opmode", opmode));
}
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "wifi.opmode", &opmode), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "wifi.opmode", opmode));
uint8_t country = 0;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "wifi.country", &country), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "wifi.country", country));
char ssid[36];
size_t size = sizeof(ssid);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.ssid", ssid, &size), ESP_ERR_NVS_NOT_FOUND);
strcpy(ssid, "my android AP");
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.ssid", ssid, size));
char mac[6];
size = sizeof(mac);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.mac", mac, &size), ESP_ERR_NVS_NOT_FOUND);
memset(mac, 0xab, 6);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.mac", mac, size));
uint8_t authmode = 1;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "sta.authmode", &authmode), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "sta.authmode", authmode));
char pswd[65];
size = sizeof(pswd);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.pswd", pswd, &size), ESP_ERR_NVS_NOT_FOUND);
strcpy(pswd, "`123456788990-=");
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.pswd", pswd, size));
char pmk[32];
size = sizeof(pmk);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.pmk", pmk, &size), ESP_ERR_NVS_NOT_FOUND);
memset(pmk, 1, size);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.pmk", pmk, size));
uint8_t chan = 1;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "sta.chan", &chan), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "sta.chan", chan));
uint8_t autoconn = 1;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "auto.conn", &autoconn), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "auto.conn", autoconn));
uint8_t bssid_set = 1;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "bssid.set", &bssid_set), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "bssid.set", bssid_set));
char bssid[6];
size = sizeof(bssid);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.bssid", bssid, &size), ESP_ERR_NVS_NOT_FOUND);
memset(bssid, 0xcd, 6);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.bssid", bssid, size));
uint8_t phym = 3;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "sta.phym", &phym), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "sta.phym", phym));
uint8_t phybw = 2;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "sta.phybw", &phybw), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "sta.phybw", phybw));
char apsw[2];
size = sizeof(apsw);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.apsw", apsw, &size), ESP_ERR_NVS_NOT_FOUND);
memset(apsw, 0x2, size);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.apsw", apsw, size));
char apinfo[700];
size = sizeof(apinfo);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "sta.apinfo", apinfo, &size), ESP_ERR_NVS_NOT_FOUND);
memset(apinfo, 0, size);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "sta.apinfo", apinfo, size));
size = sizeof(ssid);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "ap.ssid", ssid, &size), ESP_ERR_NVS_NOT_FOUND);
strcpy(ssid, "ESP_A2F340");
TEST_ESP_OK(nvs_set_blob(net80211_handle, "ap.ssid", ssid, size));
size = sizeof(mac);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "ap.mac", mac, &size), ESP_ERR_NVS_NOT_FOUND);
memset(mac, 0xac, 6);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "ap.mac", mac, size));
size = sizeof(pswd);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "ap.passwd", pswd, &size), ESP_ERR_NVS_NOT_FOUND);
strcpy(pswd, "");
TEST_ESP_OK(nvs_set_blob(net80211_handle, "ap.passwd", pswd, size));
size = sizeof(pmk);
TEST_ESP_ERR(nvs_get_blob(net80211_handle, "ap.pmk", pmk, &size), ESP_ERR_NVS_NOT_FOUND);
memset(pmk, 1, size);
TEST_ESP_OK(nvs_set_blob(net80211_handle, "ap.pmk", pmk, size));
chan = 6;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "ap.chan", &chan), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "ap.chan", chan));
authmode = 0;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "ap.authmode", &authmode), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "ap.authmode", authmode));
uint8_t hidden = 0;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "ap.hidden", &hidden), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "ap.hidden", hidden));
uint8_t max_conn = 4;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "ap.max.conn", &max_conn), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "ap.max.conn", max_conn));
uint8_t bcn_interval = 2;
TEST_ESP_ERR(nvs_get_u8(net80211_handle, "bcn_interval", &bcn_interval), ESP_ERR_NVS_NOT_FOUND);
TEST_ESP_OK(nvs_set_u8(net80211_handle, "bcn_interval", bcn_interval));
s_perf << "Time to simulate nvs init with wifi libs: " << emu.getTotalTime() << " us (" << emu.getEraseOps() << "E " << emu.getWriteOps() << "W " << emu.getReadOps() << "R " << emu.getWriteBytes() << "Wb " << emu.getReadBytes() << "Rb)" << std::endl;
}

View file

@ -0,0 +1,16 @@
menu "SPI Flash driver"
config SPI_FLASH_ENABLE_COUNTERS
bool "Enable operation counters"
default n
help
This option enables the following APIs:
spi_flash_reset_counters
spi_flash_dump_counters
spi_flash_get_counters
These APIs may be used to collect performance data for spi_flash operations
and to help understand the behaviour of libraries which use SPI flash.
endmenu
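A minimal sketch of how these counters might be exercised once the option is enabled; the flash address and buffer here are illustrative, while the three API calls are the ones declared in esp_spi_flash.h:

    #include "esp_spi_flash.h"

    static uint32_t buf[256];

    void profile_flash_reads(void)
    {
        spi_flash_reset_counters();                  // zero all counters
        spi_flash_read(0x10000, buf, sizeof(buf));   // operation under test
        spi_flash_dump_counters();                   // log count/time/bytes
    }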

View file

@ -14,6 +14,9 @@
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
@ -25,7 +28,7 @@
#include "esp_ipc.h"
#include "esp_attr.h"
#include "esp_spi_flash.h"
#include "esp_log.h"
/*
Driver for SPI flash read/write/erase operations
@ -71,6 +74,19 @@ static bool s_flash_op_can_start = false;
static bool s_flash_op_complete = false;
#endif //CONFIG_FREERTOS_UNICORE
#if CONFIG_SPI_FLASH_ENABLE_COUNTERS
static const char* TAG = "spi_flash";
static spi_flash_counters_t s_flash_stats;
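// COUNTER_STOP below converts the elapsed CPU cycle count to microseconds by
// dividing by (XT_CLOCK_FREQ / 1000000), so the 'time' field accumulates in us.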
#define COUNTER_START() uint32_t ts_begin = xthal_get_ccount()
#define COUNTER_STOP(counter) do{ s_flash_stats.counter.count++; s_flash_stats.counter.time += (xthal_get_ccount() - ts_begin) / (XT_CLOCK_FREQ / 1000000); } while(0)
#define COUNTER_ADD_BYTES(counter, size) do { s_flash_stats.counter.bytes += size; } while (0)
#else
#define COUNTER_START()
#define COUNTER_STOP(counter)
#define COUNTER_ADD_BYTES(counter, size)
#endif //CONFIG_SPI_FLASH_ENABLE_COUNTERS
#ifndef CONFIG_FREERTOS_UNICORE
@ -95,6 +111,10 @@ static void IRAM_ATTR spi_flash_op_block_func(void* arg)
void spi_flash_init()
{
s_flash_op_mutex = xSemaphoreCreateMutex();
#if CONFIG_SPI_FLASH_ENABLE_COUNTERS
spi_flash_reset_counters();
#endif
}
static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
@ -160,7 +180,9 @@ static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu()
void spi_flash_init()
{
// No-op in single core mode, apart from the optional counter reset below
#if CONFIG_SPI_FLASH_ENABLE_COUNTERS
spi_flash_reset_counters();
#endif
}
static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
@ -193,6 +215,7 @@ SpiFlashOpResult IRAM_ATTR spi_flash_unlock()
esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec)
{
COUNTER_START();
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = spi_flash_unlock();
@ -200,27 +223,33 @@ esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec)
rc = SPIEraseSector(sec);
}
spi_flash_enable_interrupts_caches_and_other_cpu();
COUNTER_STOP(erase);
return spi_flash_translate_rc(rc);
}
esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uint32_t size)
{
COUNTER_START();
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = spi_flash_unlock();
if (rc == SPI_FLASH_RESULT_OK) {
rc = SPIWrite(dest_addr, src, (int32_t) size);
COUNTER_ADD_BYTES(write, size);
}
spi_flash_enable_interrupts_caches_and_other_cpu();
COUNTER_STOP(write);
return spi_flash_translate_rc(rc);
}
esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t size)
{
COUNTER_START();
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = SPIRead(src_addr, dest, (int32_t) size);
SpiFlashOpResult rc = SPIRead(src_addr, dest, (int32_t) size);
COUNTER_ADD_BYTES(read, size);
spi_flash_enable_interrupts_caches_and_other_cpu();
COUNTER_STOP(read);
return spi_flash_translate_rc(rc);
}
@ -270,3 +299,30 @@ static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_sta
SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
}
#if CONFIG_SPI_FLASH_ENABLE_COUNTERS
static inline void dump_counter(spi_flash_counter_t* counter, const char* name)
{
ESP_LOGI(TAG, "%s count=%8d time=%8dus bytes=%8d", name,
counter->count, counter->time, counter->bytes);
}
const spi_flash_counters_t* spi_flash_get_counters()
{
return &s_flash_stats;
}
void spi_flash_reset_counters()
{
memset(&s_flash_stats, 0, sizeof(s_flash_stats));
}
void spi_flash_dump_counters()
{
dump_counter(&s_flash_stats.read, "read ");
dump_counter(&s_flash_stats.write, "write");
dump_counter(&s_flash_stats.erase, "erase");
}
#endif //CONFIG_SPI_FLASH_ENABLE_COUNTERS

View file

@ -17,6 +17,7 @@
#include <stdint.h>
#include "esp_err.h"
#include "sdkconfig.h"
#ifdef __cplusplus
extern "C" {
@ -69,6 +70,43 @@ esp_err_t spi_flash_write(uint32_t des_addr, const uint32_t *src_addr, uint32_t
esp_err_t spi_flash_read(uint32_t src_addr, uint32_t *des_addr, uint32_t size);
#if CONFIG_SPI_FLASH_ENABLE_COUNTERS
/**
* Structure holding statistics for one type of operation
*/
typedef struct {
uint32_t count; // number of times operation was executed
uint32_t time; // total time taken, in microseconds
uint32_t bytes; // total number of bytes, for read and write operations
} spi_flash_counter_t;
typedef struct {
spi_flash_counter_t read;
spi_flash_counter_t write;
spi_flash_counter_t erase;
} spi_flash_counters_t;
/**
* @brief Reset SPI flash operation counters
*/
void spi_flash_reset_counters();
/**
* @brief Print SPI flash operation counters
*/
void spi_flash_dump_counters();
/**
* @brief Return current SPI flash operation counters
*
* @return pointer to the spi_flash_counters_t structure holding values
* of the operation counters
*/
const spi_flash_counters_t* spi_flash_get_counters();
#endif //CONFIG_SPI_FLASH_ENABLE_COUNTERS
#ifdef __cplusplus
}
#endif
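A sketch of reading the counters directly instead of dumping them to the log; the field names come from spi_flash_counters_t above, and the printf format is illustrative:

    #include <stdio.h>
    #include "esp_spi_flash.h"

    void report_read_stats(void)
    {
        const spi_flash_counters_t* c = spi_flash_get_counters();
        printf("reads: %u ops, %u us, %u bytes\n",
               (unsigned) c->read.count, (unsigned) c->read.time,
               (unsigned) c->read.bytes);
    }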