app_update: fix intermittent failure with firmware updates

The routine `spi_flash_cache2phys` sometimes returns an incorrect
value, resulting in a failure to determine the currently running OTA
partition. This in turn aborts the firmware update process. The issue
was more prominent with SPIRAM enabled.

The fix ensures proper cache guards during `spi_flash_cache2phys`,
and also for a few other similar APIs.
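
A minimal sketch of the guard pattern (illustrative; the helper name
`read_mmu_entry_guarded` is made up here and simply mirrors the
`spi_flash_protected_read_mmu_entry` helper added in the diff below):
any read of the flash MMU table is wrapped so that the other CPU is
stalled and the flash cache is disabled for the duration of the access.

/* Must be placed in IRAM and must not be inlined into a flash-resident
   caller, because the flash cache is disabled while it runs. */
static uint32_t IRAM_ATTR NOINLINE_ATTR read_mmu_entry_guarded(int index)
{
    uint32_t value;
    spi_flash_disable_interrupts_caches_and_other_cpu();
    value = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[index]);
    spi_flash_enable_interrupts_caches_and_other_cpu();
    return value;
}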

In addition, `esp_ota_get_running_partition` has been optimized to
cache the currently running partition for subsequent invocations.
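
A usage sketch (illustrative; the tag and function name below are made
up for the example): the first call performs the cache2phys lookup and
partition-table walk, while subsequent calls return the cached pointer.

#include "esp_ota_ops.h"
#include "esp_log.h"

static const char *TAG = "ota_example";  /* hypothetical log tag */

void log_running_partition(void)
{
    /* Cached after the first successful lookup, see diff below. */
    const esp_partition_t *running = esp_ota_get_running_partition();
    ESP_LOGI(TAG, "Running partition: %s at offset 0x%x",
             running->label, (unsigned) running->address);
}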

Fixes https://github.com/espressif/esp-idf/issues/2451

Mahavir Jain 2018-09-28 15:59:52 +05:30 committed by bot
parent 18684f53ee
commit b3aff63db4
4 changed files with 48 additions and 17 deletions

@@ -494,9 +494,18 @@ const esp_partition_t *esp_ota_get_boot_partition(void)
const esp_partition_t* esp_ota_get_running_partition(void)
{
static const esp_partition_t *curr_partition = NULL;
/*
* Currently running partition is unlikely to change across reset cycle,
* so it can be cached here, and avoid lookup on every flash write operation.
*/
if (curr_partition != NULL) {
return curr_partition;
}
/* Find the flash address of this exact function. By definition that is part
of the currently running firmware. Then find the enclosing partition. */
size_t phys_offs = spi_flash_cache2phys(esp_ota_get_running_partition);
assert (phys_offs != SPI_FLASH_CACHE2PHYS_FAIL); /* indicates cache2phys lookup is buggy */
@@ -510,6 +519,7 @@ const esp_partition_t* esp_ota_get_running_partition(void)
const esp_partition_t *p = esp_partition_get(it);
if (p->address <= phys_offs && p->address + p->size > phys_offs) {
esp_partition_iterator_release(it);
curr_partition = p;
return p;
}
it = esp_partition_next(it);

@@ -68,4 +68,7 @@
// after restart or during a deep sleep / wake cycle.
#define RTC_NOINIT_ATTR __attribute__((section(".rtc_noinit")))
// Forces to not inline function
#define NOINLINE_ATTR __attribute__((noinline))
#endif /* __ESP_ATTR_H__ */

@@ -286,23 +286,41 @@ void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
free(it);
}
static void IRAM_ATTR NOINLINE_ATTR spi_flash_protected_mmap_init()
{
spi_flash_disable_interrupts_caches_and_other_cpu();
spi_flash_mmap_init();
spi_flash_enable_interrupts_caches_and_other_cpu();
}
static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int index)
{
uint32_t value;
spi_flash_disable_interrupts_caches_and_other_cpu();
value = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[index]);
spi_flash_enable_interrupts_caches_and_other_cpu();
return value;
}
void spi_flash_mmap_dump()
{
spi_flash_mmap_init();
spi_flash_protected_mmap_init();
mmap_entry_t* it;
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
}
for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
if (s_mmap_page_refcnt[i] != 0) {
printf("page %d: refcnt=%d paddr=%d\n",
i, (int) s_mmap_page_refcnt[i], DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]));
uint32_t paddr = spi_flash_protected_read_mmu_entry(i);
printf("page %d: refcnt=%d paddr=%d\n", i, (int) s_mmap_page_refcnt[i], paddr);
}
}
}
uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{
spi_flash_disable_interrupts_caches_and_other_cpu();
spi_flash_mmap_init();
int count = 0;
int region_begin; // first page to check
@@ -315,7 +333,8 @@ uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
count++;
}
}
DPORT_INTERRUPT_RESTORE();
DPORT_INTERRUPT_RESTORE();
spi_flash_enable_interrupts_caches_and_other_cpu();
return count;
}
@@ -384,7 +403,6 @@ static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t leng
return false;
}
uint32_t spi_flash_cache2phys(const void *cached)
{
intptr_t c = (intptr_t)cached;
@@ -405,7 +423,7 @@ uint32_t spi_flash_cache2phys(const void *cached)
/* cached address was not in IROM or DROM */
return SPI_FLASH_CACHE2PHYS_FAIL;
}
uint32_t phys_page = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[cache_page]);
uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
if (phys_page == INVALID_ENTRY_VAL) {
/* page is not mapped */
return SPI_FLASH_CACHE2PHYS_FAIL;
@@ -414,8 +432,7 @@ uint32_t spi_flash_cache2phys(const void *cached)
return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
}
const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
const void *IRAM_ATTR spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
{
uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
int start, end, page_delta;
@@ -432,15 +449,18 @@ const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t mem
base = VADDR1_START_ADDR;
page_delta = 64;
}
spi_flash_disable_interrupts_caches_and_other_cpu();
DPORT_INTERRUPT_DISABLE();
for (int i = start; i < end; i++) {
if (DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == phys_page) {
i -= page_delta;
intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
DPORT_INTERRUPT_RESTORE();
spi_flash_enable_interrupts_caches_and_other_cpu();
return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
}
}
DPORT_INTERRUPT_RESTORE();
spi_flash_enable_interrupts_caches_and_other_cpu();
return NULL;
}

@@ -244,15 +244,13 @@ void spi_flash_mmap_dump();
/**
* @brief get free pages number which can be mmap
*
* This function will return free page number of the mmu table which can mmap,
* when you want to call spi_flash_mmap to mmap an ranger of flash data to Dcache or Icache
* memmory region, maybe the size of MMU table will exceed,so if you are not sure the
* size need mmap is ok, can call the interface and watch how many MMU table page can be
* mmaped.
* This function will return number of free pages available in mmu table. This could be useful
* before calling actual spi_flash_mmap (maps flash range to DCache or ICache memory) to check
* if there is sufficient space available for mapping.
*
* @param memory memmory type of MMU table free page
* @param memory memory type of MMU table free page
*
* @return number of free pages which can be mmaped
* @return number of free pages which can be mmaped
*/
uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory);
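
A usage sketch under assumptions (the wrapper name and error handling
are made up for the example, and `src_addr` is assumed to be aligned to
SPI_FLASH_MMU_PAGE_SIZE as spi_flash_mmap() requires): the free page
count can be checked before attempting a data mapping.

#include "esp_err.h"
#include "esp_spi_flash.h"

esp_err_t map_if_possible(size_t src_addr, size_t size,
                          const void **out_ptr,
                          spi_flash_mmap_handle_t *out_handle)
{
    /* Number of 64 kB MMU pages the requested range would occupy. */
    uint32_t needed = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
    if (spi_flash_mmap_get_free_pages(SPI_FLASH_MMAP_DATA) < needed) {
        return ESP_ERR_NO_MEM;  /* not enough free MMU pages for this mapping */
    }
    return spi_flash_mmap(src_addr, size, SPI_FLASH_MMAP_DATA, out_ptr, out_handle);
}

Note this is only a pre-check: the pages also have to be found as a
contiguous run of MMU entries, so spi_flash_mmap() may still return
ESP_ERR_NO_MEM.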