Merge branch 'bugfix/fix_random_failure_with_ota_updates_v3.1' into 'release/v3.1'
app_update: fix intermittent failure with firmware updates (backport v3.1)

See merge request idf/esp-idf!3419
Commit 93f04b87b2
@@ -292,7 +292,7 @@ static esp_err_t esp_rewrite_ota_data(esp_partition_subtype_t subtype)
     uint16_t ota_app_count = 0;
     uint32_t i = 0;
     uint32_t seq;
-    static spi_flash_mmap_memory_t ota_data_map;
+    spi_flash_mmap_handle_t ota_data_map;
     const void *result = NULL;

     find_partition = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_DATA_OTA, NULL);
@@ -438,7 +438,7 @@ const esp_partition_t *esp_ota_get_boot_partition(void)
 {
     esp_err_t ret;
     const esp_partition_t *find_partition = NULL;
-    static spi_flash_mmap_memory_t ota_data_map;
+    spi_flash_mmap_handle_t ota_data_map;
     const void *result = NULL;
     uint16_t ota_app_count = 0;
     find_partition = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_DATA_OTA, NULL);
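Both hunks above change the local ota_data_map from a static spi_flash_mmap_memory_t (the memory-type enum) to a plain spi_flash_mmap_handle_t, which is the out-handle type taken by the partition mmap API, and drop the unneeded static storage class. A minimal sketch of the handle-based mapping pattern these functions use; the function name and the reduced error handling here are illustrative, not part of the commit:

#include "esp_partition.h"
#include "esp_spi_flash.h"

/* Map the OTA data partition, inspect it, then release the MMU pages.
   Illustrative only; real code should check and propagate esp_err_t values. */
static void inspect_otadata(void)
{
    const esp_partition_t *otadata = esp_partition_find_first(
            ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_DATA_OTA, NULL);
    if (otadata == NULL) {
        return;
    }

    const void *mapped = NULL;
    spi_flash_mmap_handle_t map_handle;     /* handle type, as in the fix above */
    if (esp_partition_mmap(otadata, 0, otadata->size, SPI_FLASH_MMAP_DATA,
                           &mapped, &map_handle) != ESP_OK) {
        return;
    }

    /* ... read the ota_data selection entries through 'mapped' ... */

    spi_flash_munmap(map_handle);           /* frees the mapped MMU pages */
}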
@@ -494,9 +494,18 @@ const esp_partition_t *esp_ota_get_boot_partition(void)

 const esp_partition_t* esp_ota_get_running_partition(void)
 {
+    static const esp_partition_t *curr_partition = NULL;
+
+    /*
+     * Currently running partition is unlikely to change across reset cycle,
+     * so it can be cached here, and avoid lookup on every flash write operation.
+     */
+    if (curr_partition != NULL) {
+        return curr_partition;
+    }
     /* Find the flash address of this exact function. By definition that is part
        of the currently running firmware. Then find the enclosing partition. */

     size_t phys_offs = spi_flash_cache2phys(esp_ota_get_running_partition);

     assert (phys_offs != SPI_FLASH_CACHE2PHYS_FAIL); /* indicates cache2phys lookup is buggy */
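The added block memoizes the lookup in a function-local static, so only the first call pays for the cache2phys translation and the partition iteration; every later call (for example from the flash write path) returns the cached pointer. A small hypothetical caller showing typical use of this API; the log tag and function name are illustrative:

#include "esp_ota_ops.h"
#include "esp_log.h"

static const char *TAG = "ota_info";    /* hypothetical log tag */

/* Compare the partition we are running from with the one configured to boot
   next. esp_ota_get_running_partition() can now be called freely: after the
   first call the result comes from the static cache added above. */
static void log_partition_state(void)
{
    const esp_partition_t *running = esp_ota_get_running_partition();
    const esp_partition_t *boot = esp_ota_get_boot_partition();

    ESP_LOGI(TAG, "running partition: %s @ 0x%08x", running->label, running->address);
    if (boot != NULL && boot != running) {
        ESP_LOGI(TAG, "next boot partition: %s @ 0x%08x", boot->label, boot->address);
    }
}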
@@ -510,6 +519,7 @@ const esp_partition_t* esp_ota_get_running_partition(void)
         const esp_partition_t *p = esp_partition_get(it);
         if (p->address <= phys_offs && p->address + p->size > phys_offs) {
             esp_partition_iterator_release(it);
+            curr_partition = p;
             return p;
         }
         it = esp_partition_next(it);
@@ -55,4 +55,7 @@
 // after restart or during a deep sleep / wake cycle.
 #define RTC_NOINIT_ATTR __attribute__((section(".rtc_noinit")))

+// Forces to not inline function
+#define NOINLINE_ATTR __attribute__((noinline))
+
 #endif /* __ESP_ATTR_H__ */
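NOINLINE_ATTR complements IRAM_ATTR for the helpers introduced further down: placing a function in IRAM only helps if the compiler does not inline its body back into a flash-resident caller, where the code would again be fetched through the flash cache. A minimal sketch of the attribute combination; the function names are illustrative:

#include "esp_attr.h"

/* Helper that must genuinely execute from IRAM. Without NOINLINE_ATTR the
   compiler could inline this static function into callers that live in
   flash, defeating the IRAM placement exactly when the cache is disabled. */
static int IRAM_ATTR NOINLINE_ATTR critical_helper(int x)
{
    return x * 2;   /* stand-in for work done while the flash cache is off */
}

void flash_resident_caller(void)
{
    (void) critical_helper(21);
}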
@@ -286,23 +286,41 @@ void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
     free(it);
 }

+static void IRAM_ATTR NOINLINE_ATTR spi_flash_protected_mmap_init()
+{
+    spi_flash_disable_interrupts_caches_and_other_cpu();
+    spi_flash_mmap_init();
+    spi_flash_enable_interrupts_caches_and_other_cpu();
+}
+
+static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int index)
+{
+    uint32_t value;
+    spi_flash_disable_interrupts_caches_and_other_cpu();
+    value = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[index]);
+    spi_flash_enable_interrupts_caches_and_other_cpu();
+    return value;
+}
+
 void spi_flash_mmap_dump()
 {
-    spi_flash_mmap_init();
+    spi_flash_protected_mmap_init();
+
     mmap_entry_t* it;
     for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
         printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
     }
     for (int i = 0; i < REGIONS_COUNT * PAGES_PER_REGION; ++i) {
         if (s_mmap_page_refcnt[i] != 0) {
-            printf("page %d: refcnt=%d paddr=%d\n",
-                   i, (int) s_mmap_page_refcnt[i], DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]));
+            uint32_t paddr = spi_flash_protected_read_mmu_entry(i);
+            printf("page %d: refcnt=%d paddr=%d\n", i, (int) s_mmap_page_refcnt[i], paddr);
         }
     }
 }

-uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
+uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
 {
+    spi_flash_disable_interrupts_caches_and_other_cpu();
     spi_flash_mmap_init();
     int count = 0;
     int region_begin; // first page to check
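The two new spi_flash_protected_* helpers wrap the MMU-table accesses in spi_flash_disable_interrupts_caches_and_other_cpu() / spi_flash_enable_interrupts_caches_and_other_cpu(), so dumping or counting MMU pages can no longer interleave with a flash operation that has the cache disabled on the other CPU. A hypothetical stress scenario of the kind this guards against; the task names, delays and the partition used are made up:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_partition.h"
#include "esp_spi_flash.h"

/* One task keeps rewriting a spare data partition; each write briefly
   disables the flash cache on both CPUs... */
static void flash_writer_task(void *arg)
{
    const esp_partition_t *part = (const esp_partition_t *) arg;
    const uint32_t word = 0xa5a5a5a5;
    for (;;) {
        esp_partition_erase_range(part, 0, SPI_FLASH_SEC_SIZE);
        esp_partition_write(part, 0, &word, sizeof(word));
        vTaskDelay(1);
    }
}

/* ...while another task walks the MMU table. With the protected helpers
   above, the table reads are serialized against the cache-disabled window. */
static void mmu_dump_task(void *arg)
{
    for (;;) {
        spi_flash_mmap_dump();
        vTaskDelay(pdMS_TO_TICKS(100));
    }
}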
@@ -316,6 +334,7 @@ uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
         }
     }
     DPORT_INTERRUPT_RESTORE();
+    spi_flash_enable_interrupts_caches_and_other_cpu();
     return count;
 }

@@ -384,7 +403,6 @@ static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t leng
     return false;
 }

-
 uint32_t spi_flash_cache2phys(const void *cached)
 {
     intptr_t c = (intptr_t)cached;
@@ -405,7 +423,7 @@ uint32_t spi_flash_cache2phys(const void *cached)
         /* cached address was not in IROM or DROM */
         return SPI_FLASH_CACHE2PHYS_FAIL;
     }
-    uint32_t phys_page = DPORT_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[cache_page]);
+    uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
     if (phys_page == INVALID_ENTRY_VAL) {
         /* page is not mapped */
         return SPI_FLASH_CACHE2PHYS_FAIL;
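spi_flash_cache2phys() now reads the MMU entry through the protected helper as well, so the address translation used by esp_ota_get_running_partition() cannot observe a half-updated mapping. A round-trip sketch of the two translation functions, using the function's own address, the same trick the OTA code relies on; illustrative only:

#include <assert.h>
#include "esp_spi_flash.h"

/* Translate a cached (memory-mapped) code address to its physical flash
   offset, then map it back. Code pages live behind the instruction cache,
   hence SPI_FLASH_MMAP_INST for the reverse lookup. */
void cache_phys_roundtrip(void)
{
    uint32_t phys = spi_flash_cache2phys((const void *) &cache_phys_roundtrip);
    assert(phys != SPI_FLASH_CACHE2PHYS_FAIL);

    const void *cached = spi_flash_phys2cache(phys, SPI_FLASH_MMAP_INST);
    assert(cached != NULL);
}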
@@ -414,8 +432,7 @@ uint32_t spi_flash_cache2phys(const void *cached)
     return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
 }

-
-const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
+const void *IRAM_ATTR spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t memory)
 {
     uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
     int start, end, page_delta;
@@ -432,15 +449,18 @@ const void *spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_memory_t mem
         base = VADDR1_START_ADDR;
         page_delta = 64;
     }
+    spi_flash_disable_interrupts_caches_and_other_cpu();
     DPORT_INTERRUPT_DISABLE();
     for (int i = start; i < end; i++) {
         if (DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == phys_page) {
             i -= page_delta;
             intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
             DPORT_INTERRUPT_RESTORE();
+            spi_flash_enable_interrupts_caches_and_other_cpu();
             return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
         }
     }
     DPORT_INTERRUPT_RESTORE();
+    spi_flash_enable_interrupts_caches_and_other_cpu();
     return NULL;
 }

@@ -244,13 +244,11 @@ void spi_flash_mmap_dump();
 /**
  * @brief get free pages number which can be mmap
  *
- * This function will return free page number of the mmu table which can mmap,
- * when you want to call spi_flash_mmap to mmap an ranger of flash data to Dcache or Icache
- * memmory region, maybe the size of MMU table will exceed,so if you are not sure the
- * size need mmap is ok, can call the interface and watch how many MMU table page can be
- * mmaped.
+ * This function will return number of free pages available in mmu table. This could be useful
+ * before calling actual spi_flash_mmap (maps flash range to DCache or ICache memory) to check
+ * if there is sufficient space available for mapping.
  *
- * @param memory memmory type of MMU table free page
+ * @param memory memory type of MMU table free page
  *
  * @return number of free pages which can be mmaped
  */
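The rewritten comment describes the intended use of spi_flash_mmap_get_free_pages(): query how many 64 KB MMU pages are still free before attempting a large mapping. A short usage sketch; the function name and log tag are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include "esp_spi_flash.h"
#include "esp_log.h"

static const char *TAG = "mmap_check";   /* hypothetical log tag */

/* Return true if a region of 'size_bytes' can still be mapped into the
   data address space without exhausting the MMU table. */
bool can_map_region(size_t size_bytes)
{
    uint32_t free_pages = spi_flash_mmap_get_free_pages(SPI_FLASH_MMAP_DATA);
    uint32_t needed = (size_bytes + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
    ESP_LOGI(TAG, "free mmap pages: %u, needed: %u", free_pages, needed);
    return free_pages >= needed;
}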