esp32s2: IRAM/DRAM memory protection

* new mem_prot API
* mem_prot on & locked by default (see Kconfig)
* feature activated in start_cpu0_default()

JIRA IDF-1355
This commit is contained in:
Martin Vychodil 2020-03-10 16:46:10 +01:00
parent 412c43d9b9
commit 7491ea677a
22 changed files with 1699 additions and 91 deletions

View file

@ -33,13 +33,13 @@
void esp_cache_err_int_init(void)
{
uint32_t core_id = xPortGetCoreID();
ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);
// We do not register a handler for the interrupt because it is interrupt
// level 4 which is not serviceable from C. Instead, xtensa_vectors.S has
// a call to the panic handler for
// this interrupt.
intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_CACHEERR_INUM);
intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_MEMACCESS_ERR_INUM);
// Enable invalid cache access interrupt when the cache is disabled.
// When the interrupt happens, we can not determine the CPU where the
@ -67,7 +67,7 @@ void esp_cache_err_int_init(void)
DPORT_CACHE_IA_INT_APP_IRAM0 |
DPORT_CACHE_IA_INT_APP_IRAM1);
}
ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
ESP_INTR_ENABLE(ETS_MEMACCESS_ERR_INUM);
}
int IRAM_ATTR esp_cache_err_get_cpuid(void)

View file

@ -17,7 +17,7 @@
* @brief initialize cache invalid access interrupt
*
* This function enables cache invalid access interrupt source and connects it
* to interrupt input number ETS_CACHEERR_INUM (see soc/soc.h). It is called
* to interrupt input number ETS_MEMACCESS_ERR_INUM (see soc/soc.h). It is called
* from the startup code.
*/
void esp_cache_err_int_init(void);

View file

@ -12,6 +12,7 @@ else()
# Regular app build
set(srcs "cache_err_int.c"
"memprot.c"
"clk.c"
"cpu_start.c"
"crosscore_int.c"

View file

@ -29,6 +29,29 @@ menu "ESP32S2-specific"
default 160 if ESP32S2_DEFAULT_CPU_FREQ_160
default 240 if ESP32S2_DEFAULT_CPU_FREQ_240
menu "Memory protection"
config ESP32S2_MEMPROT_FEATURE
bool "Enable memory protection"
default "y"
help
If enabled, permission control module watches all memory access and fires panic handler
if permission violation is detected. This feature automatically splits
memory into data and instruction segments and sets Read/Execute permissions
for instruction part (below splitting address) and Read/Write permissions
for data part (above splitting address). The memory protection is effective
on all access through IRAM0 and DRAM0 buses.
config ESP32S2_MEMPROT_FEATURE_LOCK
depends on ESP32S2_MEMPROT_FEATURE
bool "Lock memory protection settings"
default "y"
help
Once locked, memory protection settings cannot be changed anymore.
The lock is reset only on the chip startup.
endmenu # Memory protection
menu "Cache config"
choice ESP32S2_INSTRUCTION_CACHE_SIZE

View file

@ -36,13 +36,13 @@
void esp_cache_err_int_init(void)
{
uint32_t core_id = xPortGetCoreID();
ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);
// We do not register a handler for the interrupt because it is interrupt
// level 4 which is not serviceable from C. Instead, xtensa_vectors.S has
// a call to the panic handler for
// this interrupt.
intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_CACHEERR_INUM);
intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_MEMACCESS_ERR_INUM);
// Enable invalid cache access interrupt when the cache is disabled.
// When the interrupt happens, we can not determine the CPU where the
@ -73,7 +73,7 @@ void esp_cache_err_int_init(void)
EXTMEM_IC_SYNC_SIZE_FAULT_INT_ENA |
EXTMEM_CACHE_DBG_EN);
ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
ESP_INTR_ENABLE(ETS_MEMACCESS_ERR_INUM);
}
int IRAM_ATTR esp_cache_err_get_cpuid(void)

View file

@ -26,6 +26,7 @@
#include "esp32s2/brownout.h"
#include "esp32s2/cache_err_int.h"
#include "esp32s2/spiram.h"
#include "esp32s2/memprot.h"
#include "soc/cpu.h"
#include "soc/rtc.h"
@ -317,6 +318,14 @@ void start_cpu0_default(void)
err = esp_pthread_init();
assert(err == ESP_OK && "Failed to init pthread module!");
#if CONFIG_ESP32S2_MEMPROT_FEATURE
#if CONFIG_ESP32S2_MEMPROT_FEATURE_LOCK
esp_memprot_set_prot(true, true);
#else
esp_memprot_set_prot(true, false);
#endif
#endif
do_global_ctors();
#if CONFIG_ESP_INT_WDT
esp_int_wdt_init();
@ -353,6 +362,7 @@ void start_cpu0_default(void)
ESP_TASK_MAIN_STACK, NULL,
ESP_TASK_MAIN_PRIO, NULL, 0);
assert(res == pdTRUE);
ESP_LOGI(TAG, "Starting scheduler on PRO CPU.");
vTaskStartScheduler();
abort(); /* Only get to here if not enough free heap to start scheduler */

View file

@ -20,7 +20,7 @@ extern "C" {
* @brief initialize cache invalid access interrupt
*
* This function enables cache invalid access interrupt source and connects it
* to interrupt input number ETS_CACHEERR_INUM (see soc/soc.h). It is called
* to interrupt input number ETS_MEMACCESS_ERR_INUM (see soc/soc.h). It is called
* from the startup code.
*/
void esp_cache_err_int_init(void);

View file

@ -0,0 +1,353 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* INTERNAL API
 * generic interface to MMU memory protection features
 */

#pragma once

/* Make the header self-contained: the API below uses uint32_t and bool.
 * NOTE(review): IRAM_ATTR is also used and comes from esp_attr.h — presumably
 * provided by every including translation unit; confirm before adding it here. */
#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Memory regions (buses) covered by the protection module
 */
typedef enum {
    MEMPROT_IRAM0 = 0x00000000,
    MEMPROT_DRAM0 = 0x00000001,
    MEMPROT_UNKNOWN
} mem_type_prot_t;
/**
* @brief Returns splitting address for required memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Splitting address for the memory region required.
* The address is given by region-specific global symbol exported from linker script,
* it is not read out from related configuration register.
*/
uint32_t *IRAM_ATTR esp_memprot_get_split_addr(mem_type_prot_t mem_type);
/**
* @brief Initializes illegal memory access control (MMU) for required memory section.
*
* All memory access interrupts share ETS_MEMACCESS_ERR_INUM input channel, it is caller's
* responsibility to properly detect actual intr. source as well as possible prioritization in case
* of multiple source reported during one intr.handling routine run
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*/
void esp_memprot_intr_init(mem_type_prot_t mem_type);
/**
* @brief Enable/disable the memory protection interrupt
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param enable enable/disable
*/
void esp_memprot_intr_ena(mem_type_prot_t mem_type, bool enable);
/**
* @brief Detects whether any of the memory protection interrupts is active
*
* @return true/false
*/
bool esp_memprot_is_assoc_intr_any(void);
/**
* @brief Detects whether specific memory protection interrupt is active
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return true/false
*/
bool esp_memprot_is_assoc_intr(mem_type_prot_t mem_type);
/**
* @brief Sets a request for clearing interrupt-on flag for specified memory region (register write)
*
* @note When called without actual interrupt-on flag set, subsequent occurrence of related interrupt is ignored.
* Should be used only after the real interrupt appears, typically as the last step in interrupt handler's routine.
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*/
void esp_memprot_clear_intr(mem_type_prot_t mem_type);
/**
* @brief Detects which memory protection interrupt is active, check order: IRAM0, DRAM0
*
* @return Memory protection area type (see mem_type_prot_t enum)
*/
mem_type_prot_t IRAM_ATTR esp_memprot_get_intr_memtype(void);
/**
* @brief Gets interrupt status register contents for specified memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Contents of status register
*/
uint32_t esp_memprot_get_fault_reg(mem_type_prot_t mem_type);
/**
* @brief Get details of given interrupt status
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param faulting_address Faulting address causing the interrupt [out]
* @param op_type Operation being processed at the faulting address [out]
* IRAM0: 0 - read, 1 - write
* DRAM0: 0 - read, 1 - write
* @param op_subtype Additional info for op_type [out]
* IRAM0: 0 - instruction segment access, 1 - data segment access
* DRAM0: 0 - non-atomic operation, 1 - atomic operation
*/
void IRAM_ATTR esp_memprot_get_fault_status(mem_type_prot_t mem_type, uint32_t **faulting_address, uint32_t *op_type, uint32_t *op_subtype);
/**
* @brief Gets string representation of required memory region identifier
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return mem_type as string
*/
const char *IRAM_ATTR esp_memprot_type_to_str(mem_type_prot_t mem_type);
/**
* @brief Detects whether any of the interrupt locks is active (requires digital system reset to unlock)
*
* @return true/false
*/
bool esp_memprot_is_locked_any(void);
/**
* @brief Sets lock for specified memory region.
*
* Locks can be unlocked only by digital system reset
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*/
void esp_memprot_set_lock(mem_type_prot_t mem_type);
/**
* @brief Gets lock status for required memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return true/false (locked/unlocked)
*/
bool esp_memprot_get_lock(mem_type_prot_t mem_type);
/**
* @brief Gets interrupt permission control register contents for required memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Permission control register contents
*/
uint32_t esp_memprot_get_ena_reg(mem_type_prot_t mem_type);
/**
* @brief Gets interrupt permission settings for unified management block
*
* Gets interrupt permission settings register contents for required memory region, returns settings for unified management blocks
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Permission settings register contents
*/
uint32_t esp_memprot_get_perm_uni_reg(mem_type_prot_t mem_type);
/**
* @brief Gets interrupt permission settings for split management block
*
* Gets interrupt permission settings register contents for required memory region, returns settings for split management blocks
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Permission settings register contents
*/
uint32_t esp_memprot_get_perm_split_reg(mem_type_prot_t mem_type);
/**
* @brief Detects whether any of the memory protection interrupts is enabled
*
* @return true/false
*/
bool esp_memprot_is_intr_ena_any(void);
/**
* @brief Gets interrupt-enabled flag for given memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Interrupt-enabled value
*/
uint32_t esp_memprot_get_intr_ena_bit(mem_type_prot_t mem_type);
/**
* @brief Gets interrupt-active flag for given memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Interrupt-active value
*/
uint32_t esp_memprot_get_intr_on_bit(mem_type_prot_t mem_type);
/**
* @brief Gets interrupt-clear request flag for given memory region
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
*
* @return Interrupt-clear request value
*/
uint32_t esp_memprot_get_intr_clr_bit(mem_type_prot_t mem_type);
/**
* @brief Gets read permission value for specified block and memory region
*
* Returns read permission bit value for required unified-management block (0-3) in given memory region.
* Applicable to all memory types.
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param block Memory block identifier (0-3)
*
* @return Read permission value for required block
*/
uint32_t esp_memprot_get_uni_block_read_bit(mem_type_prot_t mem_type, uint32_t block);
/**
* @brief Gets write permission value for specified block and memory region
*
* Returns write permission bit value for required unified-management block (0-3) in given memory region.
* Applicable to all memory types.
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param block Memory block identifier (0-3)
*
* @return Write permission value for required block
*/
uint32_t esp_memprot_get_uni_block_write_bit(mem_type_prot_t mem_type, uint32_t block);
/**
* @brief Gets execute permission value for specified block and memory region
*
* Returns execute permission bit value for required unified-management block (0-3) in given memory region.
* Applicable only to IRAM memory types
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param block Memory block identifier (0-3)
*
* @return Execute permission value for required block
*/
uint32_t esp_memprot_get_uni_block_exec_bit(mem_type_prot_t mem_type, uint32_t block);
/**
* @brief Sets permissions for specified block in DRAM region
*
* Sets Read and Write permission for specified unified-management block (0-3) in given memory region.
* Applicable only to DRAM memory types
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param block Memory block identifier (0-3)
* @param write_perm Write permission flag
* @param read_perm Read permission flag
*/
void esp_memprot_set_uni_block_perm_dram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm);
/**
 * @brief Sets permissions for high and low memory segment in DRAM region
 *
 * Sets Read and Write permission for both low and high memory segments given by splitting address.
 * The splitting address must be equal to or higher than beginning of block 5
 * Applicable only to DRAM memory types
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param split_addr Address to split the memory region to lower and higher segment
 * @param lw Low segment Write permission flag
 * @param lr Low segment Read permission flag
 * @param hw High segment Write permission flag
 * @param hr High segment Read permission flag
 */
void esp_memprot_set_prot_dram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool hw, bool hr);
/**
 * @brief Sets permissions for specified block in IRAM region
 *
 * Sets Read, Write and Execute permission for specified unified-management block (0-3) in given memory region.
 * Applicable only to IRAM memory types
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param block Memory block identifier (0-3)
 * @param write_perm Write permission flag
 * @param read_perm Read permission flag
 * @param exec_perm Execute permission flag
 */
void esp_memprot_set_uni_block_perm_iram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm, bool exec_perm);
/**
 * @brief Sets permissions for high and low memory segment in IRAM region
 *
 * Sets Read, Write and Execute permission for both low and high memory segments given by splitting address.
 * The splitting address must be equal to or higher than beginning of block 5
 * Applicable only to IRAM memory types
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param split_addr Address to split the memory region to lower and higher segment
 * @param lw Low segment Write permission flag
 * @param lr Low segment Read permission flag
 * @param lx Low segment Execute permission flag
 * @param hw High segment Write permission flag
 * @param hr High segment Read permission flag
 * @param hx High segment Execute permission flag
 */
void esp_memprot_set_prot_iram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool lx, bool hw, bool hr, bool hx);
/**
* @brief Activates memory protection for all supported memory region types
*
* @note The feature is disabled when JTAG interface is connected
*
* @param invoke_panic_handler map mem.prot interrupt to ETS_MEMACCESS_ERR_INUM and thus invokes panic handler when fired ('true' not suitable for testing)
* @param lock_feature sets LOCK bit, see esp_memprot_set_lock() ('true' not suitable for testing)
*/
void esp_memprot_set_prot(bool invoke_panic_handler, bool lock_feature);
/**
* @brief Get permission settings bits for IRAM split mgmt based on current split address
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param lw Low segment Write permission flag
* @param lr Low segment Read permission flag
* @param lx Low segment Execute permission flag
* @param hw High segment Write permission flag
* @param hr High segment Read permission flag
* @param hx High segment Execute permission flag
*/
void esp_memprot_get_perm_split_bits_iram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *lx, bool *hw, bool *hr, bool *hx);
/**
* @brief Get permission settings bits for DRAM split mgmt based on current split address
*
* @param mem_type Memory protection area type (see mem_type_prot_t enum)
* @param lw Low segment Write permission flag
* @param lr Low segment Read permission flag
* @param hw High segment Write permission flag
* @param hr High segment Read permission flag
*/
void esp_memprot_get_perm_split_bits_dram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *hw, bool *hr);
#ifdef __cplusplus
}
#endif

View file

@ -160,6 +160,8 @@ SECTIONS
mapping[iram0_text]
/* align + add 16B for the possibly overlapping instructions */
. = ALIGN(4) + 16;
_iram_text_end = ABSOLUTE(.);
_iram_end = ABSOLUTE(.);
} > iram0_0_seg

View file

@ -0,0 +1,491 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* INTERNAL API
* implementation of generic interface to MMU memory protection features
*/
#include <stdio.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_system.h"
#include "esp_spi_flash.h"
#include "soc/sensitive_reg.h"
#include "soc/dport_access.h"
#include "soc/periph_defs.h"
#include "esp_intr_alloc.h"
#include "esp32s2/memprot.h"
#include "hal/memprot_ll.h"
#include "esp_fault.h"
#include "esp_log.h"
extern int _iram_text_end;
extern int _data_start;
static const char *TAG = "memprot";
/**
 * @brief Minimum splitting address for IRAM0: end of the IRAM .text segment
 *        (linker-script symbol _iram_text_end).
 */
uint32_t *esp_memprot_iram0_get_min_split_addr(void)
{
    return (uint32_t *)&_iram_text_end;
}

/**
 * @brief Minimum splitting address for DRAM0: start of the DRAM data segment
 *        (linker-script symbol _data_start).
 */
uint32_t *esp_memprot_dram0_get_min_split_addr(void)
{
    return (uint32_t *)&_data_start;
}
/**
 * @brief Returns the splitting address for the requested memory region.
 *
 * The address is taken from region-specific linker-script symbols,
 * not read back from the configuration registers.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 *
 * @return Minimum splitting address for the region
 */
uint32_t *esp_memprot_get_split_addr(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    if (mem_type == MEMPROT_IRAM0) {
        return esp_memprot_iram0_get_min_split_addr();
    }
    if (mem_type == MEMPROT_DRAM0) {
        return esp_memprot_dram0_get_min_split_addr();
    }

    ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
    abort();
}
/**
 * @brief Returns the string name of the given memory protection area type.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 *
 * @return "IRAM0", "DRAM0", or "UNKNOWN" for any other value
 */
const char *esp_memprot_type_to_str(mem_type_prot_t mem_type)
{
    switch (mem_type) {
    case MEMPROT_IRAM0:
        return "IRAM0";
    case MEMPROT_DRAM0:
        return "DRAM0";
    default:
        /* BUGFIX: was misspelled "UNKOWN" */
        return "UNKNOWN";
    }
}
/**
 * @brief Routes the region's protection interrupt source to
 *        ETS_MEMACCESS_ERR_INUM on the PRO CPU.
 *
 * The shared interrupt input is disabled while the interrupt matrix is
 * reconfigured and re-enabled afterwards. All memprot sources share this
 * input, so the handler must determine the actual source itself.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
void esp_memprot_intr_init(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    /* keep the shared input masked during matrix reconfiguration */
    ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        intr_matrix_set(PRO_CPU_NUM, esp_memprot_iram0_get_intr_source_num(), ETS_MEMACCESS_ERR_INUM);
        break;
    case MEMPROT_DRAM0:
        intr_matrix_set(PRO_CPU_NUM, esp_memprot_dram0_get_intr_source_num(), ETS_MEMACCESS_ERR_INUM);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }

    ESP_INTR_ENABLE(ETS_MEMACCESS_ERR_INUM);
}
/**
 * @brief Enables or disables the protection interrupt of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param enable true = enable, false = disable
 */
void esp_memprot_intr_ena(mem_type_prot_t mem_type, bool enable)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_intr_ena(enable);
        break;
    case MEMPROT_DRAM0:
        esp_memprot_dram0_intr_ena(enable);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief True when either IRAM0 or DRAM0 reports an active protection interrupt.
 */
bool esp_memprot_is_assoc_intr_any()
{
    return esp_memprot_iram0_is_assoc_intr() || esp_memprot_dram0_is_assoc_intr();
}

/**
 * @brief Identifies which region raised the interrupt.
 *
 * Check order is fixed: IRAM0 first, then DRAM0; returns MEMPROT_UNKNOWN
 * when neither interrupt is active.
 */
mem_type_prot_t esp_memprot_get_intr_memtype()
{
    if ( esp_memprot_is_assoc_intr(MEMPROT_IRAM0) ) {
        return MEMPROT_IRAM0;
    } else if ( esp_memprot_is_assoc_intr(MEMPROT_DRAM0) ) {
        return MEMPROT_DRAM0;
    }
    return MEMPROT_UNKNOWN;
}
/**
 * @brief True when the given region's protection interrupt is active.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
bool esp_memprot_is_assoc_intr(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_is_assoc_intr();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_is_assoc_intr();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Requests clearing of the interrupt-on flag for the given region.
 *
 * Typically called as the last step of the interrupt handling routine.
 * NOTE(review): unlike the other dispatchers here, this one omits the
 * leading mem_type assert; invalid values still hit the aborting default.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
void esp_memprot_clear_intr(mem_type_prot_t mem_type)
{
    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_clear_intr();
        break;
    case MEMPROT_DRAM0:
        esp_memprot_dram0_clear_intr();
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Locks the protection configuration of the given region.
 *
 * The lock can only be cleared by a digital system reset.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
void esp_memprot_set_lock(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_set_lock();
        break;
    case MEMPROT_DRAM0:
        esp_memprot_dram0_set_lock();
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the lock status of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 *
 * @return true = locked (non-zero lock register), false = unlocked
 */
bool esp_memprot_get_lock(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_lock_reg() > 0;
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_lock_reg() > 0;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief True when at least one region (IRAM0 or DRAM0) has its lock set.
 *
 * @return true/false
 */
bool esp_memprot_is_locked_any()
{
    /* BUGFIX: the second operand previously re-checked the IRAM0 lock
     * register, so a locked DRAM0 region was never reported. */
    return esp_memprot_iram0_get_lock_reg() > 0 || esp_memprot_dram0_get_lock_reg() > 0;
}
/**
 * @brief Returns the LOCK bit value of the given region's control register.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_lock_bit(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_lock_bit();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_lock_bit();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the permission-control enable register of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_ena_reg(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_ena_reg();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_ena_reg();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the raw interrupt/fault status register of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_fault_reg(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_fault_reg();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_fault_reg();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Decodes the fault details of the given region's last violation.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param faulting_address [out] address that caused the violation
 * @param op_type [out] operation type at the faulting address (0 = read, 1 = write)
 * @param op_subtype [out] additional info for op_type
 *        (IRAM0: instruction/data segment access; DRAM0: non-atomic/atomic operation)
 */
void esp_memprot_get_fault_status(mem_type_prot_t mem_type, uint32_t **faulting_address, uint32_t *op_type, uint32_t *op_subtype)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_get_fault_status(faulting_address, op_type, op_subtype);
        break;
    case MEMPROT_DRAM0:
        esp_memprot_dram0_get_fault_status(faulting_address, op_type, op_subtype);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief True when the protection interrupt of any region is enabled.
 */
bool esp_memprot_is_intr_ena_any()
{
    return esp_memprot_iram0_get_intr_ena_bit() > 0 || esp_memprot_dram0_get_intr_ena_bit() > 0;
}
/**
 * @brief Returns the interrupt-enabled flag of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_intr_ena_bit(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_intr_ena_bit();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_intr_ena_bit();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the interrupt-active flag of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_intr_on_bit(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_intr_on_bit();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_intr_on_bit();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the interrupt-clear request flag of the given region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_intr_clr_bit(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_intr_clr_bit();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_intr_clr_bit();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Read-permission bit of a unified-management block (0-3).
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param block Memory block identifier (0-3)
 */
uint32_t esp_memprot_get_uni_block_read_bit(mem_type_prot_t mem_type, uint32_t block)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_uni_block_read_bit(block);
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_uni_block_read_bit(block);
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Write-permission bit of a unified-management block (0-3).
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 * @param block Memory block identifier (0-3)
 */
uint32_t esp_memprot_get_uni_block_write_bit(mem_type_prot_t mem_type, uint32_t block)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_uni_block_write_bit(block);
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_uni_block_write_bit(block);
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Execute-permission bit of a unified-management block (0-3).
 *
 * Applicable only to IRAM0 (DRAM blocks carry no execute permission).
 *
 * @param mem_type Memory protection area type (must be MEMPROT_IRAM0)
 * @param block Memory block identifier (0-3)
 */
uint32_t esp_memprot_get_uni_block_exec_bit(mem_type_prot_t mem_type, uint32_t block)
{
    assert(mem_type == MEMPROT_IRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_uni_block_exec_bit(block);
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Sets R/W permissions of a unified-management block in DRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_DRAM0)
 * @param block Memory block identifier (0-3)
 * @param write_perm Write permission flag
 * @param read_perm Read permission flag
 */
void esp_memprot_set_uni_block_perm_dram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm)
{
    assert(mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_DRAM0:
        esp_memprot_dram0_set_uni_block_perm(block, write_perm, read_perm);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the unified-management permission register of the region.
 *
 * DRAM0 keeps unified and split settings in a single register, hence the
 * same LL getter is used here and in esp_memprot_get_perm_split_reg().
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_perm_uni_reg(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_perm_uni_reg();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_perm_reg();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Returns the split-management permission register of the region.
 *
 * @param mem_type Memory protection area type (see mem_type_prot_t enum)
 */
uint32_t esp_memprot_get_perm_split_reg(mem_type_prot_t mem_type)
{
    assert(mem_type == MEMPROT_IRAM0 || mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        return esp_memprot_iram0_get_perm_split_reg();
    case MEMPROT_DRAM0:
        return esp_memprot_dram0_get_perm_reg();
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Applies low/high segment R/W permissions to DRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_DRAM0)
 * @param split_addr Splitting address; NULL selects the linker-script minimum
 * @param lw,lr Low segment Write/Read permissions
 * @param hw,hr High segment Write/Read permissions
 */
void esp_memprot_set_prot_dram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool hw, bool hr)
{
    assert(mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_DRAM0:
        /* NULL split_addr falls back to the default (minimum) split address */
        esp_memprot_dram0_set_prot(split_addr != NULL ? split_addr : esp_memprot_dram0_get_min_split_addr(), lw, lr, hw, hr);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Sets R/W/X permissions of a unified-management block in IRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_IRAM0)
 * @param block Memory block identifier (0-3)
 * @param write_perm,read_perm,exec_perm Permission flags
 */
void esp_memprot_set_uni_block_perm_iram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm, bool exec_perm)
{
    assert(mem_type == MEMPROT_IRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_set_uni_block_perm(block, write_perm, read_perm, exec_perm);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}

/**
 * @brief Applies low/high segment R/W/X permissions to IRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_IRAM0)
 * @param split_addr Splitting address; NULL selects the linker-script minimum
 * @param lw,lr,lx Low segment Write/Read/Execute permissions
 * @param hw,hr,hx High segment Write/Read/Execute permissions
 */
void esp_memprot_set_prot_iram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool lx, bool hw, bool hr, bool hx)
{
    assert(mem_type == MEMPROT_IRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        /* NULL split_addr falls back to the default (minimum) split address */
        esp_memprot_iram0_set_prot(split_addr != NULL ? split_addr : esp_memprot_iram0_get_min_split_addr(), lw, lr, lx, hw, hr, hx);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Reads current split-management permission bits of IRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_IRAM0)
 * @param lw,lr,lx [out] Low segment Write/Read/Execute permissions
 * @param hw,hr,hx [out] High segment Write/Read/Execute permissions
 */
void esp_memprot_get_perm_split_bits_iram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *lx, bool *hw, bool *hr, bool *hx)
{
    assert(mem_type == MEMPROT_IRAM0);

    switch (mem_type) {
    case MEMPROT_IRAM0:
        esp_memprot_iram0_get_split_sgnf_bits(lw, lr, lx, hw, hr, hx);
        break;
    default:
        /* CONSISTENCY FIX: was assert(0), which is a no-op in NDEBUG/release
         * builds; use the same log-and-abort path as every other dispatcher. */
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Reads current split-management permission bits of DRAM0.
 *
 * @param mem_type Memory protection area type (must be MEMPROT_DRAM0)
 * @param lw,lr [out] Low segment Write/Read permissions
 * @param hw,hr [out] High segment Write/Read permissions
 */
void esp_memprot_get_perm_split_bits_dram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *hw, bool *hr)
{
    assert(mem_type == MEMPROT_DRAM0);

    switch (mem_type) {
    case MEMPROT_DRAM0:
        esp_memprot_dram0_get_split_sgnf_bits(lw, lr, hw, hr);
        break;
    default:
        ESP_LOGE(TAG, "Invalid mem_type %d", mem_type);
        abort();
    }
}
/**
 * @brief Activates memory protection for all supported regions (IRAM0, DRAM0).
 *
 * Sequence: disable both protection interrupts, optionally route them to
 * the panic-handler input, apply default split-address permissions,
 * re-enable the interrupts, and optionally lock the configuration.
 *
 * @note Configuration is skipped entirely while a JTAG/OCD debugger is attached.
 *
 * @param invoke_panic_handler route interrupts to ETS_MEMACCESS_ERR_INUM (panic handler)
 * @param lock_feature lock the settings until the next digital system reset
 */
void esp_memprot_set_prot(bool invoke_panic_handler, bool lock_feature)
{
    /* mask both protection interrupts while reconfiguring */
    esp_memprot_intr_ena(MEMPROT_DRAM0, false);
    esp_memprot_intr_ena(MEMPROT_IRAM0, false);

    if (!esp_cpu_in_ocd_debug_mode()) {

        /* anti-glitch re-check of the debugger condition (esp_fault) */
        ESP_FAULT_ASSERT(!esp_cpu_in_ocd_debug_mode());

        if ( invoke_panic_handler ) {
            esp_memprot_intr_init(MEMPROT_DRAM0);
            esp_memprot_intr_init(MEMPROT_IRAM0);
        }

        /* defaults (NULL split_addr = linker-script split address):
         *   DRAM0 low segment: R only, high segment: RW
         *   IRAM0 low segment: RX (no W), high segment: RW (no X) */
        esp_memprot_set_prot_dram(MEMPROT_DRAM0, NULL, false, true, true, true);
        esp_memprot_set_prot_iram(MEMPROT_IRAM0, NULL, false, true, true, true, true, false);

        esp_memprot_intr_ena(MEMPROT_DRAM0, true);
        esp_memprot_intr_ena(MEMPROT_IRAM0, true);

        if ( lock_feature ) {
            esp_memprot_set_lock(MEMPROT_DRAM0);
            esp_memprot_set_lock(MEMPROT_IRAM0);
        }
    }
}

View file

@ -77,11 +77,11 @@ xt_highint4:
/* Figure out reason, save into EXCCAUSE reg */
rsr a0, INTERRUPT
extui a0, a0, ETS_CACHEERR_INUM, 1 /* get cacheerr int bit */
extui a0, a0, ETS_MEMACCESS_ERR_INUM, 1 /* get cacheerr int bit */
beqz a0, 1f
/* Kill this interrupt; we cannot reset it. */
rsr a0, INTENABLE
movi a4, ~(1<<ETS_CACHEERR_INUM)
movi a4, ~(1<<ETS_MEMACCESS_ERR_INUM)
and a0, a4, a0
wsr a0, INTENABLE
movi a0, PANIC_RSN_CACHEERR

View file

@ -61,11 +61,11 @@ xt_highint4:
/* Figure out reason, save into EXCCAUSE reg */
rsr a0, INTERRUPT
extui a0, a0, ETS_CACHEERR_INUM, 1 /* get cacheerr int bit */
extui a0, a0, ETS_MEMACCESS_ERR_INUM, 1 /* get cacheerr int bit */
beqz a0, 1f
/* Kill this interrupt; we cannot reset it. */
rsr a0, INTENABLE
movi a4, ~(1<<ETS_CACHEERR_INUM)
movi a4, ~(1<<ETS_MEMACCESS_ERR_INUM)
and a0, a4, a0
wsr a0, INTENABLE
movi a0, PANIC_RSN_CACHEERR

View file

@ -43,6 +43,7 @@
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/cache_err_int.h"
#include "esp32s2/rom/uart.h"
#include "esp32s2/memprot.h"
#include "soc/extmem_reg.h"
#include "soc/cache_memory.h"
#include "soc/rtc_cntl_reg.h"
@ -50,11 +51,11 @@
#include "panic_internal.h"
extern void esp_panic_handler(panic_info_t*);
extern void esp_panic_handler(panic_info_t *);
static wdt_hal_context_t wdt0_context = {.inst = WDT_MWDT0, .mwdt_dev = &TIMERG0};
static XtExcFrame* xt_exc_frames[SOC_CPU_CORES_NUM] = {NULL};
static XtExcFrame *xt_exc_frames[SOC_CPU_CORES_NUM] = {NULL};
/*
Panic handlers; these get called when an unhandled exception occurs or the assembly-level
@ -65,9 +66,9 @@ static XtExcFrame* xt_exc_frames[SOC_CPU_CORES_NUM] = {NULL};
/*
Note: The linker script will put everything in this file in IRAM/DRAM, so it also works with flash cache disabled.
*/
static void print_illegal_instruction_details(const void* f)
static void print_illegal_instruction_details(const void *f)
{
XtExcFrame* frame = (XtExcFrame*) f;
XtExcFrame *frame = (XtExcFrame *) f;
/* Print out memory around the instruction word */
uint32_t epc = frame->pc;
epc = (epc & ~0x3) - 4;
@ -76,7 +77,7 @@ static void print_illegal_instruction_details(const void* f)
if (epc < SOC_IROM_MASK_LOW || epc >= SOC_IROM_HIGH) {
return;
}
volatile uint32_t* pepc = (uint32_t*)epc;
volatile uint32_t *pepc = (uint32_t *)epc;
panic_print_str("Memory dump at 0x");
panic_print_hex(epc);
@ -89,7 +90,7 @@ static void print_illegal_instruction_details(const void* f)
panic_print_hex(*(pepc + 2));
}
static void print_debug_exception_details(const void* f)
static void print_debug_exception_details(const void *f)
{
int debug_rsn;
asm("rsr.debugcause %0":"=r"(debug_rsn));
@ -144,9 +145,9 @@ static void print_backtrace_entry(uint32_t pc, uint32_t sp)
panic_print_hex(sp);
}
static void print_backtrace(const void* f, int core)
static void print_backtrace(const void *f, int core)
{
XtExcFrame *frame = (XtExcFrame*) f;
XtExcFrame *frame = (XtExcFrame *) f;
int depth = 100;
//Initialize stk_frame with first frame of stack
esp_backtrace_frame_t stk_frame = {.pc = frame->pc, .sp = frame->a1, .next_pc = frame->a0};
@ -155,7 +156,7 @@ static void print_backtrace(const void* f, int core)
//Check if first frame is valid
bool corrupted = !(esp_stack_ptr_is_sane(stk_frame.sp) &&
esp_ptr_executable((void*)esp_cpu_process_stack_pc(stk_frame.pc)));
esp_ptr_executable((void *)esp_cpu_process_stack_pc(stk_frame.pc)));
uint32_t i = ((depth <= 0) ? INT32_MAX : depth) - 1; //Account for stack frame that's already printed
while (i-- > 0 && stk_frame.next_pc != 0 && !corrupted) {
@ -176,7 +177,7 @@ static void print_backtrace(const void* f, int core)
static void print_registers(const void *f, int core)
{
XtExcFrame* frame = (XtExcFrame*) f;
XtExcFrame *frame = (XtExcFrame *) f;
int *regs = (int *)frame;
int x, y;
const char *sdesc[] = {
@ -207,10 +208,10 @@ static void print_registers(const void *f, int core)
// If the core which triggers the interrupt watchpoint was in ISR context, dump the epc registers.
if (xPortInterruptedFromISRContext()
#if !CONFIG_FREERTOS_UNICORE
&& ((core == 0 && frame->exccause == PANIC_RSN_INTWDT_CPU0) ||
(core == 1 && frame->exccause == PANIC_RSN_INTWDT_CPU1))
&& ((core == 0 && frame->exccause == PANIC_RSN_INTWDT_CPU0) ||
(core == 1 && frame->exccause == PANIC_RSN_INTWDT_CPU1))
#endif //!CONFIG_FREERTOS_UNICORE
) {
) {
panic_print_str("\r\n");
@ -246,7 +247,7 @@ static void print_state_for_core(const void *f, int core)
print_backtrace(f, core);
}
static void print_state(const void* f)
static void print_state(const void *f)
{
#if !CONFIG_FREERTOS_UNICORE
int err_core = f == xt_exc_frames[0] ? 0 : 1;
@ -271,7 +272,7 @@ static void print_state(const void* f)
}
#if CONFIG_IDF_TARGET_ESP32S2
static inline void print_cache_err_details(const void* f)
static inline void print_cache_err_details(const void *f)
{
uint32_t vaddr = 0, size = 0;
uint32_t status[2];
@ -357,9 +358,27 @@ static inline void print_cache_err_details(const void* f)
}
}
}
static inline void print_memprot_err_details(const void *f)
{
uint32_t *fault_addr;
uint32_t op_type, op_subtype;
mem_type_prot_t mem_type = esp_memprot_get_intr_memtype();
esp_memprot_get_fault_status( mem_type, &fault_addr, &op_type, &op_subtype );
char *operation_type = "Write";
if ( op_type == 0 ) {
operation_type = (mem_type == MEMPROT_IRAM0 && op_subtype == 0) ? "Instruction fetch" : "Read";
}
panic_print_str( operation_type );
panic_print_str( " operation at address 0x" );
panic_print_hex( (uint32_t)fault_addr );
panic_print_str(" not permitted.\r\n");
}
#endif
static void frame_to_panic_info(XtExcFrame *frame, panic_info_t* info, bool pseudo_excause)
static void frame_to_panic_info(XtExcFrame *frame, panic_info_t *info, bool pseudo_excause)
{
info->core = cpu_hal_get_core_id();
info->exception = PANIC_EXCEPTION_FAULT;
@ -405,8 +424,13 @@ static void frame_to_panic_info(XtExcFrame *frame, panic_info_t* info, bool pseu
}
#if CONFIG_IDF_TARGET_ESP32S2
if(frame->exccause == PANIC_RSN_CACHEERR) {
info->details = print_cache_err_details;
if (frame->exccause == PANIC_RSN_CACHEERR) {
if ( esp_memprot_is_assoc_intr_any() ) {
info->details = print_memprot_err_details;
info->reason = "Memory protection fault";
} else {
info->details = print_cache_err_details;
}
}
#endif
} else {
@ -437,7 +461,7 @@ static void frame_to_panic_info(XtExcFrame *frame, panic_info_t* info, bool pseu
}
info->state = print_state;
info->addr = ((void*) ((XtExcFrame*) frame)->pc);
info->addr = ((void *) ((XtExcFrame *) frame)->pc);
info->frame = frame;
}
@ -456,7 +480,7 @@ static void panic_handler(XtExcFrame *frame, bool pseudo_excause)
// These are cases where both CPUs both go into panic handler. The following code ensures
// only one core proceeds to the system panic handler.
if (pseudo_excause) {
#define BUSY_WAIT_IF_TRUE(b) { if (b) while(1); }
#define BUSY_WAIT_IF_TRUE(b) { if (b) while(1); }
// For WDT expiry, pause the non-offending core - offending core handles panic
BUSY_WAIT_IF_TRUE(frame->exccause == PANIC_RSN_INTWDT_CPU0 && core_id == 1);
BUSY_WAIT_IF_TRUE(frame->exccause == PANIC_RSN_INTWDT_CPU1 && core_id == 0);
@ -483,7 +507,7 @@ static void panic_handler(XtExcFrame *frame, bool pseudo_excause)
if (esp_cpu_in_ocd_debug_mode()) {
if (frame->exccause == PANIC_RSN_INTWDT_CPU0 ||
frame->exccause == PANIC_RSN_INTWDT_CPU1) {
frame->exccause == PANIC_RSN_INTWDT_CPU1) {
wdt_hal_write_protect_disable(&wdt0_context);
wdt_hal_handle_intr(&wdt0_context);
wdt_hal_write_protect_enable(&wdt0_context);
@ -536,9 +560,18 @@ void __attribute__((noreturn)) panic_restart(void)
// If resetting because of a cache error, reset the digital part
// Make sure that the reset reason is not a generic panic reason as well on ESP32S2,
// as esp_cache_err_get_cpuid always returns PRO_CPU_NUM
if (esp_cache_err_get_cpuid() != -1 && esp_reset_reason_get_hint() != ESP_RST_PANIC) {
esp_digital_reset();
} else {
esp_restart_noos();
bool digital_reset_needed = false;
if ( esp_cache_err_get_cpuid() != -1 && esp_reset_reason_get_hint() != ESP_RST_PANIC ) {
digital_reset_needed = true;
}
}
#if CONFIG_IDF_TARGET_ESP32S2
if ( esp_memprot_is_intr_ena_any() || esp_memprot_is_locked_any() ) {
digital_reset_needed = true;
}
#endif
if ( digital_reset_needed ) {
esp_digital_reset();
}
esp_restart_noos();
}

View file

@ -4,6 +4,13 @@
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#if CONFIG_IDF_TARGET_ESP32S2
#include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h"
#include "esp32s2/rom/uart.h"
#include "esp32s2/memprot.h"
#endif
#include "esp_system.h"
#include "panic_internal.h"
@ -34,6 +41,23 @@ esp_err_t esp_unregister_shutdown_handler(shutdown_handler_t handler)
return ESP_ERR_INVALID_STATE;
}
#if CONFIG_IDF_TARGET_ESP32S2
static __attribute__((noreturn)) void esp_digital_reset(void)
{
// make sure all the panic handler output is sent from UART FIFO
uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
// switch to XTAL (otherwise we will keep running from the PLL)
rtc_clk_cpu_freq_set_xtal();
// reset the digital part
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_SYS_RST);
while (true) {
;
}
}
#endif
void IRAM_ATTR esp_restart(void)
{
for (int i = SHUTDOWN_HANDLERS_NO - 1; i >= 0; i--) {
@ -45,6 +69,11 @@ void IRAM_ATTR esp_restart(void)
// Disable scheduler on this core.
vTaskSuspendAll();
#if CONFIG_IDF_TARGET_ESP32S2
if ( esp_memprot_is_intr_ena_any() || esp_memprot_is_locked_any()) {
esp_digital_reset();
}
#endif
esp_restart_noos();
}
@ -58,12 +87,12 @@ uint32_t esp_get_minimum_free_heap_size( void )
return heap_caps_get_minimum_free_size( MALLOC_CAP_DEFAULT );
}
const char* esp_get_idf_version(void)
const char *esp_get_idf_version(void)
{
return IDF_VER;
}
void __attribute__((noreturn)) esp_system_abort(const char* details)
void __attribute__((noreturn)) esp_system_abort(const char *details)
{
panic_abort(details);
}

View file

@ -11,6 +11,7 @@
#include <stdlib.h>
#include <sys/param.h>
#ifndef CONFIG_ESP32S2_MEMPROT_FEATURE
TEST_CASE("Capabilities allocator test", "[heap]")
{
char *m1, *m2[10];
@ -100,6 +101,7 @@ TEST_CASE("Capabilities allocator test", "[heap]")
free(m1);
printf("Done.\n");
}
#endif
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
TEST_CASE("IRAM_8BIT capability test", "[heap]")

View file

@ -14,7 +14,7 @@
/* (can't realloc in place if comprehensive is enabled) */
TEST_CASE("realloc shrink buffer in place", "[heap]")
{
{
void *x = malloc(64);
TEST_ASSERT(x);
void *y = realloc(x, 48);
@ -23,6 +23,7 @@ TEST_CASE("realloc shrink buffer in place", "[heap]")
#endif
#ifndef CONFIG_ESP32S2_MEMPROT_FEATURE
TEST_CASE("realloc shrink buffer with EXEC CAPS", "[heap]")
{
const size_t buffer_size = 64;
@ -52,7 +53,7 @@ TEST_CASE("realloc move data to a new heap type", "[heap]")
TEST_ASSERT_NOT_NULL(b);
TEST_ASSERT_NOT_EQUAL(a, b);
TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
TEST_ASSERT_EQUAL_HEX32_ARRAY(buf, b, 64/sizeof(uint32_t));
TEST_ASSERT_EQUAL_HEX32_ARRAY(buf, b, 64 / sizeof(uint32_t));
// Move data back to DRAM
char *c = heap_caps_realloc(b, 48, MALLOC_CAP_8BIT);
@ -63,3 +64,4 @@ TEST_CASE("realloc move data to a new heap type", "[heap]")
free(c);
}
#endif

View file

@ -30,8 +30,12 @@ TEST_CASE("Allocate new heap at runtime", "[heap][ignore]")
TEST_CASE("Allocate new heap with new capability", "[heap][ignore]")
{
const size_t BUF_SZ = 100;
#ifdef CONFIG_ESP32S2_MEMPROT_FEATURE
const size_t ALLOC_SZ = 32;
#else
const size_t ALLOC_SZ = 64; // More than half of BUF_SZ
const uint32_t MALLOC_CAP_INVENTED = (1<<30); /* this must be unused in esp_heap_caps.h */
#endif
const uint32_t MALLOC_CAP_INVENTED = (1 << 30); /* this must be unused in esp_heap_caps.h */
/* no memory exists to provide this capability */
TEST_ASSERT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );

View file

@ -398,7 +398,9 @@
#define ETS_TG0_T1_INUM 10 /**< use edge interrupt*/
#define ETS_FRC1_INUM 22
#define ETS_T1_WDT_INUM 24
#define ETS_CACHEERR_INUM 25
#define ETS_MEMACCESS_ERR_INUM 25
/* backwards compatibility only, use ETS_MEMACCESS_ERR_INUM instead*/
#define ETS_CACHEERR_INUM ETS_MEMACCESS_ERR_INUM
#define ETS_DPORT_INUM 28
//CPU0 Interrupt number used in ROM, should be cancelled in SDK

View file

@ -336,7 +336,7 @@
#define ETS_TG0_T1_INUM 10 /**< use edge interrupt*/
#define ETS_FRC1_INUM 22
#define ETS_T1_WDT_INUM 24
#define ETS_CACHEERR_INUM 25
#define ETS_MEMACCESS_ERR_INUM 25
#define ETS_DPORT_INUM 28
//CPU0 Interrupt number used in ROM, should be cancelled in SDK

View file

@ -0,0 +1,651 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/**
* === IRAM0 ====
*/
#define IRAM0_TOTAL_UNI_BLOCKS 4
#define IRAM0_UNI_BLOCK_0 0
#define IRAM0_UNI_BLOCK_1 1
#define IRAM0_UNI_BLOCK_2 2
#define IRAM0_UNI_BLOCK_3 3
#define IRAM0_SPL_BLOCK_BASE 0x40000000
//unified management (SRAM blocks 0-3)
#define IRAM0_UNI_BLOCK_0_LOW 0x40020000
#define IRAM0_UNI_BLOCK_0_HIGH 0x40021FFF
#define IRAM0_UNI_BLOCK_1_LOW 0x40022000
#define IRAM0_UNI_BLOCK_1_HIGH 0x40023FFF
#define IRAM0_UNI_BLOCK_2_LOW 0x40024000
#define IRAM0_UNI_BLOCK_2_HIGH 0x40025FFF
#define IRAM0_UNI_BLOCK_3_LOW 0x40026000
#define IRAM0_UNI_BLOCK_3_HIGH 0x40027FFF
//split management (SRAM blocks 4-21)
#define IRAM0_SPL_BLOCK_LOW 0x40028000 //block 4 low
#define IRAM0_SPL_BLOCK_HIGH 0x4006FFFF //block 21 high
#define IRAM0_SPLTADDR_MIN 0x40030000 //block 6 low - minimum splitting address
//IRAM0 interrupt status bitmasks
#define IRAM0_INTR_ST_FAULTADDR_M       0x003FFFFC      //(bits 21:2 in the reg, as well as in real address)
#define IRAM0_INTR_ST_FAULTADDR_HI 0x40000000 //(high nonsignificant bits 31:22 of the faulting address - constant)
#define IRAM0_INTR_ST_OP_TYPE_BIT BIT(1) //instruction: 0, data: 1
#define IRAM0_INTR_ST_OP_RW_BIT BIT(0) //read: 0, write: 1
//Returns the interrupt-matrix source number of the IRAM0 permission-violation interrupt
static inline uint32_t esp_memprot_iram0_get_intr_source_num(void)
{
    return ETS_PMS_PRO_IRAM0_ILG_INTR_SOURCE;
}

//Enables (true) or disables (false) the IRAM0 illegal-access interrupt (ILG_EN bit)
static inline void esp_memprot_iram0_intr_ena(bool enable)
{
    if ( enable ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_EN );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_EN );
    }
}

//Returns the raw IRAM0 interrupt enable/status register (DPORT_PMS_PRO_IRAM0_4_REG)
static inline uint32_t esp_memprot_iram0_get_ena_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_IRAM0_4_REG);
}

//Returns the raw IRAM0 fault-status register (DPORT_PMS_PRO_IRAM0_5_REG)
static inline uint32_t esp_memprot_iram0_get_fault_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_IRAM0_5_REG);
}
/**
 * Decodes the IRAM0 fault-status register into its components.
 *
 * @param faulting_address reconstructed faulting address: masked register bits
 *        (IRAM0_INTR_ST_FAULTADDR_M, unshifted) OR-ed with the constant high
 *        bits IRAM0_INTR_ST_FAULTADDR_HI
 * @param op_type RW bit of the faulting operation (read: 0, write: 1)
 * @param op_subtype type bit of the faulting operation (instruction: 0, data: 1)
 */
static inline void esp_memprot_iram0_get_fault_status(uint32_t **faulting_address, uint32_t *op_type, uint32_t *op_subtype)
{
    uint32_t status_bits = esp_memprot_iram0_get_fault_reg();

    uint32_t fault_addr = (status_bits & IRAM0_INTR_ST_FAULTADDR_M);
    *faulting_address = (uint32_t *)(fault_addr | IRAM0_INTR_ST_FAULTADDR_HI);

    *op_type = (uint32_t)status_bits & IRAM0_INTR_ST_OP_RW_BIT;
    *op_subtype = (uint32_t)status_bits & IRAM0_INTR_ST_OP_TYPE_BIT;
}
//Returns true if the currently pending memprot interrupt originates from IRAM0 (ILG_INTR bit set)
static inline bool esp_memprot_iram0_is_assoc_intr(void)
{
    return DPORT_GET_PERI_REG_MASK(DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_INTR) > 0;
}

//Clears the IRAM0 illegal-access interrupt by setting the ILG_CLR bit
static inline void esp_memprot_iram0_clear_intr(void)
{
    DPORT_SET_PERI_REG_MASK(DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_CLR);
}

//Returns the IRAM0 interrupt-enable (ILG_EN) bit value
static inline uint32_t esp_memprot_iram0_get_intr_ena_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_EN);
}

//Returns the IRAM0 interrupt-active (ILG_INTR) bit value
static inline uint32_t esp_memprot_iram0_get_intr_on_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_INTR);
}

//Returns the IRAM0 interrupt-clear (ILG_CLR) bit value
static inline uint32_t esp_memprot_iram0_get_intr_clr_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_IRAM0_4_REG, DPORT_PMS_PRO_IRAM0_ILG_CLR);
}

//Locks the IRAM0 permission configuration; the lock resets automatically on CPU restart
static inline void esp_memprot_iram0_set_lock(void)
{
    DPORT_WRITE_PERI_REG( DPORT_PMS_PRO_IRAM0_0_REG, DPORT_PMS_PRO_IRAM0_LOCK);
}

//Returns the raw IRAM0 lock register (DPORT_PMS_PRO_IRAM0_0_REG)
static inline uint32_t esp_memprot_iram0_get_lock_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_IRAM0_0_REG);
}

//Returns the IRAM0 configuration-lock bit value
static inline uint32_t esp_memprot_iram0_get_lock_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_IRAM0_0_REG, DPORT_PMS_PRO_IRAM0_LOCK);
}
/**
 * Sets Write/Read/eXecute permissions of one unified-management block (0-3).
 * Each permission is applied with a separate read-modify-write of
 * DPORT_PMS_PRO_IRAM0_1_REG; aborts on an invalid block number.
 *
 * @param block unified-management block index (IRAM0_UNI_BLOCK_0..3)
 * @param write_perm allow writes to the block
 * @param read_perm allow reads from the block
 * @param exec_perm allow instruction fetch from the block
 */
static inline void esp_memprot_iram0_set_uni_block_perm(uint32_t block, bool write_perm, bool read_perm, bool exec_perm)
{
    assert(block < IRAM0_TOTAL_UNI_BLOCKS);

    //pick the W/R/F field bits belonging to the requested block
    uint32_t write_bit, read_bit, exec_bit;
    switch ( block ) {
    case IRAM0_UNI_BLOCK_0:
        write_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_W;
        read_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_R;
        exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_F;
        break;
    case IRAM0_UNI_BLOCK_1:
        write_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_W;
        read_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_R;
        exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_F;
        break;
    case IRAM0_UNI_BLOCK_2:
        write_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_W;
        read_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_R;
        exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_F;
        break;
    case IRAM0_UNI_BLOCK_3:
        write_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_W;
        read_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_R;
        exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_F;
        break;
    default:
        abort();
    }

    if ( write_perm ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, write_bit );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, write_bit );
    }

    if ( read_perm ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, read_bit );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, read_bit );
    }

    if ( exec_perm ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, exec_bit );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_IRAM0_1_REG, exec_bit );
    }
}
//Returns the Read-permission field of the given unified-management block (0-3); aborts on invalid block
static inline uint32_t esp_memprot_iram0_get_uni_block_read_bit(uint32_t block)
{
    assert(block < IRAM0_TOTAL_UNI_BLOCKS);

    if ( block == IRAM0_UNI_BLOCK_0 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_0_R );
    }
    if ( block == IRAM0_UNI_BLOCK_1 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_1_R );
    }
    if ( block == IRAM0_UNI_BLOCK_2 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_2_R );
    }
    if ( block == IRAM0_UNI_BLOCK_3 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_3_R );
    }
    abort();
}

//Returns the Write-permission field of the given unified-management block (0-3); aborts on invalid block
static inline uint32_t esp_memprot_iram0_get_uni_block_write_bit(uint32_t block)
{
    assert(block < IRAM0_TOTAL_UNI_BLOCKS);

    if ( block == IRAM0_UNI_BLOCK_0 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_0_W );
    }
    if ( block == IRAM0_UNI_BLOCK_1 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_1_W );
    }
    if ( block == IRAM0_UNI_BLOCK_2 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_2_W );
    }
    if ( block == IRAM0_UNI_BLOCK_3 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_3_W );
    }
    abort();
}

//Returns the eXecute-permission field of the given unified-management block (0-3); aborts on invalid block
static inline uint32_t esp_memprot_iram0_get_uni_block_exec_bit(uint32_t block)
{
    assert(block < IRAM0_TOTAL_UNI_BLOCKS);

    if ( block == IRAM0_UNI_BLOCK_0 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_0_F );
    }
    if ( block == IRAM0_UNI_BLOCK_1 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_1_F );
    }
    if ( block == IRAM0_UNI_BLOCK_2 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_2_F );
    }
    if ( block == IRAM0_UNI_BLOCK_3 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_1_REG, DPORT_PMS_PRO_IRAM0_SRAM_3_F );
    }
    abort();
}
/**
 * Looks up the W/R/F field bitmasks of DPORT_PMS_PRO_IRAM0_1_REG belonging to
 * a unified-management block (0-3); aborts on an invalid block number.
 *
 * @param block unified-management block index (IRAM0_UNI_BLOCK_0..3)
 * @param write_bit [out] Write-permission bitmask of the block
 * @param read_bit [out] Read-permission bitmask of the block
 * @param exec_bit [out] eXecute-permission bitmask of the block
 */
static inline void esp_memprot_iram0_get_uni_block_sgnf_bits(uint32_t block, uint32_t *write_bit, uint32_t *read_bit, uint32_t *exec_bit)
{
    assert(block < IRAM0_TOTAL_UNI_BLOCKS);

    switch ( block ) {
    case IRAM0_UNI_BLOCK_0:
        *write_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_W;
        *read_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_R;
        *exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_0_F;
        break;
    case IRAM0_UNI_BLOCK_1:
        *write_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_W;
        *read_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_R;
        *exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_1_F;
        break;
    case IRAM0_UNI_BLOCK_2:
        *write_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_W;
        *read_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_R;
        *exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_2_F;
        break;
    case IRAM0_UNI_BLOCK_3:
        *write_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_W;
        *read_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_R;
        *exec_bit = DPORT_PMS_PRO_IRAM0_SRAM_3_F;
        break;
    default:
        abort();
    }
}
//Returns the raw unified-management permission register (DPORT_PMS_PRO_IRAM0_1_REG)
static inline uint32_t esp_memprot_iram0_get_perm_uni_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_IRAM0_1_REG);
}

//Returns the raw split-management permission register (DPORT_PMS_PRO_IRAM0_2_REG)
static inline uint32_t esp_memprot_iram0_get_perm_split_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_IRAM0_2_REG);
}
/**
 * Sets the complete IRAM0 protection scheme: unified-management blocks (0-3)
 * plus the split-managed range divided at split_addr. Addresses below the
 * split get the "low" permissions (lw/lr/lx), addresses above it the "high"
 * permissions (hw/hr/hx); each unified-management block receives the low or
 * high set depending on which side of split_addr it lies.
 *
 * @param split_addr splitting address (word aligned, must not exceed IRAM0_SPL_BLOCK_HIGH)
 * @param lw low region Write permission
 * @param lr low region Read permission
 * @param lx low region eXecute permission
 * @param hw high region Write permission
 * @param hr high region Read permission
 * @param hx high region eXecute permission
 */
static inline void esp_memprot_iram0_set_prot(uint32_t *split_addr, bool lw, bool lr, bool lx, bool hw, bool hr, bool hx)
{
    uint32_t addr = (uint32_t)split_addr;
    assert( addr <= IRAM0_SPL_BLOCK_HIGH );

    //find number of unified-management blocks lying below split_addr (-1 = none)
    int uni_blocks_low = -1;
    if ( addr >= IRAM0_UNI_BLOCK_0_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= IRAM0_UNI_BLOCK_1_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= IRAM0_UNI_BLOCK_2_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= IRAM0_UNI_BLOCK_3_LOW ) {
        uni_blocks_low++;
    }

    //unified mgmt settings per block (bits W/R/X: [11:9] bl3, [8:6] bl2, [5:3] bl1, [2:0] bl0)
    uint32_t write_bit, read_bit, exec_bit;
    uint32_t uni_block_perm = 0;

    //BUGFIX: loop counter must be signed. With a size_t counter the comparison
    //"x <= uni_blocks_low" promotes uni_blocks_low == -1 to SIZE_MAX, so the
    //condition is always true and every block wrongly gets the low-region permissions.
    for ( int x = 0; x < IRAM0_TOTAL_UNI_BLOCKS; x++ ) {
        esp_memprot_iram0_get_uni_block_sgnf_bits(x, &write_bit, &read_bit, &exec_bit);
        if ( x <= uni_blocks_low ) {
            if (lw) {
                uni_block_perm |= write_bit;
            }
            if (lr) {
                uni_block_perm |= read_bit;
            }
            if (lx) {
                uni_block_perm |= exec_bit;
            }
        } else {
            if (hw) {
                uni_block_perm |= write_bit;
            }
            if (hr) {
                uni_block_perm |= read_bit;
            }
            if (hx) {
                uni_block_perm |= exec_bit;
            }
        }
    }

    //if the split address falls into the split-managed range, normalize it for the split.mgmt cfg register
    uint32_t reg_split_addr = 0;

    if ( addr >= IRAM0_SPL_BLOCK_LOW ) {
        //split address must be WORD aligned
        reg_split_addr = addr >> 2;
        assert(addr == (reg_split_addr << 2));

        //use only 17 signf.bits as the cropped parts are constant for whole section (bits [16:0])
        reg_split_addr = (reg_split_addr << DPORT_PMS_PRO_IRAM0_SRAM_4_SPLTADDR_S) & DPORT_PMS_PRO_IRAM0_SRAM_4_SPLTADDR_M;
    }

    //prepare high & low permission mask (bits: [22:20] high range, [19:17] low range)
    uint32_t permission_mask = 0;
    if ( lw ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_L_W;
    }
    if ( lr ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_L_R;
    }
    if ( lx ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_L_F;
    }
    if ( hw ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_H_W;
    }
    if ( hr ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_H_R;
    }
    if ( hx ) {
        permission_mask |= DPORT_PMS_PRO_IRAM0_SRAM_4_H_F;
    }

    //write both cfg. registers
    DPORT_WRITE_PERI_REG( DPORT_PMS_PRO_IRAM0_1_REG, uni_block_perm );
    DPORT_WRITE_PERI_REG( DPORT_PMS_PRO_IRAM0_2_REG, reg_split_addr | permission_mask );
}
/**
 * Reads the six split-managed-range permission bits of IRAM0 from
 * DPORT_PMS_PRO_IRAM0_2_REG (l* = low range, h* = high range; W/R/F =
 * write/read/execute).
 */
static inline void esp_memprot_iram0_get_split_sgnf_bits(bool *lw, bool *lr, bool *lx, bool *hw, bool *hr, bool *hx)
{
    *lw = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_L_W );
    *lr = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_L_R );
    *lx = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_L_F );
    *hw = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_H_W );
    *hr = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_H_R );
    *hx = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_IRAM0_2_REG, DPORT_PMS_PRO_IRAM0_SRAM_4_H_F );
}
/**
* === DRAM0 ====
*/
#define DRAM0_TOTAL_UNI_BLOCKS 4
#define DRAM0_UNI_BLOCK_0 0
#define DRAM0_UNI_BLOCK_1 1
#define DRAM0_UNI_BLOCK_2 2
#define DRAM0_UNI_BLOCK_3 3
#define DRAM0_SPL_BLOCK_BASE 0x3FFB0000
//unified management (SRAM blocks 0-3)
#define DRAM0_UNI_BLOCK_0_LOW 0x3FFB0000
#define DRAM0_UNI_BLOCK_0_HIGH 0x3FFB1FFF
#define DRAM0_UNI_BLOCK_1_LOW 0x3FFB2000
#define DRAM0_UNI_BLOCK_1_HIGH 0x3FFB3FFF
#define DRAM0_UNI_BLOCK_2_LOW 0x3FFB4000
#define DRAM0_UNI_BLOCK_2_HIGH 0x3FFB5FFF
#define DRAM0_UNI_BLOCK_3_LOW 0x3FFB6000
#define DRAM0_UNI_BLOCK_3_HIGH 0x3FFB7FFF
//split management (SRAM blocks 4-21)
#define DRAM0_SPL_BLOCK_LOW 0x3FFB8000 //block 4 low
#define DRAM0_SPL_BLOCK_HIGH 0x3FFFFFFF //block 21 high
#define DRAM0_SPLTADDR_MIN 0x3FFC0000 //block 6 low - minimum splitting address
//DRAM0 interrupt status bitmasks
#define DRAM0_INTR_ST_FAULTADDR_M 0x03FFFFC0 //(bits 25:6 in the reg)
#define DRAM0_INTR_ST_FAULTADDR_S 0x4 //(bits 21:2 of real address)
#define DRAM0_INTR_ST_FAULTADDR_HI 0x3FF00000 //(high nonsignificant bits 31:22 of the faulting address - constant)
#define DRAM0_INTR_ST_OP_RW_BIT BIT(4) //read: 0, write: 1
#define DRAM0_INTR_ST_OP_ATOMIC_BIT BIT(5) //non-atomic: 0, atomic: 1
//Returns the interrupt-matrix source number of the DRAM0 permission-violation interrupt
static inline uint32_t esp_memprot_dram0_get_intr_source_num(void)
{
    return ETS_PMS_PRO_DRAM0_ILG_INTR_SOURCE;
}

//Enables (true) or disables (false) the DRAM0 illegal-access interrupt (ILG_EN bit)
static inline void esp_memprot_dram0_intr_ena(bool enable)
{
    if ( enable ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_EN );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_EN );
    }
}

//Returns true if the currently pending memprot interrupt originates from DRAM0 (ILG_INTR bit set)
static inline bool esp_memprot_dram0_is_assoc_intr(void)
{
    return DPORT_GET_PERI_REG_MASK(DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_INTR) > 0;
}

//Clears the DRAM0 illegal-access interrupt by setting the ILG_CLR bit
static inline void esp_memprot_dram0_clear_intr(void)
{
    DPORT_SET_PERI_REG_MASK(DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_CLR);
}

//Returns the DRAM0 interrupt-enable (ILG_EN) bit value
static inline uint32_t esp_memprot_dram0_get_intr_ena_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_EN);
}

//Returns the DRAM0 interrupt-active (ILG_INTR) bit value
static inline uint32_t esp_memprot_dram0_get_intr_on_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_INTR);
}

//Returns the DRAM0 interrupt-clear (ILG_CLR) bit value
static inline uint32_t esp_memprot_dram0_get_intr_clr_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_DRAM0_3_REG, DPORT_PMS_PRO_DRAM0_ILG_CLR);
}

//Returns the DRAM0 configuration-lock bit value
static inline uint32_t esp_memprot_dram0_get_lock_bit(void)
{
    return DPORT_REG_GET_FIELD(DPORT_PMS_PRO_DRAM0_0_REG, DPORT_PMS_PRO_DRAM0_LOCK);
}
/**
 * Looks up the W/R field bitmasks of DPORT_PMS_PRO_DRAM0_1_REG belonging to
 * a unified-management block (0-3); aborts on an invalid block number.
 *
 * @param block unified-management block index (DRAM0_UNI_BLOCK_0..3)
 * @param write_bit [out] Write-permission bitmask of the block
 * @param read_bit [out] Read-permission bitmask of the block
 */
static inline void esp_memprot_dram0_get_uni_block_sgnf_bits(uint32_t block, uint32_t *write_bit, uint32_t *read_bit)
{
    assert(block < DRAM0_TOTAL_UNI_BLOCKS);

    switch ( block ) {
    case DRAM0_UNI_BLOCK_0:
        *write_bit = DPORT_PMS_PRO_DRAM0_SRAM_0_W;
        *read_bit = DPORT_PMS_PRO_DRAM0_SRAM_0_R;
        break;
    case DRAM0_UNI_BLOCK_1:
        *write_bit = DPORT_PMS_PRO_DRAM0_SRAM_1_W;
        *read_bit = DPORT_PMS_PRO_DRAM0_SRAM_1_R;
        break;
    case DRAM0_UNI_BLOCK_2:
        *write_bit = DPORT_PMS_PRO_DRAM0_SRAM_2_W;
        *read_bit = DPORT_PMS_PRO_DRAM0_SRAM_2_R;
        break;
    case DRAM0_UNI_BLOCK_3:
        *write_bit = DPORT_PMS_PRO_DRAM0_SRAM_3_W;
        *read_bit = DPORT_PMS_PRO_DRAM0_SRAM_3_R;
        break;
    default:
        abort();
    }
}
/**
 * Sets Write/Read permissions of one DRAM0 unified-management block (0-3).
 * Each permission is applied with a separate read-modify-write of
 * DPORT_PMS_PRO_DRAM0_1_REG.
 *
 * @param block unified-management block index (DRAM0_UNI_BLOCK_0..3)
 * @param write_perm allow writes to the block
 * @param read_perm allow reads from the block
 */
static inline void esp_memprot_dram0_set_uni_block_perm(uint32_t block, bool write_perm, bool read_perm)
{
    assert(block < DRAM0_TOTAL_UNI_BLOCKS);

    uint32_t write_bit, read_bit;
    esp_memprot_dram0_get_uni_block_sgnf_bits(block, &write_bit, &read_bit);

    if ( write_perm ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_1_REG, write_bit );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_1_REG, write_bit );
    }

    if ( read_perm ) {
        DPORT_SET_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_1_REG, read_bit );
    } else {
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PMS_PRO_DRAM0_1_REG, read_bit );
    }
}
//Returns the Read-permission field of the given DRAM0 unified-management block (0-3); aborts on invalid block
static inline uint32_t esp_memprot_dram0_get_uni_block_read_bit(uint32_t block)
{
    assert(block < DRAM0_TOTAL_UNI_BLOCKS);

    if ( block == DRAM0_UNI_BLOCK_0 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_0_R );
    }
    if ( block == DRAM0_UNI_BLOCK_1 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_1_R );
    }
    if ( block == DRAM0_UNI_BLOCK_2 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_2_R );
    }
    if ( block == DRAM0_UNI_BLOCK_3 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_3_R );
    }
    abort();
}

//Returns the Write-permission field of the given DRAM0 unified-management block (0-3); aborts on invalid block
static inline uint32_t esp_memprot_dram0_get_uni_block_write_bit(uint32_t block)
{
    assert(block < DRAM0_TOTAL_UNI_BLOCKS);

    if ( block == DRAM0_UNI_BLOCK_0 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_0_W );
    }
    if ( block == DRAM0_UNI_BLOCK_1 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_1_W );
    }
    if ( block == DRAM0_UNI_BLOCK_2 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_2_W );
    }
    if ( block == DRAM0_UNI_BLOCK_3 ) {
        return DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_3_W );
    }
    abort();
}
//Returns the raw DRAM0 lock register (DPORT_PMS_PRO_DRAM0_0_REG)
static inline uint32_t esp_memprot_dram0_get_lock_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_DRAM0_0_REG);
}

//Locks the DRAM0 permission configuration; the lock resets automatically on CPU restart
static inline void esp_memprot_dram0_set_lock(void)
{
    DPORT_WRITE_PERI_REG( DPORT_PMS_PRO_DRAM0_0_REG, DPORT_PMS_PRO_DRAM0_LOCK);
}

//Returns the raw DRAM0 permission register (DPORT_PMS_PRO_DRAM0_1_REG)
static inline uint32_t esp_memprot_dram0_get_perm_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_DRAM0_1_REG);
}

//Returns the raw DRAM0 interrupt enable/status register (DPORT_PMS_PRO_DRAM0_3_REG)
static inline uint32_t esp_memprot_dram0_get_ena_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_DRAM0_3_REG);
}

//Returns the raw DRAM0 fault-status register (DPORT_PMS_PRO_DRAM0_4_REG)
static inline uint32_t esp_memprot_dram0_get_fault_reg(void)
{
    return DPORT_READ_PERI_REG(DPORT_PMS_PRO_DRAM0_4_REG);
}
/**
 * Decodes the DRAM0 fault-status register into its components.
 *
 * @param faulting_address reconstructed faulting address: register bits 25:6
 *        (DRAM0_INTR_ST_FAULTADDR_M) shifted down by DRAM0_INTR_ST_FAULTADDR_S,
 *        OR-ed with the constant high bits DRAM0_INTR_ST_FAULTADDR_HI
 * @param op_type RW bit of the faulting operation (read: 0, write: 1)
 * @param op_subtype atomic bit of the faulting operation (non-atomic: 0, atomic: 1)
 */
static inline void esp_memprot_dram0_get_fault_status(uint32_t **faulting_address, uint32_t *op_type, uint32_t *op_subtype)
{
    uint32_t status_bits = esp_memprot_dram0_get_fault_reg();

    uint32_t fault_addr = (status_bits & DRAM0_INTR_ST_FAULTADDR_M) >> DRAM0_INTR_ST_FAULTADDR_S;
    *faulting_address = (uint32_t *)(fault_addr | DRAM0_INTR_ST_FAULTADDR_HI);

    *op_type = (uint32_t)status_bits & DRAM0_INTR_ST_OP_RW_BIT;
    *op_subtype = (uint32_t)status_bits & DRAM0_INTR_ST_OP_ATOMIC_BIT;
}
/**
 * Sets the complete DRAM0 protection scheme: unified-management blocks (0-3)
 * plus the split-managed range divided at split_addr. Addresses below the
 * split get the "low" permissions (lw/lr), addresses above it the "high"
 * permissions (hw/hr); each unified-management block receives the low or high
 * set depending on which side of split_addr it lies.
 *
 * @param split_addr splitting address (word aligned, must not exceed DRAM0_SPL_BLOCK_HIGH)
 * @param lw low region Write permission
 * @param lr low region Read permission
 * @param hw high region Write permission
 * @param hr high region Read permission
 */
static inline void esp_memprot_dram0_set_prot(uint32_t *split_addr, bool lw, bool lr, bool hw, bool hr)
{
    uint32_t addr = (uint32_t)split_addr;

    //low boundary check provided by LD script. see comment in esp_memprot_iram0_set_prot()
    assert( addr <= DRAM0_SPL_BLOCK_HIGH );

    //find number of unified-management blocks lying below split_addr (-1 = none)
    int uni_blocks_low = -1;
    if ( addr >= DRAM0_UNI_BLOCK_0_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= DRAM0_UNI_BLOCK_1_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= DRAM0_UNI_BLOCK_2_LOW ) {
        uni_blocks_low++;
    }
    if ( addr >= DRAM0_UNI_BLOCK_3_LOW ) {
        uni_blocks_low++;
    }

    //set unified mgmt region
    uint32_t write_bit, read_bit, uni_block_perm = 0;

    //BUGFIX: loop counter must be signed. With a size_t counter the comparison
    //"x <= uni_blocks_low" promotes uni_blocks_low == -1 to SIZE_MAX, so the
    //condition is always true and every block wrongly gets the low-region permissions.
    for ( int x = 0; x < DRAM0_TOTAL_UNI_BLOCKS; x++ ) {
        esp_memprot_dram0_get_uni_block_sgnf_bits(x, &write_bit, &read_bit);
        if ( x <= uni_blocks_low ) {
            if (lw) {
                uni_block_perm |= write_bit;
            }
            if (lr) {
                uni_block_perm |= read_bit;
            }
        } else {
            if (hw) {
                uni_block_perm |= write_bit;
            }
            if (hr) {
                uni_block_perm |= read_bit;
            }
        }
    }

    //check split address is WORD aligned
    uint32_t reg_split_addr = addr >> 2;
    assert(addr == (reg_split_addr << 2));

    //shift aligned split address to proper bit offset
    reg_split_addr = (reg_split_addr << DPORT_PMS_PRO_DRAM0_SRAM_4_SPLTADDR_S) & DPORT_PMS_PRO_DRAM0_SRAM_4_SPLTADDR_M;

    //prepare high & low permission mask
    uint32_t permission_mask = 0;
    if (lw) {
        permission_mask |= DPORT_PMS_PRO_DRAM0_SRAM_4_L_W;
    }
    if (lr) {
        permission_mask |= DPORT_PMS_PRO_DRAM0_SRAM_4_L_R;
    }
    if (hw) {
        permission_mask |= DPORT_PMS_PRO_DRAM0_SRAM_4_H_W;
    }
    if (hr) {
        permission_mask |= DPORT_PMS_PRO_DRAM0_SRAM_4_H_R;
    }

    //write configuration to DPORT_PMS_PRO_DRAM0_1_REG
    DPORT_WRITE_PERI_REG(DPORT_PMS_PRO_DRAM0_1_REG, reg_split_addr | permission_mask | uni_block_perm);
}
/**
 * Reads the four split-managed-range permission bits of DRAM0 from
 * DPORT_PMS_PRO_DRAM0_1_REG (l* = low range, h* = high range; W/R =
 * write/read).
 */
static inline void esp_memprot_dram0_get_split_sgnf_bits(bool *lw, bool *lr, bool *hw, bool *hr)
{
    *lw = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_4_L_W );
    *lr = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_4_L_R );
    *hw = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_4_H_W );
    *hr = DPORT_REG_GET_FIELD( DPORT_PMS_PRO_DRAM0_1_REG, DPORT_PMS_PRO_DRAM0_SRAM_4_H_R );
}
#ifdef __cplusplus
}
#endif

View file

@ -53,6 +53,12 @@ const soc_memory_type_desc_t soc_memory_types[] = {
{ "SPIRAM", { MALLOC_CAP_SPIRAM|MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_8BIT|MALLOC_CAP_32BIT}, false, false},
};
#ifdef CONFIG_ESP32S2_MEMPROT_FEATURE
#define SOC_MEMORY_TYPE_DEFAULT 0
#else
#define SOC_MEMORY_TYPE_DEFAULT 2
#endif
const size_t soc_memory_type_count = sizeof(soc_memory_types)/sizeof(soc_memory_type_desc_t);
/*
@ -67,41 +73,41 @@ const soc_memory_region_t soc_memory_regions[] = {
#endif
#if CONFIG_ESP32S2_INSTRUCTION_CACHE_8KB
#if CONFIG_ESP32S2_DATA_CACHE_0KB
{ 0x3FFB2000, 0x2000, 2, 0x40022000}, //Block 1, can be use as I/D cache memory
{ 0x3FFB4000, 0x2000, 2, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, 2, 0x40026000}, //Block 3, can be use as D cache memory
{ 0x3FFB2000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40022000}, //Block 1, can be use as I/D cache memory
{ 0x3FFB4000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40026000}, //Block 3, can be use as D cache memory
#elif CONFIG_ESP32S2_DATA_CACHE_8KB
{ 0x3FFB4000, 0x2000, 2, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, 2, 0x40026000}, //Block 3, can be use as D cache memory
{ 0x3FFB4000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40026000}, //Block 3, can be use as D cache memory
#else
{ 0x3FFB6000, 0x2000, 2, 0x40026000}, //Block 3, can be use as D cache memory
{ 0x3FFB6000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40026000}, //Block 3, can be use as D cache memory
#endif
#else
#if CONFIG_ESP32S2_DATA_CACHE_0KB
{ 0x3FFB4000, 0x2000, 2, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, 2, 0x40026000}, //Block 3, can be use as D cache memory
{ 0x3FFB4000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40024000}, //Block 2, can be use as D cache memory
{ 0x3FFB6000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40026000}, //Block 3, can be use as D cache memory
#elif CONFIG_ESP32S2_DATA_CACHE_8KB
{ 0x3FFB6000, 0x2000, 2, 0x40026000}, //Block 3, can be use as D cache memory
{ 0x3FFB6000, 0x2000, SOC_MEMORY_TYPE_DEFAULT, 0x40026000}, //Block 3, can be use as D cache memory
#endif
#endif
{ 0x3FFB8000, 0x4000, 2, 0x40028000}, //Block 4, can be remapped to ROM, can be used as trace memory
{ 0x3FFBC000, 0x4000, 2, 0x4002C000}, //Block 5, can be remapped to ROM, can be used as trace memory
{ 0x3FFC0000, 0x4000, 2, 0x40030000}, //Block 6, can be used as trace memory
{ 0x3FFC4000, 0x4000, 2, 0x40034000}, //Block 7, can be used as trace memory
{ 0x3FFC8000, 0x4000, 2, 0x40038000}, //Block 8, can be used as trace memory
{ 0x3FFCC000, 0x4000, 2, 0x4003C000}, //Block 9, can be used as trace memory
{ 0x3FFB8000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40028000}, //Block 4, can be remapped to ROM, can be used as trace memory
{ 0x3FFBC000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x4002C000}, //Block 5, can be remapped to ROM, can be used as trace memory
{ 0x3FFC0000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40030000}, //Block 6, can be used as trace memory
{ 0x3FFC4000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40034000}, //Block 7, can be used as trace memory
{ 0x3FFC8000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40038000}, //Block 8, can be used as trace memory
{ 0x3FFCC000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x4003C000}, //Block 9, can be used as trace memory
{ 0x3FFD0000, 0x4000, 2, 0x40040000}, //Block 10, can be used as trace memory
{ 0x3FFD4000, 0x4000, 2, 0x40044000}, //Block 11, can be used as trace memory
{ 0x3FFD8000, 0x4000, 2, 0x40048000}, //Block 12, can be used as trace memory
{ 0x3FFDC000, 0x4000, 2, 0x4004C000}, //Block 13, can be used as trace memory
{ 0x3FFE0000, 0x4000, 2, 0x40050000}, //Block 14, can be used as trace memory
{ 0x3FFE4000, 0x4000, 2, 0x40054000}, //Block 15, can be used as trace memory
{ 0x3FFE8000, 0x4000, 2, 0x40058000}, //Block 16, can be used as trace memory
{ 0x3FFEC000, 0x4000, 2, 0x4005C000}, //Block 17, can be used as trace memory
{ 0x3FFF0000, 0x4000, 2, 0x40060000}, //Block 18, can be used for MAC dump, can be used as trace memory
{ 0x3FFF4000, 0x4000, 2, 0x40064000}, //Block 19, can be used for MAC dump, can be used as trace memory
{ 0x3FFF8000, 0x4000, 2, 0x40068000}, //Block 20, can be used for MAC dump, can be used as trace memory
{ 0x3FFD0000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40040000}, //Block 10, can be used as trace memory
{ 0x3FFD4000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40044000}, //Block 11, can be used as trace memory
{ 0x3FFD8000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40048000}, //Block 12, can be used as trace memory
{ 0x3FFDC000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x4004C000}, //Block 13, can be used as trace memory
{ 0x3FFE0000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40050000}, //Block 14, can be used as trace memory
{ 0x3FFE4000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40054000}, //Block 15, can be used as trace memory
{ 0x3FFE8000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40058000}, //Block 16, can be used as trace memory
{ 0x3FFEC000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x4005C000}, //Block 17, can be used as trace memory
{ 0x3FFF0000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40060000}, //Block 18, can be used for MAC dump, can be used as trace memory
{ 0x3FFF4000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40064000}, //Block 19, can be used for MAC dump, can be used as trace memory
{ 0x3FFF8000, 0x4000, SOC_MEMORY_TYPE_DEFAULT, 0x40068000}, //Block 20, can be used for MAC dump, can be used as trace memory
{ 0x3FFFC000, 0x4000, 1, 0x4006C000}, //Block 21, can be used for MAC dump, can be used as trace memory, used for startup stack
};

View file

@ -29,7 +29,7 @@ extern soc_reserved_region_t soc_reserved_memory_region_end;
static size_t s_get_num_reserved_regions(void)
{
return ( &soc_reserved_memory_region_end
- &soc_reserved_memory_region_start );
- &soc_reserved_memory_region_start );
}
size_t soc_get_available_memory_region_max_count(void)
@ -63,20 +63,19 @@ static void s_prepare_reserved_regions(soc_reserved_region_t *reserved, size_t c
&soc_reserved_memory_region_start,
&soc_reserved_memory_region_end);
ESP_EARLY_LOGD(TAG, "Checking %d reserved memory ranges:", count);
for (size_t i = 0; i < count; i++)
{
for (size_t i = 0; i < count; i++) {
ESP_EARLY_LOGD(TAG, "Reserved memory range 0x%08x - 0x%08x",
reserved[i].start, reserved[i].end);
reserved[i].start = reserved[i].start & ~3; /* expand all reserved areas to word boundaries */
reserved[i].end = (reserved[i].end + 3) & ~3;
assert(reserved[i].start < reserved[i].end);
if (i < count - 1) {
assert(reserved[i+1].start > reserved[i].start);
if (reserved[i].end > reserved[i+1].start) {
assert(reserved[i + 1].start > reserved[i].start);
if (reserved[i].end > reserved[i + 1].start) {
ESP_EARLY_LOGE(TAG, "SOC_RESERVE_MEMORY_REGION region range " \
"0x%08x - 0x%08x overlaps with 0x%08x - 0x%08x",
reserved[i].start, reserved[i].end, reserved[i+1].start,
reserved[i+1].end);
reserved[i].start, reserved[i].end, reserved[i + 1].start,
reserved[i + 1].end);
abort();
}
}
@ -101,7 +100,7 @@ size_t soc_get_available_memory_regions(soc_memory_region_t *regions)
region, and then copy them to an out_region once trimmed
*/
ESP_EARLY_LOGD(TAG, "Building list of available memory regions:");
while(in_region != in_regions + soc_memory_region_count) {
while (in_region != in_regions + soc_memory_region_count) {
soc_memory_region_t in = *in_region;
ESP_EARLY_LOGV(TAG, "Examining memory region 0x%08x - 0x%08x", in.start, in.start + in.size);
intptr_t in_start = in.start;
@ -113,21 +112,18 @@ size_t soc_get_available_memory_regions(soc_memory_region_t *regions)
if (reserved[i].end <= in_start) {
/* reserved region ends before 'in' starts */
continue;
}
else if (reserved[i].start >= in_end) {
} else if (reserved[i].start >= in_end) {
/* reserved region starts after 'in' ends */
break;
}
else if (reserved[i].start <= in_start &&
reserved[i].end >= in_end) { /* reserved covers all of 'in' */
} else if (reserved[i].start <= in_start &&
reserved[i].end >= in_end) { /* reserved covers all of 'in' */
ESP_EARLY_LOGV(TAG, "Region 0x%08x - 0x%08x inside of reserved 0x%08x - 0x%08x",
in_start, in_end, reserved[i].start, reserved[i].end);
/* skip 'in' entirely */
copy_in_to_out = false;
break;
}
else if (in_start < reserved[i].start &&
in_end > reserved[i].end) { /* reserved contained inside 'in', need to "hole punch" */
} else if (in_start < reserved[i].start &&
in_end > reserved[i].end) { /* reserved contained inside 'in', need to "hole punch" */
ESP_EARLY_LOGV(TAG, "Region 0x%08x - 0x%08x contains reserved 0x%08x - 0x%08x",
in_start, in_end, reserved[i].start, reserved[i].end);
assert(in_start < reserved[i].start);
@ -145,15 +141,13 @@ size_t soc_get_available_memory_regions(soc_memory_region_t *regions)
/* add first region, then re-run while loop with the updated in_region */
move_to_next = false;
break;
}
else if (reserved[i].start <= in_start) { /* reserved overlaps start of 'in' */
} else if (reserved[i].start <= in_start) { /* reserved overlaps start of 'in' */
ESP_EARLY_LOGV(TAG, "Start of region 0x%08x - 0x%08x overlaps reserved 0x%08x - 0x%08x",
in_start, in_end, reserved[i].start, reserved[i].end);
in.start = reserved[i].end;
in_start = in.start;
in.size = in_end - in_start;
}
else { /* reserved overlaps end of 'in' */
} else { /* reserved overlaps end of 'in' */
ESP_EARLY_LOGV(TAG, "End of region 0x%08x - 0x%08x overlaps reserved 0x%08x - 0x%08x",
in_start, in_end, reserved[i].start, reserved[i].end);
in_end = reserved[i].start;
@ -161,6 +155,11 @@ size_t soc_get_available_memory_regions(soc_memory_region_t *regions)
}
}
/* ignore regions smaller than 16B */
if (in.size <= 16) {
copy_in_to_out = false;
}
if (copy_in_to_out) {
ESP_EARLY_LOGD(TAG, "Available memory region 0x%08x - 0x%08x", in.start, in.start + in.size);
*out_region++ = in;