Heap tracing support

Angus Gratton 2017-05-11 17:56:17 +10:00
parent 64bfdf56bb
commit 1c7b8aa3a5
12 changed files with 716 additions and 44 deletions


@@ -189,11 +189,7 @@ extern "C" {
#endif
#ifndef INCLUDE_pcTaskGetTaskName
#if ( configENABLE_MEMORY_DEBUG == 1)
#define INCLUDE_pcTaskGetTaskName 1
#else
#define INCLUDE_pcTaskGetTaskName 0
#endif
#endif
#ifndef configUSE_APPLICATION_TASK_TAG


@@ -231,12 +231,6 @@
#define INCLUDE_pcTaskGetTaskName 1
#define INCLUDE_xTaskGetIdleTaskHandle 1
#if CONFIG_ENABLE_MEMORY_DEBUG
#define configENABLE_MEMORY_DEBUG 1
#else
#define configENABLE_MEMORY_DEBUG 0
#endif
#define INCLUDE_xSemaphoreGetMutexHolder 1
/* The priority at which the tick interrupt runs. This should probably be


@@ -123,6 +123,7 @@ extern "C" {
#endif
#include "mpu_wrappers.h"
#include "esp_system.h"
/*
* Setup the stack of a new task so it is ready to be placed under the
@@ -143,11 +144,10 @@ extern "C" {
* non-FreeRTOS-specific code, and behave the same as
* pvPortMalloc()/vPortFree().
*/
void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION;
void vPortFree( void *pv ) PRIVILEGED_FUNCTION;
void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION;
size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION;
size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION;
#define pvPortMalloc malloc
#define vPortFree free
#define xPortGetFreeHeapSize esp_get_free_heap_size
#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size
/*
* Setup the hardware ready for the scheduler to take control. This generally


@@ -382,30 +382,3 @@ void vPortSetStackWatchpoint( void* pxStackStart ) {
uint32_t xPortGetTickRateHz(void) {
return (uint32_t)configTICK_RATE_HZ;
}
/* Heap functions, wrappers around heap_caps_xxx functions
NB: libc malloc() & free() are also defined & available
for this purpose.
*/
void *pvPortMalloc( size_t xWantedSize )
{
return heap_caps_malloc(xWantedSize, MALLOC_CAP_8BIT);
}
void vPortFree( void *pv )
{
return heap_caps_free(pv);
}
size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION
{
return heap_caps_get_free_size( MALLOC_CAP_8BIT );
}
size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION
{
return heap_caps_get_minimum_free_size( MALLOC_CAP_8BIT );
}


@@ -27,4 +27,24 @@ config HEAP_POISONING_COMPREHENSIVE
bool "Comprehensive"
endchoice
config HEAP_TRACING
bool "Enable heap tracing"
help
Enables the heap tracing API defined in esp_heap_trace.h.
Enabling this option causes a moderate increase in IRAM code size and a minor increase in heap function
(malloc/free/realloc) CPU overhead, even when the tracing feature is not used. So it's best to keep it disabled
unless tracing is being used.
config HEAP_TRACING_STACK_DEPTH
int "Heap tracing stack depth"
range 0 10
default 2
depends on HEAP_TRACING
help
Number of stack frames to save when tracing heap operation callers.
Saving more stack frames uses more memory in the heap trace buffer (and slows down allocation), but
can provide more useful information about each allocation's caller.
endmenu
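With these options enabled, the generated sdkconfig contains lines matching the example defaults added at the end of this commit:

CONFIG_HEAP_TRACING=y
CONFIG_HEAP_TRACING_STACK_DEPTH=2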


@@ -1,3 +1,18 @@
#
# Component Makefile
#
COMPONENT_OBJS := heap_caps_init.o heap_caps.o multi_heap.o heap_trace.o
ifndef CONFIG_HEAP_POISONING_DISABLED
COMPONENT_OBJS += multi_heap_poisoning.o
endif
ifdef CONFIG_HEAP_TRACING
WRAP_FUNCTIONS = calloc malloc free realloc heap_caps_malloc heap_caps_free heap_caps_realloc
WRAP_ARGUMENT := -Wl,--wrap=
COMPONENT_ADD_LDFLAGS = -l$(COMPONENT_NAME) $(addprefix $(WRAP_ARGUMENT),$(WRAP_FUNCTIONS))
endif
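For context, the -Wl,--wrap=SYMBOL flags assembled here tell the GNU linker to redirect every call to SYMBOL to __wrap_SYMBOL, while the original definition remains reachable as __real_SYMBOL. A minimal sketch of that pattern, independent of this commit (the real wrappers live in heap_trace.c below):

#include <stddef.h>

void *__real_malloc(size_t size);   /* resolved by the linker to the original malloc */

/* With -Wl,--wrap=malloc, every call to malloc() resolves to this function instead */
void *__wrap_malloc(size_t size)
{
    /* ...record the allocation here, then forward to the real implementation... */
    return __real_malloc(size);
}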


@@ -0,0 +1,406 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>
#include <sys/param.h>
#include <sdkconfig.h>
#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
#include "esp_heap_trace.h"
#undef HEAP_TRACE_SRCFILE
#include "esp_heap_caps.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_memory_layout.h"
#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;
static bool tracing;
static heap_trace_mode_t mode;
/* Buffer used for records, starting at offset 0
*/
static heap_trace_record_t *buffer;
static size_t total_records;
/* Count of entries logged in the buffer.
Maximum total_records
*/
static size_t count;
/* Actual number of allocations logged */
static size_t total_allocations;
/* Actual number of frees logged */
static size_t total_frees;
/* Has the buffer overflowed and lost trace entries? */
static bool has_overflowed = false;
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
{
#ifndef CONFIG_HEAP_TRACING
return ESP_ERR_NOT_SUPPORTED;
#endif
if (tracing) {
return ESP_ERR_INVALID_STATE;
}
buffer = record_buffer;
total_records = num_records;
memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
return ESP_OK;
}
esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
{
#ifndef CONFIG_HEAP_TRACING
return ESP_ERR_NOT_SUPPORTED;
#endif
if (buffer == NULL || total_records == 0) {
return ESP_ERR_INVALID_STATE;
}
taskENTER_CRITICAL(&trace_mux);
tracing = false;
mode = mode_param;
count = 0;
total_allocations = 0;
total_frees = 0;
has_overflowed = false;
heap_trace_resume();
taskEXIT_CRITICAL(&trace_mux);
return ESP_OK;
}
static esp_err_t set_tracing(bool enable)
{
#ifndef CONFIG_HEAP_TRACING
return ESP_ERR_NOT_SUPPORTED;
#endif
if (tracing == enable) {
return ESP_ERR_INVALID_STATE;
}
tracing = enable;
return ESP_OK;
}
esp_err_t heap_trace_stop(void)
{
return set_tracing(false);
}
esp_err_t heap_trace_resume(void)
{
return set_tracing(true);
}
size_t heap_trace_get_count(void)
{
return count;
}
esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
{
#ifndef CONFIG_HEAP_TRACING
return ESP_ERR_NOT_SUPPORTED;
#endif
if (record == NULL) {
return ESP_ERR_INVALID_STATE;
}
esp_err_t result = ESP_OK;
taskENTER_CRITICAL(&trace_mux);
if (index >= count) {
result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
} else {
memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
}
taskEXIT_CRITICAL(&trace_mux);
return result;
}
void heap_trace_dump(void)
{
#ifndef CONFIG_HEAP_TRACING
printf("no data, heap tracing is disabled.\n");
return;
#endif
size_t delta_size = 0;
size_t delta_allocs = 0;
printf("%u allocations trace (%u entry buffer)\n",
count, total_records);
size_t start_count = count;
for (int i = 0; i < count; i++) {
heap_trace_record_t *rec = &buffer[i];
if (rec->address != NULL) {
printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
printf("%p%s", rec->alloced_by[j],
(j < STACK_DEPTH - 1) ? ":" : "");
}
if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
delta_size += rec->size;
delta_allocs++;
printf("\n");
} else {
printf("\nfreed by ");
for (int j = 0; j < STACK_DEPTH; j++) {
printf("%p%s", rec->freed_by[j],
(j < STACK_DEPTH - 1) ? ":" : "\n");
}
}
}
}
if (mode == HEAP_TRACE_ALL) {
printf("%u bytes alive in trace (%u/%u allocations)\n",
delta_size, delta_allocs, heap_trace_get_count());
} else {
printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
}
printf("total allocations %u total frees %u\n", total_allocations, total_frees);
if (start_count != count) { // only a problem if trace isn't stopped before dumping
printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
}
if (has_overflowed) {
printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
}
}
/* Add a new allocation to the heap trace records */
static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
{
taskENTER_CRITICAL(&trace_mux);
if (tracing) {
if (count == total_records) {
has_overflowed = true;
/* Move the whole buffer back one slot.
This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
trying to keep track of an item count that may overflow.
*/
memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
count--;
}
// Copy new record into place
memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
count++;
total_allocations++;
}
taskEXIT_CRITICAL(&trace_mux);
}
// remove a record, used when freeing
static void remove_record(int index);
/* record a free event in the heap trace log
For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
For HEAP_TRACE_LEAKS, this means removing the record from the log.
*/
static IRAM_ATTR void record_free(void *p, void **callers)
{
taskENTER_CRITICAL(&trace_mux);
if (tracing && count > 0) {
total_frees++;
/* search backwards for the allocation record matching this free */
int i;
for (i = count - 1; i >= 0; i--) {
if (buffer[i].address == p) {
break;
}
}
if (i >= 0) {
if (mode == HEAP_TRACE_ALL) {
memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
} else { // HEAP_TRACE_LEAKS
// Leak trace mode: once an allocation is freed, remove it from the list
remove_record(i);
}
}
}
taskEXIT_CRITICAL(&trace_mux);
}
/* remove the entry at 'index' from the ringbuffer of saved records */
static IRAM_ATTR void remove_record(int index)
{
if (index < count - 1) {
// Remove the buffer entry from the list
memmove(&buffer[index], &buffer[index+1],
sizeof(heap_trace_record_t) * (total_records - index - 1));
} else {
// For last element, just zero it out to avoid ambiguity
memset(&buffer[index], 0, sizeof(heap_trace_record_t));
}
count--;
}
/* Encode the CPU ID in the LSB of the ccount value */
inline static uint32_t get_ccount(void)
{
uint32_t ccount = xthal_get_ccount() & ~3;
#ifndef CONFIG_FREERTOS_UNICORE
ccount |= xPortGetCoreID();
#endif
return ccount;
}
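/* For reference, the packed value can be unpacked the same way heap_trace_dump()
   does below (illustrative only, not part of the tracing implementation itself;
   'rec' here stands for a pointer to a heap_trace_record_t):

       uint32_t cpu    = rec->ccount & 1;    // CPU the allocation was made on
       uint32_t cycles = rec->ccount & ~3;   // cycle count with the low bits masked off
*/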
#define TEST_STACK(N) do { \
if (STACK_DEPTH == N) { \
return; \
} \
callers[N] = __builtin_return_address(N+offset); \
if (!esp_ptr_executable(callers[N])) { \
return; \
} \
} while(0);
/* Static function to read the call stack for a traced heap call.
Calls to __builtin_return_address are "unrolled" via TEST_STACK macro as gcc requires the
argument to be a compile-time constant.
*/
static IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
{
const int offset = 2; // Caller is 2 stack frames deeper than we care about
bzero(callers, sizeof(void *) * STACK_DEPTH);
TEST_STACK(0);
TEST_STACK(1);
TEST_STACK(2);
TEST_STACK(3);
TEST_STACK(4);
TEST_STACK(5);
TEST_STACK(6);
TEST_STACK(7);
TEST_STACK(8);
TEST_STACK(9);
}
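/* Illustrative expansion, assuming CONFIG_HEAP_TRACING_STACK_DEPTH is 2: TEST_STACK(0)
   and TEST_STACK(1) store __builtin_return_address(2) and __builtin_return_address(3)
   (skipping the two innermost frames via 'offset'), then TEST_STACK(2) hits the
   "STACK_DEPTH == N" early return before reading any deeper frames. The
   esp_ptr_executable() check also stops the walk early if a return address is not
   a valid code pointer. */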
_Static_assert(STACK_DEPTH >= 0 && STACK_DEPTH <= 10, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-10");
void *__real_heap_caps_malloc(size_t size, uint32_t caps);
/* trace any 'malloc' event */
static IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps)
{
uint32_t ccount = get_ccount();
void *p = __real_heap_caps_malloc(size, caps);
if (tracing && p != NULL) {
heap_trace_record_t rec = {
.address = p,
.ccount = ccount,
.size = size,
};
get_call_stack(rec.alloced_by);
record_allocation(&rec);
}
return p;
}
void __real_heap_caps_free(void *p);
/* trace any 'free' event */
static IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
{
if (tracing && p != NULL) {
void *callers[STACK_DEPTH];
get_call_stack(callers);
record_free(p, callers);
}
__real_heap_caps_free(p);
}
void * __real_heap_caps_realloc(void *p, size_t size, uint32_t caps);
/* trace any 'realloc' event */
static IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps)
{
void *callers[STACK_DEPTH];
uint32_t ccount = get_ccount();
if (tracing && p != NULL && size == 0) {
get_call_stack(callers);
record_free(p, callers);
}
void *r = __real_heap_caps_realloc(p, size, caps);
if (tracing && r != NULL) {
get_call_stack(callers);
if (p != NULL) {
/* trace realloc as free-then-alloc */
record_free(p, callers);
}
heap_trace_record_t rec = {
.address = r,
.ccount = ccount,
.size = size,
};
memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
record_allocation(&rec);
}
return r;
}
/* Note: these wrappers change the behaviour of libc malloc/realloc/free slightly,
as allocations no longer go via the libc functions in ROM, but the end result
is more or less the same. */
IRAM_ATTR void *__wrap_malloc(size_t size)
{
return trace_malloc(size, MALLOC_CAP_8BIT);
}
IRAM_ATTR void __wrap_free(void *p)
{
trace_free(p);
}
IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
{
return trace_realloc(p, size, MALLOC_CAP_8BIT);
}
IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
{
size = size * nmemb;
void *result = trace_malloc(size, MALLOC_CAP_8BIT);
if (result != NULL) {
memset(result, 0, size);
}
return result;
}
IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
{
return trace_malloc(size, caps);
}
IRAM_ATTR void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));
IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
{
return trace_realloc(p, size, caps);
}


@@ -0,0 +1,134 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "sdkconfig.h"
#include <stdint.h>
#include <esp_err.h>
#if !defined(CONFIG_HEAP_TRACING) && !defined(HEAP_TRACE_SRCFILE)
#warning "esp_heap_trace.h is included but heap tracing is disabled in menuconfig, functions are no-ops"
#endif
#ifndef CONFIG_HEAP_TRACING_STACK_DEPTH
#define CONFIG_HEAP_TRACING_STACK_DEPTH 0
#endif
typedef enum {
HEAP_TRACE_ALL,
HEAP_TRACE_LEAKS,
} heap_trace_mode_t;
typedef struct {
uint32_t ccount; ///< CCOUNT of the CPU when the allocation was made. LSB (bit value 1) is the CPU number (0 or 1).
void *address; ///< Address which was allocated
size_t size; ///< Size of the allocation
void *alloced_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which allocated the memory.
void *freed_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which freed the memory (all zero if not freed.)
} heap_trace_record_t;
/**
* @brief Initialise heap tracing in standalone mode.
* @note Standalone mode is the only mode currently supported.
*
* This function must be called before any other heap tracing functions.
*
* To disable heap tracing and allow the buffer to be freed, stop tracing and then call heap_trace_init_standalone(NULL, 0);
*
* @param record_buffer Provide a buffer to use for heap trace data. Must remain valid any time heap tracing is enabled.
* @param num_records Size of the heap trace buffer, as number of record structures.
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
* - ESP_OK Heap tracing initialised successfully.
*/
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records);
/**
* @brief Start heap tracing. All heap allocations & frees will be traced, until heap_trace_stop() is called.
*
* @note heap_trace_init_standalone() must be called to provide a valid buffer, before this function is called.
*
* @note Calling this function while heap tracing is running will reset the heap trace state and continue tracing.
*
* @param mode_param Mode for tracing.
* - HEAP_TRACE_ALL means all heap allocations and frees are traced.
* - HEAP_TRACE_LEAKS means only suspected memory leaks are traced (freed memory is removed from the trace buffer.)
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE A non-zero-length buffer has not been set via heap_trace_init_standalone().
* - ESP_OK Tracing is started.
*/
esp_err_t heap_trace_start(heap_trace_mode_t mode);
/**
* @brief Stop heap tracing.
*
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE Heap tracing was not in progress.
* - ESP_OK Heap tracing stopped.
*/
esp_err_t heap_trace_stop(void);
/**
* @brief Resume heap tracing which was previously stopped.
*
* Unlike heap_trace_start(), this function does not clear the
* buffer of any pre-existing trace data.
*
* The heap trace mode is the same as when heap_trace_start() was
* last called (or HEAP_TRACE_ALL if heap_trace_start() was never called).
*
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE Heap tracing was already started.
* - ESP_OK Heap tracing resumed.
*/
esp_err_t heap_trace_resume(void);
/**
* @brief Return number of records in the heap trace buffer
*
* It is safe to call this function while heap tracing is running.
*/
size_t heap_trace_get_count(void);
/**
* @brief Return a raw record from the heap trace buffer
*
* It is safe to call this function while heap tracing is running, however in HEAP_TRACE_LEAKS mode record indexing may
* skip entries unless heap tracing is stopped first.
*
* @param index Index (zero-based) of the record to return.
* @param[out] record Record where the heap trace data will be copied.
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE Heap tracing was not initialised.
* - ESP_ERR_INVALID_ARG Index is out of bounds for current heap trace record count.
* - ESP_OK Record returned successfully.
*/
esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record);
/**
* @brief Dump heap trace data to stdout
*
* It is safe to call this function while heap tracing is running, however in HEAP_TRACE_LEAKS mode the dump may skip
* entries unless heap tracing is stopped first.
*
* Record data is dumped in summary form. Use a tool like :doc:`idf_monitor </get-started/idf-monitor>` to decode stack
* trace addresses to the line of code they represent.
*
*/
void heap_trace_dump(void);
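Taken together, a minimal standalone-mode leak check looks roughly like the following sketch (based on the API above and on the unit test added later in this commit; the record count and the function name are placeholders):

#include "esp_heap_trace.h"

#define NUM_RECORDS 100
static heap_trace_record_t trace_records[NUM_RECORDS]; // must stay valid while tracing is enabled

void check_for_leaks(void)
{
    heap_trace_init_standalone(trace_records, NUM_RECORDS);

    heap_trace_start(HEAP_TRACE_LEAKS);   // keep only suspected leaks in the buffer
    /* ... exercise the code being checked ... */
    heap_trace_stop();

    heap_trace_dump();                    // anything still recorded was never freed
}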


@@ -46,7 +46,7 @@ multi_heap_handle_t multi_heap_register(void *start, size_t size)
__attribute__((alias("multi_heap_register_impl")));
void multi_get_heap_info(multi_heap_handle_t heap, multi_heap_info_t *info)
__attribute__((alias("multi_get_heap_info_impl")));
__attribute__((alias("multi_heap_get_info_impl")));
size_t multi_heap_free_size(multi_heap_handle_t heap)
__attribute__((alias("multi_heap_free_size_impl")));


@@ -0,0 +1,124 @@
/*
Generic test for heap tracing support
Only compiled in if CONFIG_HEAP_TRACING is set
*/
#include <esp_types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "esp_heap_trace.h"
#include "sdkconfig.h"
#include "unity.h"
#ifdef CONFIG_HEAP_TRACING
// only compile in heap tracing tests if tracing is enabled
TEST_CASE("heap trace leak check", "[heap]")
{
heap_trace_record_t recs[8];
heap_trace_init_standalone(recs, 8);
printf("Leak check test\n"); // Print something before trace starts, or stdout allocations skew total counts
fflush(stdout);
heap_trace_start(HEAP_TRACE_LEAKS);
void *a = malloc(64);
memset(a, '3', 64);
void *b = malloc(96);
memset(b, '4', 11);
printf("a.address %p vs %p b.address %p vs %p\n", a, recs[0].address, b, recs[1].address);
heap_trace_dump();
TEST_ASSERT_EQUAL(2, heap_trace_get_count());
heap_trace_record_t trace_a, trace_b;
heap_trace_get(0, &trace_a);
heap_trace_get(1, &trace_b);
printf("trace_a.address %p trace_bb.address %p\n", trace_a.address, trace_b.address);
TEST_ASSERT_EQUAL_PTR(a, trace_a.address);
TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_a.address);
TEST_ASSERT_EQUAL_PTR(recs[1].address, trace_b.address);
free(a);
TEST_ASSERT_EQUAL(1, heap_trace_get_count());
heap_trace_get(0, &trace_b);
TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
/* buffer deletes trace_a when freed,
so trace_b at head of buffer */
TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_b.address);
heap_trace_stop();
}
TEST_CASE("heap trace wrapped buffer check", "[heap]")
{
const size_t N = 8;
heap_trace_record_t recs[N];
heap_trace_init_standalone(recs, N);
heap_trace_start(HEAP_TRACE_LEAKS);
void *ptrs[N+1];
for (int i = 0; i < N+1; i++) {
ptrs[i] = malloc(i*3);
}
// because other mallocs happen as part of this control flow,
// we can't guarantee N entries of ptrs[] are in the heap check buffer.
// but we should guarantee at least the last one is
bool saw_last_ptr = false;
for (int i = 0; i < N; i++) {
heap_trace_record_t rec;
heap_trace_get(i, &rec);
if (rec.address == ptrs[N-1]) {
saw_last_ptr = true;
}
}
TEST_ASSERT(saw_last_ptr);
void *other = malloc(6);
heap_trace_dump();
for (int i = 0; i < N+1; i++) {
free(ptrs[i]);
}
heap_trace_dump();
bool saw_other = false;
for (int i = 0; i < heap_trace_get_count(); i++) {
heap_trace_record_t rec;
heap_trace_get(i, &rec);
// none of ptr[]s should be in the heap trace any more
for (int j = 0; j < N+1; j++) {
TEST_ASSERT_NOT_EQUAL(ptrs[j], rec.address);
}
if (rec.address == other) {
saw_other = true;
}
}
// 'other' pointer should be somewhere in the leak dump
TEST_ASSERT(saw_other);
heap_trace_stop();
}
#endif


@@ -58,7 +58,15 @@ typedef struct
extern const soc_reserved_region_t soc_reserved_regions[];
extern const size_t soc_reserved_region_count;
inline bool esp_ptr_dma_capable(const void *p)
inline static bool esp_ptr_dma_capable(const void *p)
{
return (intptr_t)p >= SOC_DMA_LOW && (intptr_t)p < SOC_DMA_HIGH;
}
inline static bool esp_ptr_executable(const void *p)
{
intptr_t ip = (intptr_t) p;
return (ip >= SOC_IROM_LOW && ip < SOC_IROM_HIGH)
|| (ip >= SOC_IRAM_LOW && ip < SOC_IRAM_HIGH)
|| (ip >= SOC_RTC_IRAM_LOW && ip < SOC_RTC_IRAM_HIGH);
}


@@ -275,6 +275,8 @@ CONFIG_TIMER_QUEUE_LENGTH=10
# CONFIG_HEAP_POISONING_DISABLED is not set
# CONFIG_HEAP_POISONING_LIGHT is not set
CONFIG_HEAP_POISONING_COMPREHENSIVE=y
CONFIG_HEAP_TRACING=y
CONFIG_HEAP_TRACING_STACK_DEPTH=2
#
# Log output