// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
 * Log library implementation notes.
 *
 * The log library stores all tags passed to esp_log_level_set in a linked
 * list. See the uncached_tag_entry_t structure.
 *
 * To avoid looking up the log level for a given tag each time a message is
 * printed, this library caches pointers to tags. Because the suggested
 * way of creating tags uses one 'TAG' constant per file, this caching
 * should be effective. The cache is a binary min-heap of cached_tag_entry_t
 * items, ordered by the 'generation' member. In this context, the generation
 * is an integer which is incremented each time an operation on the cache is
 * performed. When the cache is full, a new item is inserted in place of the
 * oldest item (that is, the one with the smallest 'generation' value).
 * After that, a bubble-down operation is performed to restore min-heap
 * ordering.
 *
 * The potential wrap-around of the cache generation counter is ignored for
 * now. It would only happen if someone outputs more than 4 billion log
 * entries, at which point wrap-around will not be the biggest problem.
 */
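
/*
 * Typical usage from application code (an illustrative sketch, not part of
 * this file; the tag name, message and variable are arbitrary examples):
 *
 *     static const char* TAG = "wifi";
 *     ...
 *     ESP_LOGI(TAG, "got IP: %s", ip_str);
 *
 * Each ESP_LOGx macro eventually calls esp_log_write() below with the same
 * TAG pointer, which is why caching tag pointers (rather than copying the
 * strings) works well here.
 */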

#ifndef BOOTLOADER_BUILD
#include <freertos/FreeRTOS.h>
#include <freertos/FreeRTOSConfig.h>
#include <freertos/task.h>
#include <freertos/timers.h>
#include <freertos/semphr.h>
#endif

#include "esp_attr.h"
#include "xtensa/hal.h"
#include "soc/soc.h"
#include <stdbool.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <ctype.h>

#include "esp_log.h"

#include "rom/queue.h"
#include "soc/soc_memory_layout.h"

// Number of bytes printed per line by esp_log_buffer_char and esp_log_buffer_hex
#define BYTES_PER_LINE 16

#ifndef BOOTLOADER_BUILD

// Number of tags to be cached. Must be 2**n - 1, n >= 2.
#define TAG_CACHE_SIZE 31

// Maximum time to wait for the mutex in a logging statement.
#define MAX_MUTEX_WAIT_MS 10
#define MAX_MUTEX_WAIT_TICKS ((MAX_MUTEX_WAIT_MS + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS)

// Uncomment this to enable consistency checks and cache statistics in this file.
// #define LOG_BUILTIN_CHECKS

typedef struct {
    const char* tag;
    uint32_t level : 3;
    uint32_t generation : 29;
} cached_tag_entry_t;

typedef struct uncached_tag_entry_{
    SLIST_ENTRY(uncached_tag_entry_) entries;
    uint8_t level;  // esp_log_level_t as uint8_t
    char tag[0];    // beginning of a zero-terminated string
} uncached_tag_entry_t;
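
// Note: each uncached_tag_entry_t is allocated as a single block of
// offsetof(uncached_tag_entry_t, tag) + strlen(tag) + 1 bytes, so the tag
// string is stored inline right after 'level' (see esp_log_level_set below).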

static esp_log_level_t s_log_default_level = ESP_LOG_VERBOSE;
static SLIST_HEAD(log_tags_head , uncached_tag_entry_) s_log_tags = SLIST_HEAD_INITIALIZER(s_log_tags);
static cached_tag_entry_t s_log_cache[TAG_CACHE_SIZE];
static uint32_t s_log_cache_max_generation = 0;
static uint32_t s_log_cache_entry_count = 0;
static vprintf_like_t s_log_print_func = &vprintf;
static SemaphoreHandle_t s_log_mutex = NULL;

#ifdef LOG_BUILTIN_CHECKS
static uint32_t s_log_cache_misses = 0;
#endif

static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level);
static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level);
static inline void add_to_cache(const char* tag, esp_log_level_t level);
static void heap_bubble_down(int index);
static inline void heap_swap(int i, int j);
static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag);
static inline void clear_log_level_list();

vprintf_like_t esp_log_set_vprintf(vprintf_like_t func)
{
    if (!s_log_mutex) {
        s_log_mutex = xSemaphoreCreateMutex();
    }
    xSemaphoreTake(s_log_mutex, portMAX_DELAY);

    vprintf_like_t orig_func = s_log_print_func;
    s_log_print_func = func;

    xSemaphoreGive(s_log_mutex);
    return orig_func;
}
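
/*
 * Example of redirecting log output (an illustrative sketch; the function
 * name and destination are arbitrary, only the signature has to match
 * vprintf_like_t, i.e. int (*)(const char*, va_list)):
 *
 *     static int my_log_vprintf(const char* fmt, va_list args)
 *     {
 *         // forward to the default output, or write to a UART, file, etc.
 *         return vprintf(fmt, args);
 *     }
 *
 *     esp_log_set_vprintf(&my_log_vprintf);
 */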

void esp_log_level_set(const char* tag, esp_log_level_t level)
{
    if (!s_log_mutex) {
        s_log_mutex = xSemaphoreCreateMutex();
    }
    xSemaphoreTake(s_log_mutex, portMAX_DELAY);

    // for wildcard tag, remove all linked list items and clear the cache
    if (strcmp(tag, "*") == 0) {
        s_log_default_level = level;
        clear_log_level_list();
        xSemaphoreGive(s_log_mutex);
        return;
    }

    // search for an existing tag
    uncached_tag_entry_t *it = NULL;
    SLIST_FOREACH( it, &s_log_tags, entries ) {
        if ( strcmp(it->tag, tag)==0 ) {
            // a tag in the linked list matches, update the level
            it->level = level;
            // exit the loop with it != NULL
            break;
        }
    }
    // no existing tag found, append a new one
    if ( it == NULL ) {
        // allocate new linked list entry and append it to the head of the list
        size_t entry_size = offsetof(uncached_tag_entry_t, tag) + strlen(tag) + 1;
        uncached_tag_entry_t* new_entry = (uncached_tag_entry_t*) malloc(entry_size);
        if (!new_entry) {
            xSemaphoreGive(s_log_mutex);
            return;
        }
        new_entry->level = (uint8_t) level;
        strcpy(new_entry->tag, tag);
        SLIST_INSERT_HEAD( &s_log_tags, new_entry, entries );
    }

    // search in the cache and update the entry if it exists
    for (int i = 0; i < s_log_cache_entry_count; ++i) {
#ifdef LOG_BUILTIN_CHECKS
        assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation);
#endif
        if (strcmp(s_log_cache[i].tag,tag) == 0) {
            s_log_cache[i].level = level;
            break;
        }
    }
    xSemaphoreGive(s_log_mutex);
}
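
/*
 * Behavior recap (matches the code above): setting the level for "*" becomes
 * the default for all tags and discards every per-tag override, while setting
 * it for a specific tag only overrides that tag. An illustrative sketch:
 *
 *     esp_log_level_set("*", ESP_LOG_WARN);        // default: warnings and errors only
 *     esp_log_level_set("wifi", ESP_LOG_VERBOSE);  // but keep "wifi" fully verbose
 */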

void clear_log_level_list()
{
    uncached_tag_entry_t *it;
    while ((it = SLIST_FIRST(&s_log_tags)) != NULL) {
        SLIST_REMOVE_HEAD(&s_log_tags, entries);
        free(it);
    }
    s_log_cache_entry_count = 0;
    s_log_cache_max_generation = 0;
#ifdef LOG_BUILTIN_CHECKS
    s_log_cache_misses = 0;
#endif
}
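
// Note: the ESP_LOGx macros in esp_log.h build the complete log line format
// (level letter, timestamp and tag fields) before calling esp_log_write(), so
// this function only has to resolve the effective level for the tag and
// forward the format string and argument list to s_log_print_func.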

void IRAM_ATTR esp_log_write(esp_log_level_t level,
        const char* tag,
        const char* format, ...)
{
    if (!s_log_mutex) {
        s_log_mutex = xSemaphoreCreateMutex();
    }
#if ( configUSE_TIMERS == 1 ) && ( INCLUDE_xTimerGetTimerDaemonTaskHandle == 0 )
#warning "Logging from timer context may block, consider enabling INCLUDE_xTimerGetTimerDaemonTaskHandle"
#elif ( configUSE_TIMERS == 1 ) && ( INCLUDE_xTimerGetTimerDaemonTaskHandle == 1 )
    // Avoid blocking in timer context:
    if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING
            || xTaskGetCurrentTaskHandle() == xTimerGetTimerDaemonTaskHandle()) {
        if (xSemaphoreTake(s_log_mutex, 0) == pdFALSE) {
            return;
        }
    }
    else
#endif
    if (xSemaphoreTake(s_log_mutex, MAX_MUTEX_WAIT_TICKS) == pdFALSE) {
        return;
    }
    esp_log_level_t level_for_tag;
    // Look for the tag in cache first, then in the linked list of all tags
    if (!get_cached_log_level(tag, &level_for_tag)) {
        if (!get_uncached_log_level(tag, &level_for_tag)) {
            level_for_tag = s_log_default_level;
        }
        add_to_cache(tag, level_for_tag);
#ifdef LOG_BUILTIN_CHECKS
        ++s_log_cache_misses;
#endif
    }
    xSemaphoreGive(s_log_mutex);
    if (!should_output(level, level_for_tag)) {
        return;
    }

    va_list list;
    va_start(list, format);
    (*s_log_print_func)(format, list);
    va_end(list);
}
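
// Note: get_cached_log_level() compares tag pointers rather than strings.
// This relies on each translation unit using a single constant TAG pointer
// (as described in the implementation notes above); two different pointers
// to identical strings would simply end up as separate cache entries.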
static inline bool get_cached_log_level(const char* tag, esp_log_level_t* level)
{
    // Look for `tag` in cache
    int i;
    for (i = 0; i < s_log_cache_entry_count; ++i) {
#ifdef LOG_BUILTIN_CHECKS
        assert(i == 0 || s_log_cache[(i - 1) / 2].generation < s_log_cache[i].generation);
#endif
        if (s_log_cache[i].tag == tag) {
            break;
        }
    }
    if (i == s_log_cache_entry_count) { // Not found in cache
        return false;
    }
    // Return level from cache
    *level = (esp_log_level_t) s_log_cache[i].level;
    // If cache has been filled, start taking ordering into account
    // (other options are: dynamically resize cache, add "dummy" entries
    // to the cache; this option was chosen because code is much simpler,
    // and the unfair behavior of the cache will show itself at most once,
    // when it has just been filled)
    if (s_log_cache_entry_count == TAG_CACHE_SIZE) {
        // Update item generation
        s_log_cache[i].generation = s_log_cache_max_generation++;
        // Restore heap ordering
        heap_bubble_down(i);
    }
    return true;
}

static inline void add_to_cache(const char* tag, esp_log_level_t level)
{
    uint32_t generation = s_log_cache_max_generation++;
    // First consider the case when cache is not filled yet.
    // In this case, just add new entry at the end.
    // This happens to satisfy binary min-heap ordering.
    if (s_log_cache_entry_count < TAG_CACHE_SIZE) {
        s_log_cache[s_log_cache_entry_count] = (cached_tag_entry_t) {
            .generation = generation,
            .level = level,
            .tag = tag
        };
        ++s_log_cache_entry_count;
        return;
    }

    // Cache is full, so we replace the oldest entry (which is at index 0
    // because this is a min-heap) with the new one, and do bubble-down
    // operation to restore min-heap ordering.
    s_log_cache[0] = (cached_tag_entry_t) {
        .tag = tag,
        .level = level,
        .generation = generation
    };
    heap_bubble_down(0);
}

static inline bool get_uncached_log_level(const char* tag, esp_log_level_t* level)
{
    // Walk the linked list of all tags and see if given tag is present in the list.
    // This is slow because tags are compared as strings.
    uncached_tag_entry_t *it;
    SLIST_FOREACH( it, &s_log_tags, entries ) {
        if (strcmp(tag, it->tag) == 0) {
            *level = it->level;
            return true;
        }
    }
    return false;
}

static inline bool should_output(esp_log_level_t level_for_message, esp_log_level_t level_for_tag)
{
    return level_for_message <= level_for_tag;
}
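
// The cache is stored as an implicit binary tree in s_log_cache: the children
// of the node at index i are at indices 2*i+1 and 2*i+2, and its parent is at
// (i - 1) / 2. heap_bubble_down() pushes the entry at 'index' (which has just
// been given the largest generation value) towards the leaves by repeatedly
// swapping it with its smaller child, restoring the min-heap property.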
static void heap_bubble_down(int index)
{
    while (index < TAG_CACHE_SIZE / 2) {
        int left_index = index * 2 + 1;
        int right_index = left_index + 1;
        int next = (s_log_cache[left_index].generation < s_log_cache[right_index].generation) ? left_index : right_index;
        heap_swap(index, next);
        index = next;
    }
}

static inline void heap_swap(int i, int j)
{
    cached_tag_entry_t tmp = s_log_cache[i];
    s_log_cache[i] = s_log_cache[j];
    s_log_cache[j] = tmp;
}

#endif //BOOTLOADER_BUILD


#ifndef BOOTLOADER_BUILD
#define ATTR IRAM_ATTR
#else
#define ATTR
#endif // BOOTLOADER_BUILD

// g_ticks_per_us_pro is defined in ROM and holds the CPU frequency in MHz.
// It is used here as a workaround until a proper interface for reading this
// value is available.
extern uint32_t g_ticks_per_us_pro;

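// esp_log_early_timestamp() converts the CPU cycle counter to milliseconds:
// xthal_get_ccount() counts CPU cycles and g_ticks_per_us_pro is cycles per
// microsecond, so cycles / (cycles_per_us * 1000) gives (roughly) milliseconds
// since the counter was last reset.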
uint32_t ATTR esp_log_early_timestamp()
{
    return xthal_get_ccount() / (g_ticks_per_us_pro * 1000);
}

#ifndef BOOTLOADER_BUILD

uint32_t IRAM_ATTR esp_log_timestamp()
{
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return esp_log_early_timestamp();
    }
    static uint32_t base = 0;
    if (base == 0 && xPortGetCoreID() == 0) {
        base = esp_log_early_timestamp();
    }
    return base + xTaskGetTickCount() * (1000 / configTICK_RATE_HZ);
}

#else

uint32_t esp_log_timestamp() __attribute__((alias("esp_log_early_timestamp")));

#endif //BOOTLOADER_BUILD
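
/*
 * The three functions below back the buffer-dump helpers declared in
 * esp_log.h (esp_log_buffer_hex, esp_log_buffer_char and the hexdump
 * variant). An illustrative sketch of typical calls; 'data' and 'len' are
 * arbitrary example names:
 *
 *     esp_log_buffer_hex(TAG, data, len);    // hex bytes, BYTES_PER_LINE per line
 *     esp_log_buffer_char(TAG, data, len);   // same data printed as characters
 */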

void esp_log_buffer_hex_internal(const char *tag, const void *buffer, uint16_t buff_len,
                                 esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    char hex_buffer[3*BYTES_PER_LINE+1];
    const char *ptr_line;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }

        for( int i = 0; i < bytes_cur_line; i ++ ) {
            sprintf( hex_buffer + 3*i, "%02x ", ptr_line[i] );
        }
        ESP_LOG_LEVEL( log_level, tag, "%s", hex_buffer );
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}

void esp_log_buffer_char_internal(const char *tag, const void *buffer, uint16_t buff_len,
                                  esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    char char_buffer[BYTES_PER_LINE+1];
    const char *ptr_line;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }

        for( int i = 0; i < bytes_cur_line; i ++ ) {
            sprintf( char_buffer + i, "%c", ptr_line[i] );
        }
        ESP_LOG_LEVEL( log_level, tag, "%s", char_buffer );
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}
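
// esp_log_buffer_hexdump_internal() prints one line per 16 bytes in a layout
// similar to 'hexdump -C': the buffer address, two groups of eight hex bytes,
// and the printable-ASCII rendering. Roughly (address and data below are
// illustrative):
//
//     0x3ffb5320   30 31 32 33 34 35 36 37  38 39 61 62 63 64 65 66  |0123456789abcdef|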
void esp_log_buffer_hexdump_internal( const char *tag, const void *buffer, uint16_t buff_len, esp_log_level_t log_level)
{
    if ( buff_len == 0 ) return;
    char temp_buffer[BYTES_PER_LINE+3];   //for not-byte-accessible memory
    const char *ptr_line;
    //format: field[length]
    // ADDR[10]+"   "+DATA_HEX[8*3]+" "+DATA_HEX[8*3]+"  |"+DATA_CHAR[8]+"|"
    char hd_buffer[10+3+BYTES_PER_LINE*3+3+BYTES_PER_LINE+1+1];
    char *ptr_hd;
    int bytes_cur_line;

    do {
        if ( buff_len > BYTES_PER_LINE ) {
            bytes_cur_line = BYTES_PER_LINE;
        } else {
            bytes_cur_line = buff_len;
        }
        if ( !esp_ptr_byte_accessible(buffer) ) {
            //use memcpy to get around alignment issue
            memcpy( temp_buffer, buffer, (bytes_cur_line+3)/4*4 );
            ptr_line = temp_buffer;
        } else {
            ptr_line = buffer;
        }
        ptr_hd = hd_buffer;

        ptr_hd += sprintf( ptr_hd, "%p ", buffer );
        for( int i = 0; i < BYTES_PER_LINE; i ++ ) {
            if ( (i&7)==0 ) {
                ptr_hd += sprintf( ptr_hd, " " );
            }
            if ( i < bytes_cur_line ) {
                ptr_hd += sprintf( ptr_hd, " %02x", ptr_line[i] );
            } else {
                ptr_hd += sprintf( ptr_hd, "   " );
            }
        }
        ptr_hd += sprintf( ptr_hd, "  |" );
        for( int i = 0; i < bytes_cur_line; i ++ ) {
            if ( isprint((int)ptr_line[i]) ) {
                ptr_hd += sprintf( ptr_hd, "%c", ptr_line[i] );
            } else {
                ptr_hd += sprintf( ptr_hd, "." );
            }
        }
        ptr_hd += sprintf( ptr_hd, "|" );

        ESP_LOG_LEVEL( log_level, tag, "%s", hd_buffer );
        buffer += bytes_cur_line;
        buff_len -= bytes_cur_line;
    } while( buff_len );
}