Merge branch 'feature/idf_size_json' into 'master'

idf_size: Support JSON output

Closes IDF-264

See merge request idf/esp-idf!4987
Angus Gratton 2019-06-24 19:06:12 +08:00
commit 396131433a
6 changed files with 2767 additions and 61 deletions

View file

@@ -97,7 +97,7 @@ Advanced Commands
 - ``idf.py app``, ``idf.py bootloader``, ``idf.py partition_table`` can be used to build only the app, bootloader, or partition table from the project as applicable.
 - There are matching commands ``idf.py app-flash``, etc. to flash only that single part of the project to the ESP32.
 - ``idf.py -p PORT erase_flash`` will use esptool.py to erase the ESP32's entire flash chip.
-- ``idf.py size`` prints some size information about the app. ``size-components`` and ``size-files`` are similar commands which print more detailed per-component or per-source-file information, respectively.
+- ``idf.py size`` prints some size information about the app. ``size-components`` and ``size-files`` are similar commands which print more detailed per-component or per-source-file information, respectively. If you define the variable ``-DOUTPUT_JSON=1`` when running CMake (or ``idf.py``), the output will be formatted as JSON instead of human-readable text.
 - ``idf.py reconfigure`` re-runs CMake_ even if it doesn't seem to need re-running. This isn't necessary during normal usage, but can be useful after adding/removing files from the source tree, or when modifying CMake cache variables. For example, ``idf.py -DNAME='VALUE' reconfigure`` can be used to set variable ``NAME`` in CMake cache to value ``VALUE``.
 The order of multiple ``idf.py`` commands on the same invocation is not important, they will automatically be executed in the correct order for everything to take effect (ie building before flashing, erasing before flashing, etc.).
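
To sanity-check the new mode end to end, the JSON summary can be consumed directly from a script. A minimal sketch, assuming a hypothetical map file at build/app.map and an arbitrary 90% threshold; the field names come from print_summary in the idf_size.py diff below:

    import json
    import subprocess

    # Run the size tool in JSON mode ("build/app.map" is a made-up path).
    out = subprocess.check_output(
        ["python", "tools/idf_size.py", "--json", "build/app.map"])
    summary = json.loads(out)

    # Example CI-style check: fail if static DRAM is more than 90% used.
    if summary["used_dram_ratio"] > 0.90:
        raise SystemExit("DRAM nearly full: %d bytes used" % summary["used_dram"])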

View file

@@ -382,20 +382,27 @@ macro(project project_name)
     idf_build_get_property(idf_path IDF_PATH)
     idf_build_get_property(python PYTHON)

+    set(idf_size ${python} ${idf_path}/tools/idf_size.py)
+    if(DEFINED OUTPUT_JSON AND OUTPUT_JSON)
+        list(APPEND idf_size "--json")
+    endif()
+
     # Add size targets, depend on map file, run idf_size.py
     add_custom_target(size
         DEPENDS ${project_elf}
-        COMMAND ${python} ${idf_path}/tools/idf_size.py ${mapfile}
+        COMMAND ${idf_size} ${mapfile}
         )
     add_custom_target(size-files
         DEPENDS ${project_elf}
-        COMMAND ${python} ${idf_path}/tools/idf_size.py --files ${mapfile}
+        COMMAND ${idf_size} --files ${mapfile}
         )
     add_custom_target(size-components
         DEPENDS ${project_elf}
-        COMMAND ${python} ${idf_path}/tools/idf_size.py --archives ${mapfile}
+        COMMAND ${idf_size} --archives ${mapfile}
         )
+    unset(idf_size)

     idf_build_executable(${project_elf})
     __project_info("${test_components}")

View file

@@ -22,9 +22,13 @@
 #
 from __future__ import print_function
 from __future__ import unicode_literals
+from __future__ import division
 import argparse
-import re
+import collections
+import json
 import os.path
+import re
+import sys

 DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
@@ -38,6 +42,12 @@ CHIP_SIZES = {
 }

+
+def _json_dump(obj):
+    """ Pretty-print JSON object to stdout """
+    json.dump(obj, sys.stdout, indent=4)
+    print('\n')
+

 def scan_to_header(f, header_line):
     """ Scan forward in a file until you reach 'header_line', then return """
     for line in f:
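
_json_dump is deliberately fed collections.OrderedDict instances elsewhere in this diff, so that key order in the output stays stable across runs; plain dicts on Python 2 and older Python 3 versions don't guarantee order, which would break the diff-based tests. A minimal sketch of the pattern, with made-up sizes:

    import collections
    import json
    import sys

    def _json_dump(obj):
        """ Pretty-print JSON object to stdout """
        json.dump(obj, sys.stdout, indent=4)
        print('\n')

    # OrderedDict preserves insertion order on any interpreter;
    # the 1024/512 values are purely illustrative.
    _json_dump(collections.OrderedDict([("dram_data", 1024), ("dram_bss", 512)]))
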
@@ -160,6 +170,11 @@ def main():
                         help="Triplet prefix to add before objdump executable",
                         default=DEFAULT_TOOLCHAIN_PREFIX)

+    parser.add_argument(
+        '--json',
+        help="Output results as JSON",
+        action="store_true")
+
     parser.add_argument(
         'map_file', help='MAP file produced by linker',
         type=argparse.FileType('r'))
@@ -176,20 +191,18 @@ def main():
     args = parser.parse_args()

     memory_config, sections = load_map_data(args.map_file)

-    print_summary(memory_config, sections)
+    if not args.json or not (args.archives or args.files or args.archive_details):
+        print_summary(memory_config, sections, args.json)

     if args.archives:
-        print("Per-archive contributions to ELF file:")
-        print_detailed_sizes(sections, "archive", "Archive File")
+        print_detailed_sizes(sections, "archive", "Archive File", args.json)

     if args.files:
-        print("Per-file contributions to ELF file:")
-        print_detailed_sizes(sections, "file", "Object File")
+        print_detailed_sizes(sections, "file", "Object File", args.json)

     if args.archive_details:
-        print("Symbols within the archive:", args.archive_details, "(Not all symbols may be reported)")
-        print_archive_symbols(sections, args.archive_details)
+        print_archive_symbols(sections, args.archive_details, args.json)


-def print_summary(memory_config, sections):
+def print_summary(memory_config, sections, as_json=False):
     def get_size(section):
         try:
             return sections[section]["size"]
@@ -202,40 +215,53 @@ def print_summary(memory_config, sections):
     used_data = get_size(".dram0.data")
     used_bss = get_size(".dram0.bss")
     used_dram = used_data + used_bss
+    try:
+        used_dram_ratio = used_dram / total_dram
+    except ZeroDivisionError:
+        used_dram_ratio = float('nan')
     used_iram = sum(get_size(s) for s in sections if s.startswith(".iram0"))
+    try:
+        used_iram_ratio = used_iram / total_iram
+    except ZeroDivisionError:
+        used_iram_ratio = float('nan')
     flash_code = get_size(".flash.text")
     flash_rodata = get_size(".flash.rodata")
     total_size = used_data + used_iram + flash_code + flash_rodata

-    print("Total sizes:")
-    print(" DRAM .data size: %7d bytes" % used_data)
-    print(" DRAM .bss size: %7d bytes" % used_bss)
-    print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
-          (used_dram, total_dram - used_dram,
-           100.0 * used_dram / total_dram))
-    print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
-          (used_iram, total_iram - used_iram,
-           100.0 * used_iram / total_iram))
-    print(" Flash code: %7d bytes" % flash_code)
-    print(" Flash rodata: %7d bytes" % flash_rodata)
-    print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
+    if as_json:
+        _json_dump(collections.OrderedDict([
+            ("dram_data", used_data),
+            ("dram_bss", used_bss),
+            ("used_dram", used_dram),
+            ("available_dram", total_dram - used_dram),
+            ("used_dram_ratio", used_dram_ratio),
+            ("used_iram", used_iram),
+            ("available_iram", total_iram - used_iram),
+            ("used_iram_ratio", used_iram_ratio),
+            ("flash_code", flash_code),
+            ("flash_rodata", flash_rodata),
+            ("total_size", total_size)
+        ]))
+    else:
+        print("Total sizes:")
+        print(" DRAM .data size: %7d bytes" % used_data)
+        print(" DRAM .bss size: %7d bytes" % used_bss)
+        print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
+              (used_dram, total_dram - used_dram, 100.0 * used_dram_ratio))
+        print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
+              (used_iram, total_iram - used_iram, 100.0 * used_iram_ratio))
+        print(" Flash code: %7d bytes" % flash_code)
+        print(" Flash rodata: %7d bytes" % flash_rodata)
+        print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))


-def print_detailed_sizes(sections, key, header):
+def print_detailed_sizes(sections, key, header, as_json=False):
     sizes = sizes_by_key(sections, key)

-    headings = (header,
-                "DRAM .data",
-                "& .bss",
-                "IRAM",
-                "Flash code",
-                "& rodata",
-                "Total")
-    print("%24s %10s %6s %6s %10s %8s %7s" % headings)
     result = {}
     for k in sizes:
         v = sizes[k]
-        result[k] = {}
+        result[k] = collections.OrderedDict()
         result[k]["data"] = v.get(".dram0.data", 0)
         result[k]["bss"] = v.get(".dram0.bss", 0)
         result[k]["iram"] = sum(t for (s,t) in v.items() if s.startswith(".iram0"))
@@ -250,20 +276,37 @@ def print_detailed_sizes(sections, key, header):
     def return_header(elem):
         return elem[0]
     s = sorted(list(result.items()), key=return_header)

     # do a secondary sort in order to have consistent order (for diff-ing the output)
-    for k,v in sorted(s, key=return_total_size, reverse=True):
-        if ":" in k:  # print subheadings for key of format archive:file
-            sh,k = k.split(":")
-        print("%24s %10d %6d %6d %10d %8d %7d" % (k[:24],
-              v["data"],
-              v["bss"],
-              v["iram"],
-              v["flash_text"],
-              v["flash_rodata"],
-              v["total"]))
+    s = sorted(s, key=return_total_size, reverse=True)
+
+    if as_json:
+        _json_dump(collections.OrderedDict(s))
+    else:
+        print("Per-%s contributions to ELF file:" % key)
+        headings = (header,
+                    "DRAM .data",
+                    "& .bss",
+                    "IRAM",
+                    "Flash code",
+                    "& rodata",
+                    "Total")
+        header_format = "%24s %10d %6d %6d %10d %8d %7d"
+        print(header_format.replace("d", "s") % headings)
+
+        for k,v in s:
+            if ":" in k:  # print subheadings for key of format archive:file
+                sh,k = k.split(":")
+
+            print(header_format % (k[:24],
+                                   v["data"],
+                                   v["bss"],
+                                   v["iram"],
+                                   v["flash_text"],
+                                   v["flash_rodata"],
+                                   v["total"]))


-def print_archive_symbols(sections, archive):
+def print_archive_symbols(sections, archive, as_json=False):
     interested_sections = [".dram0.data", ".dram0.bss", ".iram0.text", ".iram0.vectors", ".flash.text", ".flash.rodata"]
     result = {}
     for t in interested_sections:
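
In JSON mode each archive (or object file) maps to the flat record built above, so downstream tooling can rank contributors without scraping the text table. A minimal sketch, assuming the --archives output was saved to a hypothetical sizes.json; the per-entry keys (data, bss, iram, flash_text, flash_rodata, total) come from the diff above:

    import json

    with open("sizes.json") as f:  # made-up filename
        archives = json.load(f)

    # Print the three largest contributors by total size.
    top = sorted(archives.items(), key=lambda kv: kv[1]["total"], reverse=True)
    for name, v in top[:3]:
        print("%s: %d bytes total (%d flash code)" % (name, v["total"], v["flash_text"]))
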
@@ -277,15 +320,26 @@ def print_archive_symbols(sections, archive):
                 continue
             s["sym_name"] = re.sub("(.text.|.literal.|.data.|.bss.|.rodata.)", "", s["sym_name"])
             result[section_name][s["sym_name"]] = result[section_name].get(s["sym_name"], 0) + s["size"]

+    # build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
+    section_symbols = collections.OrderedDict()
     for t in interested_sections:
-        print("\nSymbols from section:", t)
-        section_total = 0
         s = sorted(list(result[t].items()), key=lambda k_v: k_v[0])
         # do a secondary sort in order to have consistent order (for diff-ing the output)
-        for key,val in sorted(s, key=lambda k_v: k_v[1], reverse=True):
-            print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
-            section_total += val
-        print("\nSection total:",section_total)
+        s = sorted(s, key=lambda k_v: k_v[1], reverse=True)
+        section_symbols[t] = collections.OrderedDict(s)
+
+    if as_json:
+        _json_dump(section_symbols)
+    else:
+        print("Symbols within the archive: %s (Not all symbols may be reported)" % (archive))
+        for t,s in section_symbols.items():
+            section_total = 0
+            print("\nSymbols from section:", t)
+            for key, val in s.items():
+                print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
+                section_total += val
+            print("\nSection total:",section_total)

 if __name__ == "__main__":

File diff suppressed because it is too large

View file

@@ -2,11 +2,23 @@
 { coverage debug sys \
     && coverage erase &> output \
+    && echo -e "\n***\nRunning idf_size.py..." >> output \
     && coverage run -a $IDF_PATH/tools/idf_size.py app.map &>> output \
+    && echo -e "\n***\nRunning idf_size.py --archives..." >> output \
     && coverage run -a $IDF_PATH/tools/idf_size.py --archives app.map &>> output \
+    && echo -e "\n***\nRunning idf_size.py --files..." >> output \
     && coverage run -a $IDF_PATH/tools/idf_size.py --files app.map &>> output \
+    && echo -e "\n***\nRunning idf_size.py --archive_details..." >> output \
     && coverage run -a $IDF_PATH/tools/idf_size.py --archive_details libdriver.a app.map &>> output \
+    && echo -e "\n***\nProducing JSON output..." >> output \
+    && coverage run -a $IDF_PATH/tools/idf_size.py --json app.map &>> output \
+    && coverage run -a $IDF_PATH/tools/idf_size.py --json --archives app.map &>> output \
+    && coverage run -a $IDF_PATH/tools/idf_size.py --json --files app.map &>> output \
+    && coverage run -a $IDF_PATH/tools/idf_size.py --json --archive_details libdriver.a app.map &>> output \
+    && echo -e "\n***\nRunning idf_size_tests.py..." >> output \
     && coverage run -a $IDF_PATH/tools/test_idf_size/test_idf_size.py &>> output \
-    && diff output expected_output \
+    && diff -Z output expected_output \
     && coverage report \
 ; } || { echo 'The test for idf_size has failed. Please examine the artifacts.' ; exit 1; }
+# Note: diff -Z is used because some versions of Python print trailing whitespace for JSON pretty-printing, and some don't
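
The version difference behind diff -Z is easy to reproduce: with indent set, Python 2's json module keeps the default ", " item separator, leaving a trailing space at the end of each line, while Python 3.4+ changed the default to "," when indenting. Passing separators explicitly makes all versions agree. A minimal sketch:

    import json

    data = {"a": 1, "b": 2}

    # On Python 2 the first line ends in a trailing space after the comma;
    # on Python 3.4+ it doesn't.
    print(repr(json.dumps(data, indent=4)))

    # Explicit separators give identical output on all versions:
    print(json.dumps(data, indent=4, separators=(',', ': ')))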

View file

@@ -24,18 +24,19 @@ except ImportError:
 if __name__ == "__main__":
+    # Should deliver a RuntimeError as the 'test' header doesn't exist
     try:
         idf_size.scan_to_header([], 'test')
-    except RuntimeError:
-        pass
+    except RuntimeError as e:
+        assert "Didn't find line" in str(e)

+    # Should deliver a RuntimeError as there's no content under the heading
     try:
         idf_size.load_memory_config(["Memory Configuration"])
         pass
-    except RuntimeError:
-        pass
+    except RuntimeError as e:
+        assert "End of file" in str(e)

-    try:
-        idf_size.print_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {})
-    except ZeroDivisionError:
-        pass
+    # This used to crash with a division by zero error but now it just prints nan% due to
+    # zero lengths
+    idf_size.print_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {})
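
The nan% behaviour referenced in the comment above follows from the float('nan') fallback added to print_summary: Python's %-formatting renders NaN as "nan" instead of raising. A minimal standalone sketch of the same pattern:

    # Zero segment lengths now fall back to NaN instead of raising:
    total_dram = 0
    used_dram = 0
    try:
        ratio = used_dram / total_dram
    except ZeroDivisionError:
        ratio = float('nan')
    print("%.1f%% used" % (100.0 * ratio))  # prints "nan% used"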