components: Correct the Python coding style

Roland Dobai 2018-12-04 13:06:46 +01:00
parent c69907a54b
commit e1e6c1ae0a
6 changed files with 326 additions and 300 deletions
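This style cleanup lets several scripts be dropped from the temporary exclude list in .flake8. A quick way to re-check one of the cleaned-up files locally (a sketch; assumes flake8 is installed in the active Python environment and is run from the repository root so the .flake8 config below is picked up):

    import subprocess
    import sys

    # Raises CalledProcessError if flake8 reports any violations for this file.
    subprocess.check_call([sys.executable, "-m", "flake8",
                           "components/espcoredump/espcoredump.py"])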

.flake8

@ -150,22 +150,15 @@ exclude =
components/unity/unity,
examples/build_system/cmake/import_lib/main/lib/tinyxml2
# autogenerated scripts
examples/provisioning/custom_config/components/custom_provisioning/python/custom_config_pb2.py,
# temporary list (should be empty)
components/app_update/dump_otadata.py,
components/app_update/gen_empty_partition.py,
components/espcoredump/espcoredump.py,
components/espcoredump/test/test_espcoredump.py,
components/nvs_flash/nvs_partition_generator/nvs_partition_gen.py,
components/partition_table/gen_esp32part.py,
components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py,
components/protocomm/python/constants_pb2.py,
components/protocomm/python/sec0_pb2.py,
components/protocomm/python/sec1_pb2.py,
components/protocomm/python/session_pb2.py,
components/ulp/esp32ulp_mapgen.py,
components/wifi_provisioning/python/wifi_config_pb2.py,
components/wifi_provisioning/python/wifi_constants_pb2.py,
examples/provisioning/custom_config/components/custom_provisioning/python/custom_config_pb2.py,
# temporary list (should be empty)
components/nvs_flash/nvs_partition_generator/nvs_partition_gen.py,
tools/ci/apply_bot_filter.py,
tools/cmake/convert_to_cmake.py,
tools/esp_app_trace/apptrace_proc.py,

components/espcoredump/espcoredump.py

@ -5,6 +5,7 @@
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import sys
try:
from builtins import zip
from builtins import str
@ -15,13 +16,11 @@ except ImportError:
print('Import has failed probably because of the missing "future" package. Please install all the packages for '
'interpreter {} from the $IDF_PATH/requirements.txt file.'.format(sys.executable))
sys.exit(1)
import sys
import os
import argparse
import subprocess
import tempfile
import struct
import array
import errno
import base64
import binascii
@ -53,17 +52,18 @@ class ESPCoreDumpError(RuntimeError):
"""
super(ESPCoreDumpError, self).__init__(message)
class BinStruct(object):
"""Binary structure representation
Subclasses must specify actual structure layout using 'fields' and 'format' members.
For example, the following subclass represents structure with two fields:
f1 of size 2 bytes and 4 bytes f2. Little endian.
For example, the following subclass represents structure with two fields:
f1 of size 2 bytes and 4 bytes f2. Little endian.
class SomeStruct(BinStruct):
fields = ("f1",
"f2")
format = "<HL"
Then subclass can be used to initialize fields of underlaying structure and convert it to binary representation:
f = open('some_struct.bin', 'wb')
s = SomeStruct()
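As an aside, the BinStruct pattern this docstring describes can be exercised standalone; a minimal sketch (simplified field handling, hypothetical class name, not the exact class from espcoredump.py):

    import struct

    class BinStructSketch(object):
        """Pack named fields with a struct format string (simplified illustration)."""
        fields = ()
        format = ""

        def __init__(self):
            for f in self.fields:          # zero-initialize each declared field
                setattr(self, f, 0)

        def dump(self):
            # pack fields in declaration order according to 'format'
            return struct.pack(self.format, *(getattr(self, f) for f in self.fields))

    class SomeStruct(BinStructSketch):
        fields = ("f1", "f2")
        format = "<HL"                     # little endian: 2-byte f1, 4-byte f2

    s = SomeStruct()
    s.f1, s.f2 = 0x1234, 0xDEADBEEF
    assert s.dump() == struct.pack("<HL", 0x1234, 0xDEADBEEF)   # 6 bytes total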
@ -89,7 +89,7 @@ class BinStruct(object):
def dump(self):
"""Returns binary representation of structure
"""
keys = self.__class__.fields
keys = self.__class__.fields
return struct.pack(self.__class__.format, *(self.__dict__[k] for k in keys))
@ -124,17 +124,17 @@ class Elf32FileHeader(BinStruct):
class Elf32ProgramHeader(BinStruct):
"""ELF32 program header
"""
fields = ("p_type",
"p_offset",
"p_vaddr",
"p_paddr",
"p_filesz",
"p_memsz",
"p_flags",
"p_align")
format = "<LLLLLLLL"
"""ELF32 program header
"""
fields = ("p_type",
"p_offset",
"p_vaddr",
"p_paddr",
"p_filesz",
"p_memsz",
"p_flags",
"p_align")
format = "<LLLLLLLL"
class Elf32NoteDesc(object):
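For orientation, the "<LLLLLLLL" format above is eight little-endian 32-bit words, i.e. a 32-byte ELF32 program header; a minimal round trip with hypothetical field values:

    import struct

    # p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align
    PHDR_FORMAT = "<LLLLLLLL"
    assert struct.calcsize(PHDR_FORMAT) == 32

    raw = struct.pack(PHDR_FORMAT, 1, 0x54, 0x3FFB0000, 0x3FFB0000, 0x100, 0x100, 0x6, 0)
    p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align = struct.unpack(PHDR_FORMAT, raw)
    assert p_type == 1 and p_flags == 0x6   # PT_LOAD segment, PF_R | PF_W, hypothetical addresses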
@ -177,12 +177,12 @@ class XtensaPrStatus(BinStruct):
class ESPCoreDumpSegment(esptool.ImageSegment):
""" Wrapper class for a program segment in core ELF file, has a segment
type and flags as well as the common properties of an ImageSegment.
type and flags as well as the common properties of an ImageSegment.
"""
# segment flags
PF_X = 0x1 # Execute
PF_W = 0x2 # Write
PF_R = 0x4 # Read
PF_X = 0x1 # Execute
PF_W = 0x2 # Write
PF_R = 0x4 # Read
def __init__(self, addr, data, type, flags):
"""Constructor for program segment
@ -217,7 +217,7 @@ class ESPCoreDumpSegment(esptool.ImageSegment):
class ESPCoreDumpSection(esptool.ELFSection):
""" Wrapper class for a section in core ELF file, has a section
flags as well as the common properties of an esptool.ELFSection.
flags as well as the common properties of an esptool.ELFSection.
"""
# section flags
SHF_WRITE = 0x1
@ -255,14 +255,14 @@ class ESPCoreDumpSection(esptool.ELFSection):
class ESPCoreDumpElfFile(esptool.ELFFile):
""" Wrapper class for core dump ELF file
""" Wrapper class for core dump ELF file
"""
# ELF file type
ET_NONE = 0x0 # No file type
ET_REL = 0x1 # Relocatable file
ET_EXEC = 0x2 # Executable file
ET_DYN = 0x3 # Shared object file
ET_CORE = 0x4 # Core file
ET_NONE = 0x0 # No file type
ET_REL = 0x1 # Relocatable file
ET_EXEC = 0x2 # Executable file
ET_DYN = 0x3 # Shared object file
ET_CORE = 0x4 # Core file
# ELF file version
EV_NONE = 0x0
EV_CURRENT = 0x1
@ -359,15 +359,15 @@ class ESPCoreDumpElfFile(esptool.ELFFile):
f.seek(offs)
return f.read(size)
prog_sections = [ESPCoreDumpSection(lookup_string(n_offs), lma, read_data(offs, size), flags) for (n_offs, _type, flags, lma, size, offs) in prog_sections
if lma != 0]
prog_sections = [ESPCoreDumpSection(lookup_string(n_offs), lma, read_data(offs, size), flags)
for (n_offs, _type, flags, lma, size, offs) in prog_sections if lma != 0]
self.sections = prog_sections
def _read_program_segments(self, f, seg_table_offs, entsz, num):
"""Reads core dump program segments from ELF file
"""
f.seek(seg_table_offs)
seg_table = f.read(entsz*num)
seg_table = f.read(entsz * num)
LEN_SEG_HEADER = 0x20
if len(seg_table) == 0:
raise ESPCoreDumpError("No program header table found at offset %04x in ELF file." % seg_table_offs)
@ -387,8 +387,8 @@ class ESPCoreDumpElfFile(esptool.ELFFile):
f.seek(offs)
return f.read(size)
self.program_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags) for (type, offset, vaddr, filesz,flags) in prog_segments
if vaddr != 0]
self.program_segments = [ESPCoreDumpSegment(vaddr, read_data(offset, filesz), type, flags)
for (type, offset, vaddr, filesz,flags) in prog_segments if vaddr != 0]
def add_program_segment(self, addr, data, type, flags):
"""Adds new program segment
@ -400,11 +400,11 @@ class ESPCoreDumpElfFile(esptool.ELFFile):
for ps in self.program_segments:
seg_len = len(ps.data)
if addr >= ps.addr and addr < (ps.addr + seg_len):
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
if (addr + data_sz) > ps.addr and (addr + data_sz) <= (ps.addr + seg_len):
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
raise ESPCoreDumpError("Can not add overlapping region [%x..%x] to ELF file. Conflict with existing [%x..%x]." %
(addr, addr + data_sz - 1, ps.addr, ps.addr + seg_len - 1))
# append
self.program_segments.append(ESPCoreDumpSegment(addr, data, type, flags))
@ -434,11 +434,11 @@ class ESPCoreDumpElfFile(esptool.ELFFile):
phdr.p_type = self.program_segments[i].type
phdr.p_offset = cur_off
phdr.p_vaddr = self.program_segments[i].addr
phdr.p_paddr = phdr.p_vaddr # TODO
phdr.p_paddr = phdr.p_vaddr # TODO
phdr.p_filesz = len(self.program_segments[i].data)
phdr.p_memsz = phdr.p_filesz # TODO
phdr.p_memsz = phdr.p_filesz # TODO
phdr.p_flags = self.program_segments[i].flags
phdr.p_align = 0 # TODO
phdr.p_align = 0 # TODO
f.write(phdr.dump())
cur_off += phdr.p_filesz
# write program segments
@ -463,7 +463,7 @@ class ESPCoreDumpLoader(object):
ESP32_COREDUMP_HDR_SZ = struct.calcsize(ESP32_COREDUMP_HDR_FMT)
ESP32_COREDUMP_TSK_HDR_FMT = '<3L'
ESP32_COREDUMP_TSK_HDR_SZ = struct.calcsize(ESP32_COREDUMP_TSK_HDR_FMT)
def __init__(self):
"""Base constructor for core dump loader
"""
@ -475,51 +475,51 @@ class ESPCoreDumpLoader(object):
# from "gdb/xtensa-tdep.h"
# typedef struct
# {
#0 xtensa_elf_greg_t pc;
#1 xtensa_elf_greg_t ps;
#2 xtensa_elf_greg_t lbeg;
#3 xtensa_elf_greg_t lend;
#4 xtensa_elf_greg_t lcount;
#5 xtensa_elf_greg_t sar;
#6 xtensa_elf_greg_t windowstart;
#7 xtensa_elf_greg_t windowbase;
#8..63 xtensa_elf_greg_t reserved[8+48];
#64 xtensa_elf_greg_t ar[64];
# 0 xtensa_elf_greg_t pc;
# 1 xtensa_elf_greg_t ps;
# 2 xtensa_elf_greg_t lbeg;
# 3 xtensa_elf_greg_t lend;
# 4 xtensa_elf_greg_t lcount;
# 5 xtensa_elf_greg_t sar;
# 6 xtensa_elf_greg_t windowstart;
# 7 xtensa_elf_greg_t windowbase;
# 8..63 xtensa_elf_greg_t reserved[8+48];
# 64 xtensa_elf_greg_t ar[64];
# } xtensa_elf_gregset_t;
REG_PC_IDX=0
REG_PS_IDX=1
REG_LB_IDX=2
REG_LE_IDX=3
REG_LC_IDX=4
REG_SAR_IDX=5
REG_WS_IDX=6
REG_WB_IDX=7
REG_AR_START_IDX=64
REG_AR_NUM=64
# FIXME: acc to xtensa_elf_gregset_t number of regs must be 128,
REG_PC_IDX = 0
REG_PS_IDX = 1
REG_LB_IDX = 2
REG_LE_IDX = 3
REG_LC_IDX = 4
REG_SAR_IDX = 5
# REG_WS_IDX = 6
# REG_WB_IDX = 7
REG_AR_START_IDX = 64
# REG_AR_NUM = 64
# FIXME: acc to xtensa_elf_gregset_t number of regs must be 128,
# but gdb complains when it is less than 129
REG_NUM=129
REG_NUM = 129
XT_SOL_EXIT=0
XT_SOL_PC=1
XT_SOL_PS=2
XT_SOL_NEXT=3
XT_SOL_AR_START=4
XT_SOL_AR_NUM=4
XT_SOL_FRMSZ=8
# XT_SOL_EXIT = 0
XT_SOL_PC = 1
XT_SOL_PS = 2
# XT_SOL_NEXT = 3
XT_SOL_AR_START = 4
XT_SOL_AR_NUM = 4
# XT_SOL_FRMSZ = 8
XT_STK_EXIT=0
XT_STK_PC=1
XT_STK_PS=2
XT_STK_AR_START=3
XT_STK_AR_NUM=16
XT_STK_SAR=19
XT_STK_EXCCAUSE=20
XT_STK_EXCVADDR=21
XT_STK_LBEG=22
XT_STK_LEND=23
XT_STK_LCOUNT=24
XT_STK_FRMSZ=25
XT_STK_EXIT = 0
XT_STK_PC = 1
XT_STK_PS = 2
XT_STK_AR_START = 3
XT_STK_AR_NUM = 16
XT_STK_SAR = 19
# XT_STK_EXCCAUSE = 20
# XT_STK_EXCVADDR = 21
XT_STK_LBEG = 22
XT_STK_LEND = 23
XT_STK_LCOUNT = 24
XT_STK_FRMSZ = 25
regs = [0] * REG_NUM
# TODO: support for growing up stacks
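The 129-entry register array built here is what later gets packed into the PRSTATUS note payload; a standalone illustration of that packing, with hypothetical PC/PS values:

    import struct

    REG_NUM = 129                       # gregset size GDB expects (see FIXME above)
    regs = [0] * REG_NUM
    regs[0] = 0x400D0F44                # REG_PC_IDX -> hypothetical program counter
    regs[1] = 0x00060530                # REG_PS_IDX -> hypothetical PS value
    payload = struct.pack("<%dL" % REG_NUM, *regs)
    assert len(payload) == REG_NUM * 4  # 516 bytes of little-endian 32-bit registers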
@ -541,7 +541,7 @@ class ESPCoreDumpLoader(object):
regs[REG_LB_IDX] = stack[XT_STK_LBEG]
regs[REG_LE_IDX] = stack[XT_STK_LEND]
regs[REG_LC_IDX] = stack[XT_STK_LCOUNT]
# FIXME: crashed and some running tasks (e.g. prvIdleTask) have EXCM bit set
# FIXME: crashed and some running tasks (e.g. prvIdleTask) have EXCM bit set
# and GDB can not unwind callstack properly (it implies not windowed call0)
if regs[REG_PS_IDX] & (1 << 5):
regs[REG_PS_IDX] &= ~(1 << 4)
@ -550,14 +550,14 @@ class ESPCoreDumpLoader(object):
regs[REG_PS_IDX] = stack[XT_SOL_PS]
for i in range(XT_SOL_AR_NUM):
regs[REG_AR_START_IDX + i] = stack[XT_SOL_AR_START + i]
nxt = stack[XT_SOL_NEXT]
# nxt = stack[XT_SOL_NEXT]
# TODO: remove magic hack with saved PC to get proper value
regs[REG_PC_IDX] = ((regs[REG_PC_IDX] & 0x3FFFFFFF) | 0x40000000)
if regs[REG_PC_IDX] & 0x80000000:
regs[REG_PC_IDX] = (regs[REG_PC_IDX] & 0x3fffffff) | 0x40000000;
regs[REG_PC_IDX] = (regs[REG_PC_IDX] & 0x3fffffff) | 0x40000000
if regs[REG_AR_START_IDX + 0] & 0x80000000:
regs[REG_AR_START_IDX + 0] = (regs[REG_AR_START_IDX + 0] & 0x3fffffff) | 0x40000000;
regs[REG_AR_START_IDX + 0] = (regs[REG_AR_START_IDX + 0] & 0x3fffffff) | 0x40000000
return regs
def remove_tmp_file(self, fname):
@ -587,7 +587,7 @@ class ESPCoreDumpLoader(object):
raise ESPCoreDumpLoaderError("Core dump version '%d' is not supported! Should be up to '%d'." % (coredump_ver, self.ESP32_COREDUMP_VESION))
tcbsz_aligned = tcbsz
if tcbsz_aligned % 4:
tcbsz_aligned = 4*(old_div(tcbsz_aligned,4) + 1)
tcbsz_aligned = 4 * (old_div(tcbsz_aligned,4) + 1)
core_off += self.ESP32_COREDUMP_HDR_SZ
core_elf = ESPCoreDumpElfFile()
notes = b''
@ -600,17 +600,18 @@ class ESPCoreDumpLoader(object):
else:
stack_len = stack_top - stack_end
stack_base = stack_end
stack_len_aligned = stack_len
if stack_len_aligned % 4:
stack_len_aligned = 4*(old_div(stack_len_aligned,4) + 1)
stack_len_aligned = 4 * (old_div(stack_len_aligned,4) + 1)
core_off += self.ESP32_COREDUMP_TSK_HDR_SZ
logging.info("Read TCB %d bytes @ 0x%x" % (tcbsz_aligned, tcb_addr))
data = self.read_data(core_off, tcbsz_aligned)
try:
if tcbsz != tcbsz_aligned:
core_elf.add_program_segment(tcb_addr, data[:tcbsz - tcbsz_aligned], ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
core_elf.add_program_segment(tcb_addr, data[:tcbsz - tcbsz_aligned],
ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
else:
core_elf.add_program_segment(tcb_addr, data, ESPCoreDumpElfFile.PT_LOAD, ESPCoreDumpSegment.PF_R | ESPCoreDumpSegment.PF_W)
except ESPCoreDumpError as e:
@ -632,8 +633,8 @@ class ESPCoreDumpLoader(object):
print(e)
return None
prstatus = XtensaPrStatus()
prstatus.pr_cursig = 0 # TODO: set sig only for current/failed task
prstatus.pr_pid = i # TODO: use pid assigned by OS
prstatus.pr_cursig = 0 # TODO: set sig only for current/failed task
prstatus.pr_pid = i # TODO: use pid assigned by OS
note = Elf32NoteDesc("CORE", 1, prstatus.dump() + struct.pack("<%dL" % len(task_regs), *task_regs)).dump()
notes += note
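The note emitted here follows the generic ELF note layout (namesz, descsz, type, then the 4-byte-padded name and descriptor); a rough standalone sketch of that layout (simplified, not the Elf32NoteDesc implementation):

    import struct

    def pack_note(name, note_type, desc):
        # ELF note: three 32-bit words, then the NUL-terminated name and the
        # descriptor, each padded up to a 4-byte boundary
        name_b = name.encode() + b"\x00"
        def pad4(b):
            return b + b"\x00" * ((4 - len(b) % 4) % 4)
        return struct.pack("<LLL", len(name_b), len(desc), note_type) + pad4(name_b) + pad4(desc)

    note = pack_note("CORE", 1, b"\x00" * 72)   # hypothetical prstatus-sized payload
    assert len(note) % 4 == 0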
@ -650,7 +651,7 @@ class ESPCoreDumpLoader(object):
core_elf.add_program_segment(ps.addr, ps.data, ESPCoreDumpElfFile.PT_LOAD, ps.flags)
except ESPCoreDumpError as e:
logging.warning("Skip ROM segment %d bytes @ 0x%x. (Reason: %s)" % (len(ps.data), ps.addr, e))
core_elf.e_type = ESPCoreDumpElfFile.ET_CORE
core_elf.e_machine = ESPCoreDumpElfFile.EM_XTENSA
if core_fname:
@ -673,7 +674,7 @@ class ESPCoreDumpLoader(object):
class ESPCoreDumpFileLoader(ESPCoreDumpLoader):
"""Core dump file loader class
"""
def __init__(self, path, b64 = False):
def __init__(self, path, b64=False):
"""Constructor for core dump file loader
"""
super(ESPCoreDumpFileLoader, self).__init__()
@ -725,7 +726,7 @@ class ESPCoreDumpFlashLoader(ESPCoreDumpLoader):
if e == '.pyc':
self.path = self.path[:-1]
else:
self.path = tool_path
self.path = tool_path
self.port = port
self.baud = baud
self.chip = chip
@ -780,7 +781,7 @@ class ESPCoreDumpFlashLoader(ESPCoreDumpLoader):
data = self.read_data(0, self.dump_sz - self.ESP32_COREDUMP_FLASH_CRC_SZ)
data_crc = binascii.crc32(data) & 0xffffffff
if dump_crc != data_crc:
raise ESPCoreDumpLoaderError("Invalid core dump CRC %x, should be %x" % (data_crc, dump_crc))
raise ESPCoreDumpLoaderError("Invalid core dump CRC %x, should be %x" % (data_crc, dump_crc))
return super(ESPCoreDumpFlashLoader, self).create_corefile(core_fname)
@ -876,8 +877,9 @@ class GDBMIStreamConsoleHandler(GDBMIOutStreamHandler):
"""
TAG = '~'
def load_aux_elf(elf_path):
""" Loads auxilary ELF file and composes GDB command to read its symbols
""" Loads auxilary ELF file and composes GDB command to read its symbols
"""
elf = None
sym_cmd = ''
@ -888,6 +890,7 @@ def load_aux_elf(elf_path):
sym_cmd = 'add-symbol-file %s 0x%x' % (elf_path, s.addr)
return (elf, sym_cmd)
def dbg_corefile(args):
""" Command to load core dump from file or flash and run GDB debug session with it
"""
@ -911,18 +914,18 @@ def dbg_corefile(args):
loader.cleanup()
return
p = subprocess.Popen(
bufsize = 0,
args = [args.gdb,
'--nw', # ignore .gdbinit
'--core=%s' % core_fname, # core file,
'-ex', rom_sym_cmd,
args.prog],
stdin = None, stdout = None, stderr = None,
close_fds = CLOSE_FDS
)
p = subprocess.Popen(bufsize=0,
args=[args.gdb,
'--nw', # ignore .gdbinit
'--core=%s' % core_fname, # core file,
'-ex', rom_sym_cmd,
args.prog
],
stdin=None, stdout=None, stderr=None,
close_fds=CLOSE_FDS
)
p.wait()
if loader:
if not args.core and not args.save_core:
loader.remove_tmp_file(core_fname)
@ -931,13 +934,14 @@ def dbg_corefile(args):
def info_corefile(args):
""" Command to load core dump from file or flash and print it's data in user friendly form
""" Command to load core dump from file or flash and print it's data in user friendly form
"""
global CLOSE_FDS
def gdbmi_console_stream_handler(ln):
sys.stdout.write(ln)
sys.stdout.flush()
def gdbmi_read2prompt(f, out_handlers=None):
while True:
ln = f.readline().decode('utf-8').rstrip(' \r\n')
@ -953,20 +957,18 @@ def info_corefile(args):
def gdbmi_start(handlers, gdb_cmds):
gdb_args = [args.gdb,
'--quiet', # inhibit dumping info at start-up
'--nx', # inhibit window interface
'--nw', # ignore .gdbinit
'--interpreter=mi2', # use GDB/MI v2
'--core=%s' % core_fname] # core file
'--quiet', # inhibit dumping info at start-up
'--nx', # inhibit window interface
'--nw', # ignore .gdbinit
'--interpreter=mi2', # use GDB/MI v2
'--core=%s' % core_fname] # core file
for c in gdb_cmds:
gdb_args += ['-ex', c]
gdb_args.append(args.prog)
p = subprocess.Popen(
bufsize = 0,
args = gdb_args,
stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT,
close_fds = CLOSE_FDS
)
p = subprocess.Popen(bufsize=0,
args=gdb_args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=CLOSE_FDS)
gdbmi_read2prompt(p.stdout, handlers)
return p
@ -1093,13 +1095,13 @@ def info_corefile(args):
p.wait()
p.stdin.close()
p.stdout.close()
if loader:
if not args.core and not args.save_core:
loader.remove_tmp_file(core_fname)
loader.cleanup()
print('Done!')
def main():
parser = argparse.ArgumentParser(description='espcoredump.py v%s - ESP32 Core Dump Utility' % __version__, prog='espcoredump')
@ -1130,9 +1132,12 @@ def main():
parser_debug_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=2)
parser_debug_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
parser_debug_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
parser_debug_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
parser_debug_coredump.add_argument('--off', '-o', help='Ofsset of coredump partition in flash (type "make partition_table" to see).', type=int, default=0x110000)
parser_debug_coredump.add_argument('--save-core', '-s', help='Save core to file. Othwerwise temporary core file will be deleted. Ignored with "-c"', type=str)
parser_debug_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
'raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
parser_debug_coredump.add_argument('--off', '-o', help='Offset of coredump partition in flash '
'(type "make partition_table" to see).', type=int, default=0x110000)
parser_debug_coredump.add_argument('--save-core', '-s', help='Save core to file. Otherwise temporary core file will be deleted. '
'Ignored with "-c"', type=str)
parser_debug_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
parser_debug_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)
@ -1142,9 +1147,12 @@ def main():
parser_info_coredump.add_argument('--debug', '-d', help='Log level (0..3)', type=int, default=0)
parser_info_coredump.add_argument('--gdb', '-g', help='Path to gdb', default='xtensa-esp32-elf-gdb')
parser_info_coredump.add_argument('--core', '-c', help='Path to core dump file (if skipped core dump will be read from flash)', type=str)
parser_info_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
parser_info_coredump.add_argument('--off', '-o', help='Ofsset of coredump partition in flash (type "make partition_table" to see).', type=int, default=0x110000)
parser_info_coredump.add_argument('--save-core', '-s', help='Save core to file. Othwerwise temporary core file will be deleted. Does not work with "-c"', type=str)
parser_info_coredump.add_argument('--core-format', '-t', help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
'raw (raw) or base64-encoded (b64) binary', type=str, default='elf')
parser_info_coredump.add_argument('--off', '-o', help='Offset of coredump partition in flash (type '
'"make partition_table" to see).', type=int, default=0x110000)
parser_info_coredump.add_argument('--save-core', '-s', help='Save core to file. Otherwise temporary core file will be deleted. '
'Does not work with "-c"', type=str)
parser_info_coredump.add_argument('--rom-elf', '-r', help='Path to ROM ELF file.', type=str, default='esp32_rom.elf')
parser_info_coredump.add_argument('--print-mem', '-m', help='Print memory dump', action='store_true')
parser_info_coredump.add_argument('prog', help='Path to program\'s ELF binary', type=str)

components/espcoredump/test/test_espcoredump.py

@ -18,10 +18,14 @@ import sys
import os
import unittest
idf_path = os.getenv('IDF_PATH')
if idf_path:
sys.path.insert(0, os.path.join(idf_path, 'components', 'espcoredump'))
import espcoredump
try:
import espcoredump
except ImportError:
idf_path = os.getenv('IDF_PATH')
if idf_path:
sys.path.insert(0, os.path.join(idf_path, 'components', 'espcoredump'))
import espcoredump
class TestESPCoreDumpFileLoader(unittest.TestCase):
def setUp(self):
@ -34,15 +38,16 @@ class TestESPCoreDumpFileLoader(unittest.TestCase):
def testESPCoreDumpFileLoaderWithoutB64(self):
t = espcoredump.ESPCoreDumpFileLoader(path='coredump.b64', b64=False)
self.assertIsInstance(t, espcoredump.ESPCoreDumpFileLoader) # invoke for coverage of open()
self.assertIsInstance(t, espcoredump.ESPCoreDumpFileLoader) # invoke for coverage of open()
t.cleanup()
def test_cannot_remove_dir(self):
self.dloader.remove_tmp_file(fname='.') # silent failure (but covers exception inside)
self.dloader.remove_tmp_file(fname='.') # silent failure (but covers exception inside)
def test_create_corefile(self):
self.assertEqual(self.dloader.create_corefile(core_fname=self.tmp_file, off=0, rom_elf=None), self.tmp_file)
if __name__ == '__main__':
# The purpose of these tests is to increase the code coverage at places which are sensitive to issues related to
# Python 2&3 compatibility.

components/partition_table/gen_esp32part.py

@ -32,7 +32,7 @@ import binascii
import errno
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
MD5_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14 # The first 2 bytes are like magic numbers for MD5 sum
MD5_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14 # The first 2 bytes are like magic numbers for MD5 sum
PARTITION_TABLE_SIZE = 0x1000 # Size of partition table
MIN_PARTITION_SUBTYPE_APP_OTA = 0x10
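The arithmetic behind the MAX_PARTITION_LENGTH comment above, spelled out:

    # 96 entries x 32 bytes each = 3 KB of table data; the rest of the 4 KB
    # sector (PARTITION_TABLE_SIZE) is left for the signature
    assert 96 * 32 == 0xC00
    assert 0x1000 - 0xC00 == 0x400      # 1 KB remaining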
@ -44,25 +44,25 @@ APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app" : APP_TYPE,
"data" : DATA_TYPE,
"app": APP_TYPE,
"data": DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE : {
"factory" : 0x00,
"test" : 0x20,
APP_TYPE: {
"factory": 0x00,
"test": 0x20,
},
DATA_TYPE : {
"ota" : 0x00,
"phy" : 0x01,
"nvs" : 0x02,
"coredump" : 0x03,
"nvs_keys" : 0x04,
"esphttpd" : 0x80,
"fat" : 0x81,
"spiffs" : 0x82,
DATA_TYPE: {
"ota": 0x00,
"phy": 0x01,
"nvs": 0x02,
"coredump": 0x03,
"nvs_keys": 0x04,
"esphttpd": 0x80,
"fat": 0x81,
"spiffs": 0x82,
},
}
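As a quick illustration of how a CSV row such as "storage, data, nvs, , 0x6000" resolves against these maps (assuming gen_esp32part is importable, as the tests further below arrange):

    import gen_esp32part as gp

    assert gp.TYPES["data"] == gp.DATA_TYPE == 0x01
    assert gp.SUBTYPES[gp.DATA_TYPE]["nvs"] == 0x02
    assert gp.SUBTYPES[gp.APP_TYPE]["factory"] == 0x00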
@ -71,16 +71,19 @@ md5sum = True
secure = False
offset_part_table = 0
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@ -102,15 +105,15 @@ class PartitionTable(list):
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line, line_no+1))
res.append(PartitionDefinition.from_csv(line, line_no + 1))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
raise InputError("Error at line %d: %s" % (line_no + 1, e))
except Exception:
critical("Unexpected error parsing CSV line %d: %s" % (line_no+1, line))
critical("Unexpected error parsing CSV line %d: %s" % (line_no + 1, line))
raise
# fix up missing offsets & negative sizes
last_end = offset_part_table + PARTITION_TABLE_SIZE # first offset after partition table
last_end = offset_part_table + PARTITION_TABLE_SIZE # first offset after partition table
for e in res:
if e.offset is not None and e.offset < last_end:
if e == res[0]:
@ -149,14 +152,14 @@ class PartitionTable(list):
ptype = TYPES[ptype]
except KeyError:
try:
ptypes = int(ptype, 0)
ptype = int(ptype, 0)
except TypeError:
pass
try:
subtype = SUBTYPES[int(ptype)][subtype]
except KeyError:
try:
ptypes = int(ptype, 0)
ptype = int(ptype, 0)
except TypeError:
pass
@ -175,11 +178,11 @@ class PartitionTable(list):
# verify each partition individually
for p in self:
p.verify()
# check on duplicate name
names = [ p.name for p in self ]
duplicates = set( n for n in names if names.count(n) > 1 )
names = [p.name for p in self]
duplicates = set(n for n in names if names.count(n) > 1)
# print sorted duplicate partitions by name
if len(duplicates) != 0:
print("A list of partitions that have the same name:")
@ -187,14 +190,14 @@ class PartitionTable(list):
if len(duplicates.intersection([p.name])) != 0:
print("%s" % (p.to_csv()))
raise InputError("Partition names must be unique")
# check for overlaps
last = None
for p in sorted(self, key=lambda x:x.offset):
if p.offset < offset_part_table + PARTITION_TABLE_SIZE:
raise InputError("Partition offset 0x%x is below 0x%x" % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset+last.size-1))
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset + last.size - 1))
last = p
def flash_size(self):
@ -209,17 +212,17 @@ class PartitionTable(list):
@classmethod
def from_binary(cls, b):
md5 = hashlib.md5();
md5 = hashlib.md5()
result = cls()
for o in range(0,len(b),32):
data = b[o:o+32]
data = b[o:o + 32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
if data == b'\xFF' * 32:
return result # got end marker
if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]: #check only the magic number part
if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]: # check only the magic number part
if data[16:] == md5.digest():
continue # the next iteration will check for the end marker
continue # the next iteration will check for the end marker
else:
raise InputError("MD5 checksums don't match! (computed: 0x%s, parsed: 0x%s)" % (md5.hexdigest(), binascii.hexlify(data[16:])))
else:
@ -231,29 +234,30 @@ class PartitionTable(list):
result = b"".join(e.to_binary() for e in self)
if md5sum:
result += MD5_PARTITION_BEGIN + hashlib.md5(result).digest()
if len(result )>= MAX_PARTITION_LENGTH:
if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
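The md5sum entry appended above is simply the 16-byte magic prefix followed by the MD5 digest of everything before it; a standalone sketch over a hypothetical single entry:

    import hashlib

    MD5_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14     # magic prefix defined earlier
    entries = b"\xAA\x50" + b"\x00" * 30                 # hypothetical 32-byte partition entry
    md5_entry = MD5_PARTITION_BEGIN + hashlib.md5(entries).digest()
    assert len(md5_entry) == 32                          # occupies exactly one table slot
    assert md5_entry[:2] == b"\xEB\xEB"                  # what from_binary() keys on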
def to_csv(self, simple_formatting=False):
rows = [ "# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags" ]
rows += [ x.to_csv(simple_formatting) for x in self ]
rows = ["# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags"]
rows += [x.to_csv(simple_formatting) for x in self]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE : 0x10000,
DATA_TYPE : 0x04,
APP_TYPE: 0x10000,
DATA_TYPE: 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted" : 0
"encrypted": 0
}
# add subtypes for the 16 OTA slot values ("ota_XX, etc.")
@ -272,7 +276,7 @@ class PartitionDefinition(object):
def from_csv(cls, line, line_no):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [ f.strip() for f in line_w_defaults.split(",") ]
fields = [f.strip() for f in line_w_defaults.split(",")]
res = PartitionDefinition()
res.line_no = line_no
@ -302,7 +306,7 @@ class PartitionDefinition(object):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
maybe_hex(self.offset), maybe_hex(self.size))
maybe_hex(self.offset), maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
@ -329,7 +333,7 @@ class PartitionDefinition(object):
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return 0 # default
return parse_int(strval, SUBTYPES.get(self.type, {}))
def parse_address(self, strval):
@ -353,12 +357,14 @@ class PartitionDefinition(object):
raise ValidationError(self, "Size field is not set")
if self.name in TYPES and TYPES.get(self.name, "") != self.type:
critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's type (0x%x). Mistake in partition table?" % (self.name, self.type))
critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's "
"type (0x%x). Mistake in partition table?" % (self.name, self.type))
all_subtype_names = []
for names in (t.keys() for t in SUBTYPES.values()):
all_subtype_names += names
if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, "") != self.subtype:
critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has non-matching type 0x%x and subtype 0x%x. Mistake in partition table?" % (self.name, self.type, self.subtype))
critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has "
"non-matching type 0x%x and subtype 0x%x. Mistake in partition table?" % (self.name, self.type, self.subtype))
STRUCT_FORMAT = b"<2sBBLL16sL"
@ -369,21 +375,21 @@ class PartitionDefinition(object):
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag,bit in cls.FLAGS.items():
if flags & (1<<bit):
if flags & (1 << bit):
setattr(res, flag, True)
flags &= ~(1<<bit)
flags &= ~(1 << bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [ flag for flag in self.FLAGS.keys() if getattr(self, flag) ]
return [flag for flag in self.FLAGS.keys() if getattr(self, flag)]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
@ -397,14 +403,14 @@ class PartitionDefinition(object):
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [ (0x100000, "M"), (0x400, "K") ]:
for (val, suffix) in [(0x100000, "M"), (0x400, "K")]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k,v in keywords.items():
if simple_formatting == False and t == v:
if simple_formatting is False and t == v:
return k
return "%d" % t
@ -412,12 +418,12 @@ class PartitionDefinition(object):
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([ self.name,
lookup_keyword(self.type, TYPES),
lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
return ",".join([self.name,
lookup_keyword(self.type, TYPES),
lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords={}):
@ -425,7 +431,7 @@ def parse_int(v, keywords={}):
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [ ("k",1024), ("m",1024*1024) ]:
for letter, multiplier in [("k", 1024), ("m", 1024 * 1024)]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
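For reference, a few values this helper accepts (assuming gen_esp32part is importable):

    import gen_esp32part as gp

    assert gp.parse_int("64k") == 64 * 1024
    assert gp.parse_int("1M") == 1024 * 1024
    assert gp.parse_int("0x9000") == 0x9000              # plain int(x, 0) path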
@ -437,6 +443,7 @@ def parse_int(v, keywords={}):
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
global md5sum
@ -445,10 +452,11 @@ def main():
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--flash-size', help='Optional flash size limit, checks partition table fits in flash',
nargs='?', choices=[ '1MB', '2MB', '4MB', '8MB', '16MB' ])
nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB'])
parser.add_argument('--disable-md5sum', help='Disable md5 checksum for the partition table', default=False, action='store_true')
parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
parser.add_argument('--verify', '-v', help="Verify partition table fields (deprecated, this behaviour is enabled by default and this flag does nothing.", action='store_true')
parser.add_argument('--verify', '-v', help="Verify partition table fields (deprecated, this behaviour is "
"enabled by default and this flag does nothing).", action='store_true')
parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
parser.add_argument('--offset', '-o', help='Set offset partition table', default='0x8000')
parser.add_argument('--secure', help="Require app partitions to be suitable for secure boot", action='store_true')
@ -481,7 +489,8 @@ def main():
size = size_mb * 1024 * 1024 # flash memory uses honest megabytes!
table_size = table.flash_size()
if size < table_size:
raise InputError("Partitions defined in '%s' occupy %.1fMB of flash (%d bytes) which does not fit in configured flash size %dMB. Change the flash size in menuconfig under the 'Serial Flasher Config' menu." %
raise InputError("Partitions defined in '%s' occupy %.1fMB of flash (%d bytes) which does not fit in configured "
"flash size %dMB. Change the flash size in menuconfig under the 'Serial Flasher Config' menu." %
(args.input.name, table_size / 1024.0 / 1024.0, table_size, size_mb))
# Make sure that the output directory is created
@ -490,7 +499,7 @@ def main():
if not os.path.exists(output_dir):
try:
os.makedirs(output_dir)
except OSError as exc:
except OSError as exc:
if exc.errno != errno.EEXIST:
raise

components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py

@ -8,8 +8,13 @@ import subprocess
import tempfile
import os
import io
sys.path.append("..")
from gen_esp32part import *
import re
try:
import gen_esp32part
except ImportError:
sys.path.append("..")
import gen_esp32part
SIMPLE_CSV = """
# Name,Type,SubType,Offset,Size,Flags
@ -22,21 +27,21 @@ LONGER_BINARY_TABLE = b""
LONGER_BINARY_TABLE += b"\xAA\x50\x00\x00" + \
b"\x00\x00\x01\x00" + \
b"\x00\x00\x10\x00" + \
b"factory\0" + (b"\0"*8) + \
b"factory\0" + (b"\0" * 8) + \
b"\x00\x00\x00\x00"
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += b"\xAA\x50\x01\x20" + \
b"\x00\x00\x11\x00" + \
b"\x00\x02\x00\x00" + \
b"data" + (b"\0"*12) + \
b"data" + (b"\0" * 12) + \
b"\x00\x00\x00\x00"
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x10\x00" + \
b"\x00\x00\x15\x00" + \
b"\x00\x10\x00\x00" + \
b"second" + (b"\0"*10) + \
b"second" + (b"\0" * 10) + \
b"\x00\x00\x00\x00"
# MD5 checksum
LONGER_BINARY_TABLE += b"\xEB\xEB" + b"\xFF" * 14
@ -49,10 +54,11 @@ def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith(b"\xFF"*64):
binary_table = binary_table[0:len(binary_table)-32]
while binary_table.endswith(b"\xFF" * 64):
binary_table = binary_table[0:len(binary_table) - 32]
return binary_table
class Py23TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
@ -64,10 +70,11 @@ class Py23TestCase(unittest.TestCase):
# This fix is used in order to avoid using the alias from the six library
self.assertRaisesRegex = self.assertRaisesRegexp
class CSVParserTests(Py23TestCase):
def test_simple_partition(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
self.assertEqual(len(table), 1)
self.assertEqual(table[0].name, "factory")
self.assertEqual(table[0].type, 0)
@ -75,15 +82,13 @@ class CSVParserTests(Py23TestCase):
self.assertEqual(table[0].offset, 65536)
self.assertEqual(table[0].size, 1048576)
def test_require_type(self):
csv = """
# Name,Type, SubType,Offset,Size
ihavenotype,
"""
with self.assertRaisesRegex(InputError, "type"):
PartitionTable.from_csv(csv)
with self.assertRaisesRegex(gen_esp32part.InputError, "type"):
gen_esp32part.PartitionTable.from_csv(csv)
def test_type_subtype_names(self):
csv_magicnumbers = """
@ -106,9 +111,9 @@ myota_status, data, ota,, 0x100000
"""
# make two equivalent partition tables, one using
# magic numbers and one using shortcuts. Ensure they match
magic = PartitionTable.from_csv(csv_magicnumbers)
magic = gen_esp32part.PartitionTable.from_csv(csv_magicnumbers)
magic.verify()
nomagic = PartitionTable.from_csv(csv_nomagicnumbers)
nomagic = gen_esp32part.PartitionTable.from_csv(csv_nomagicnumbers)
nomagic.verify()
self.assertEqual(nomagic["myapp"].type, 0)
@ -121,17 +126,17 @@ myota_status, data, ota,, 0x100000
self.assertEqual(nomagic["mytest"], magic["mytest"])
self.assertEqual(nomagic["myota_status"], magic["myota_status"])
#self.assertEqual(nomagic.to_binary(), magic.to_binary())
# self.assertEqual(nomagic.to_binary(), magic.to_binary())
def test_unit_suffixes(self):
csv = """
# Name, Type, Subtype, Offset, Size
one_megabyte, app, factory, 64k, 1M
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].offset, 64*1024)
self.assertEqual(t[0].size, 1*1024*1024)
self.assertEqual(t[0].offset, 64 * 1024)
self.assertEqual(t[0].size, 1 * 1024 * 1024)
def test_default_offsets(self):
csv = """
@ -141,17 +146,17 @@ second, data, 0x15,, 1M
minidata, data, 0x40,, 32K
otherapp, app, factory,, 1M
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
def test_negative_size_to_offset(self):
csv = """
@ -159,21 +164,21 @@ otherapp, app, factory,, 1M
first, app, factory, 0x10000, -2M
second, data, 0x15, , 1M
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
def test_overlapping_offsets_fail(self):
csv = """
first, app, factory, 0x100000, 2M
second, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(InputError, "overlap"):
t = PartitionTable.from_csv(csv)
with self.assertRaisesRegex(gen_esp32part.InputError, "overlap"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_unique_name_fail(self):
@ -181,23 +186,24 @@ second, app, ota_0, 0x200000, 1M
first, app, factory, 0x100000, 1M
first, app, ota_0, 0x200000, 1M
"""
with self.assertRaisesRegex(InputError, "Partition names must be unique"):
t = PartitionTable.from_csv(csv)
with self.assertRaisesRegex(gen_esp32part.InputError, "Partition names must be unique"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
class BinaryOutputTests(Py23TestCase):
def test_binary_entry(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64+32)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
self.assertEqual(len(tb), 64 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack("<LL", tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
self.assertEqual(b"\xEB\xEB" + b"\xFF" * 14, tb[32:48])
self.assertEqual(b'\x43\x03\x3f\x33\x40\x87\x57\x51\x69\x83\x9b\x40\x61\xb1\x27\x26', tb[48:64])
@ -206,22 +212,21 @@ first, 0x30, 0xEE, 0x100400, 0x300000
first, 0x30, 0xEE, 0x100400, 0x300000
second,0x31, 0xEF, , 0x100000
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96+32)
self.assertEqual(len(tb), 96 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2])
self.assertEqual(b'\xAA\x50', tb[32:34])
def test_encrypted_flag(self):
csv = """
# Name, Type, Subtype, Offset, Size, Flags
first, app, factory,, 1M, encrypted
"""
t = PartitionTable.from_csv(csv)
t = gen_esp32part.PartitionTable.from_csv(csv)
self.assertTrue(t[0].encrypted)
tb = _strip_trailing_ffs(t.to_binary())
tr = PartitionTable.from_binary(tb)
tr = gen_esp32part.PartitionTable.from_binary(tb)
self.assertTrue(tr[0].encrypted)
@ -237,11 +242,11 @@ class BinaryParserTests(Py23TestCase):
b"\xFF" * 32
# verify that parsing 32 bytes as a table
# or as a single Definition are the same thing
t = PartitionTable.from_binary(entry)
t = gen_esp32part.PartitionTable.from_binary(entry)
self.assertEqual(len(t), 1)
t[0].verify()
e = PartitionDefinition.from_binary(entry[:32])
e = gen_esp32part.PartitionDefinition.from_binary(entry[:32])
self.assertEqual(t[0], e)
e.verify()
@ -252,14 +257,14 @@ class BinaryParserTests(Py23TestCase):
self.assertEqual(e.name, "0123456789abc")
def test_multiple_entries(self):
t = PartitionTable.from_binary(LONGER_BINARY_TABLE)
t = gen_esp32part.PartitionTable.from_binary(LONGER_BINARY_TABLE)
t.verify()
self.assertEqual(3, len(t))
self.assertEqual(t[0].type, APP_TYPE)
self.assertEqual(t[0].type, gen_esp32part.APP_TYPE)
self.assertEqual(t[0].name, "factory")
self.assertEqual(t[1].type, DATA_TYPE)
self.assertEqual(t[1].type, gen_esp32part.DATA_TYPE)
self.assertEqual(t[1].name, "data")
self.assertEqual(t[2].type, 0x10)
@ -274,16 +279,16 @@ class BinaryParserTests(Py23TestCase):
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00"
with self.assertRaisesRegex(InputError, "Invalid magic bytes"):
PartitionTable.from_binary(bad_magic)
with self.assertRaisesRegex(gen_esp32part.InputError, "Invalid magic bytes"):
gen_esp32part.PartitionTable.from_binary(bad_magic)
def test_bad_length(self):
bad_length = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789"
with self.assertRaisesRegex(InputError, "32 bytes"):
PartitionTable.from_binary(bad_length)
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789"
with self.assertRaisesRegex(gen_esp32part.InputError, "32 bytes"):
gen_esp32part.PartitionTable.from_binary(bad_length)
class CSVOutputTests(Py23TestCase):
@ -292,7 +297,7 @@ class CSVOutputTests(Py23TestCase):
return list(csv.reader(source_str.split("\n")))
def test_output_simple_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(True)
c = self._readcsv(as_csv)
# first two lines should start with comments
@ -302,15 +307,15 @@ class CSVOutputTests(Py23TestCase):
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "0")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
def test_output_smart_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
table = gen_esp32part.PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(False)
c = self._readcsv(as_csv)
# first two lines should start with comments
@ -324,9 +329,10 @@ class CSVOutputTests(Py23TestCase):
self.assertEqual(row[4], "1M")
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
roundtrip = gen_esp32part.PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
class CommandLineTests(Py23TestCase):
def test_basic_cmdline(self):
@ -340,11 +346,11 @@ class CommandLineTests(Py23TestCase):
# run gen_esp32part.py to convert binary file to CSV
output = subprocess.check_output([sys.executable, "../gen_esp32part.py",
binpath, csvpath], stderr=subprocess.STDOUT)
binpath, csvpath], stderr=subprocess.STDOUT)
# reopen the CSV and check the generated binary is identical
self.assertNotIn(b"WARNING", output)
with open(csvpath, 'r') as f:
from_csv = PartitionTable.from_csv(f.read())
from_csv = gen_esp32part.PartitionTable.from_csv(f.read())
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert the CSV to binary again
@ -372,30 +378,29 @@ class VerificationTests(Py23TestCase):
# Name,Type, SubType,Offset,Size
app,app, factory, 32K, 1M
"""
with self.assertRaisesRegex(ValidationError,
r"Offset.+not aligned"):
t = PartitionTable.from_csv(csv)
with self.assertRaisesRegex(gen_esp32part.ValidationError, r"Offset.+not aligned"):
t = gen_esp32part.PartitionTable.from_csv(csv)
t.verify()
def test_warnings(self):
try:
sys.stderr = io.StringIO() # capture stderr
csv_1 = "app, 1, 2, 32K, 1M\n"
PartitionTable.from_csv(csv_1).verify()
gen_esp32part.PartitionTable.from_csv(csv_1).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition type", sys.stderr.getvalue())
sys.stderr = io.StringIO()
csv_2 = "ota_0, app, ota_1, , 1M\n"
PartitionTable.from_csv(csv_2).verify()
gen_esp32part.PartitionTable.from_csv(csv_2).verify()
self.assertIn("WARNING", sys.stderr.getvalue())
self.assertIn("partition subtype", sys.stderr.getvalue())
finally:
sys.stderr = sys.__stderr__
class PartToolTests(Py23TestCase):
def _run_parttool(self, csvcontents, args, info):
@ -403,8 +408,9 @@ class PartToolTests(Py23TestCase):
with open(csvpath, "w") as f:
f.write(csvcontents)
try:
output = subprocess.check_output([sys.executable, "../parttool.py"] + args.split(" ")
+ ["--partition-table-file", csvpath , "get_partition_info", "--info", info], stderr=subprocess.STDOUT)
output = subprocess.check_output([sys.executable, "../parttool.py"] + args.split(" ")
+ ["--partition-table-file", csvpath, "get_partition_info", "--info", info],
stderr=subprocess.STDOUT)
self.assertNotIn(b"WARNING", output)
m = re.search(b"0x[0-9a-fA-F]+", output)
return m.group(0) if m else ""
@ -418,7 +424,9 @@ otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, app, factory, 0x10000, 1M
"""
rpt = lambda args, info: self._run_parttool(csv, args, info)
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=data --partition-subtype=nvs -q", "offset"), b"0x9000")
@ -437,7 +445,9 @@ phy_init, data, phy, 0xf000, 0x1000
ota_0, app, ota_0, 0x30000, 1M
ota_1, app, ota_1, , 1M
"""
rpt = lambda args, info: self._run_parttool(csv, args, info)
def rpt(args, info):
return self._run_parttool(csv, args, info)
self.assertEqual(
rpt("--partition-type=app --partition-subtype=ota_1 -q", "offset"), b"0x130000")
@ -448,5 +458,6 @@ ota_1, app, ota_1, , 1M
self._run_parttool(csv_mod, "--partition-boot-default -q", "offset"),
b"0x130000") # now default is ota_1
if __name__ =="__main__":
if __name__ == "__main__":
unittest.main()

components/ulp/esp32ulp_mapgen.py

@ -7,27 +7,28 @@
from optparse import OptionParser
BASE_ADDR = 0x50000000;
BASE_ADDR = 0x50000000
def gen_ld_h_from_sym(f_sym, f_ld, f_h):
f_ld.write("/* Variable definitions for ESP32ULP linker\n");
f_ld.write(" * This file is generated automatically by esp32ulp_mapgen.py utility.\n");
f_ld.write(" */\n\n");
f_h.write("// Variable definitions for ESP32ULP\n");
f_h.write("// This file is generated automatically by esp32ulp_mapgen.py utility\n\n");
f_h.write("#pragma once\n\n");
f_ld.write("/* Variable definitions for ESP32ULP linker\n")
f_ld.write(" * This file is generated automatically by esp32ulp_mapgen.py utility.\n")
f_ld.write(" */\n\n")
f_h.write("// Variable definitions for ESP32ULP\n")
f_h.write("// This file is generated automatically by esp32ulp_mapgen.py utility\n\n")
f_h.write("#pragma once\n\n")
for line in f_sym:
for line in f_sym:
name, _, addr_str = line.split()
addr = int(addr_str, 16) + BASE_ADDR;
f_h.write("extern uint32_t ulp_{0};\n".format(name));
addr = int(addr_str, 16) + BASE_ADDR
f_h.write("extern uint32_t ulp_{0};\n".format(name))
f_ld.write("PROVIDE ( ulp_{0} = 0x{1:08x} );\n".format(name, addr))
def main():
description = ( "This application generates .h and .ld files for symbols defined in input file. "
"The input symbols file can be generated using nm utility like this: "
"esp32-ulp-nm -g -f posix <elf_file> > <symbols_file>" );
description = ("This application generates .h and .ld files for symbols defined in input file. "
"The input symbols file can be generated using nm utility like this: "
"esp32-ulp-nm -g -f posix <elf_file> > <symbols_file>")
parser = OptionParser(description=description)
parser.add_option("-s", "--symfile", dest="symfile",
@ -44,11 +45,10 @@ def main():
parser.print_help()
return 1
with open(options.outputfile + ".h", 'w') as f_h, \
open(options.outputfile + ".ld", 'w') as f_ld, \
open(options.symfile) as f_sym: \
with open(options.outputfile + ".h", 'w') as f_h, open(options.outputfile + ".ld", 'w') as f_ld, open(options.symfile) as f_sym:
gen_ld_h_from_sym(f_sym, f_ld, f_h)
return 0
if __name__ == "__main__":
exit(main());
exit(main())
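For context, a minimal illustration of what gen_ld_h_from_sym emits for one nm-style "name type address" line (hypothetical symbol, assuming the script is importable as a module):

    import io
    from esp32ulp_mapgen import gen_ld_h_from_sym

    f_sym = io.StringIO(u"counter T 00000010\n")          # hypothetical symbol at ULP offset 0x10
    f_ld, f_h = io.StringIO(), io.StringIO()
    gen_ld_h_from_sym(f_sym, f_ld, f_h)
    assert "PROVIDE ( ulp_counter = 0x50000010 );" in f_ld.getvalue()   # BASE_ADDR + 0x10
    assert "extern uint32_t ulp_counter;" in f_h.getvalue()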