Merge branch 'test/support_multi_stage_unit_test_case' into 'master'

unit-test-app: support multi stage unit test case

See merge request idf/esp-idf!2139
This commit is contained in:
Ivan Grokhotkov 2018-04-03 18:20:16 +08:00
commit d7c8866896
9 changed files with 280 additions and 51 deletions

View file

@ -84,6 +84,30 @@ DUT2 (slave) console::
Once the signal is set from DUT2, you need to press "Enter" on DUT1, then DUT1 unblocks from ``unity_wait_for_signal`` and starts to change GPIO level.
Add multiple stages test cases
-------------------------------
The normal test cases are expected to finish without reset (or only need to check if reset happens). Sometimes we want to run some specific test after certain kinds of reset.
For example, we want to test if reset reason is correct after wakeup from deep sleep. We need to create deep sleep reset first and then check the reset reason.
To support this, we can define a multiple stages test case to group a set of test functions together::
static void trigger_deepsleep(void)
{
esp_sleep_enable_timer_wakeup(2000);
esp_deep_sleep_start();
}
void check_deepsleep_reset_reason()
{
RESET_REASON reason = rtc_get_reset_reason(0);
TEST_ASSERT(reason == DEEPSLEEP_RESET);
}
TEST_CASE_MULTIPLE_STAGES("reset reason check for deepsleep", "[esp32]", trigger_deepsleep, check_deepsleep_reset_reason);
Multiple stages test cases present a group of test functions to users. They need user interaction (selecting the case and then selecting the stage) to run.
Building unit test app
----------------------
@ -123,7 +147,7 @@ When unit test app is idle, press "Enter" will make it print test menu with all
(10) "global initializers run in the correct order" [cxx]
(11) "before scheduler has started, static initializers work correctly" [cxx]
(12) "adc2 work with wifi" [adc]
(13) "gpio master/slave test example" [ignore][misc][test_env=UT_T2_1]
(13) "gpio master/slave test example" [ignore][misc][test_env=UT_T2_1][multi_device]
(1) "gpio_master_test"
(2) "gpio_slave_test"
(14) "SPI Master clockdiv calculation routines" [spi]
@ -132,6 +156,9 @@ When unit test app is idle, press "Enter" will make it print test menu with all
(17) "SPI Master no response when switch from host1 (HSPI) to host2 (VSPI)" [spi]
(18) "SPI Master DMA test, TX and RX in different regions" [spi]
(19) "SPI Master DMA test: length, start, not aligned" [spi]
(20) "reset reason check for deepsleep" [esp32][test_env=UT_T2_1][multi_stage]
(1) "trigger_deepsleep"
(2) "check_deepsleep_reset_reason"
Normal case will print the case name and description. Master slave cases will also print the sub-menu (the registered test function names).
@ -145,7 +172,10 @@ Test cases can be run by inputting one of the following:
- An asterisk to run all test cases
After you select multiple devices test case, it will print sub menu::
``[multi_device]`` and ``[multi_stage]`` tags tell the test runner whether a test case is a multiple devices or multiple stages test case.
These tags are automatically added by the ``TEST_CASE_MULTIPLE_STAGES`` and ``TEST_CASE_MULTIPLE_DEVICES`` macros.
After you select a multiple devices test case, it will print sub menu::
Running gpio master/slave test example...
gpio master/slave test example
@ -153,3 +183,14 @@ After you select multiple devices test case, it will print sub menu::
(2) "gpio_slave_test"
You need to input a number to select the test function to run on the DUT.
Similar to multiple devices test cases, multiple stages test cases will also print sub menu::
Running reset reason check for deepsleep...
reset reason check for deepsleep
(1) "trigger_deepsleep"
(2) "check_deepsleep_reset_reason"
First time you execute this case, input ``1`` to run first stage (trigger deepsleep).
After DUT is rebooted and able to run test cases, select this case again and input ``2`` to run the second stage.
The case only passes if the last stage passes and all previous stages trigger reset.

View file

@ -17,7 +17,8 @@ from Utility import CIAssignTest
class Group(CIAssignTest.Group):
SORT_KEYS = ["Test App", "SDK", "test environment"]
SORT_KEYS = ["Test App", "SDK", "test environment", "multi_device", "multi_stage"]
CI_JOB_MATCH_KEYS = ["Test App", "SDK", "test environment"]
MAX_CASE = 30
ATTR_CONVERT_TABLE = {
"execution_time": "execution time"
@ -36,35 +37,53 @@ class Group(CIAssignTest.Group):
assert test_app[:3] == "UT_"
return test_app[3:]
def _create_extra_data(self):
def _create_extra_data(self, test_function):
case_data = []
for case in self.case_list:
if self._get_case_attr(case, "cmd set") == "multiple_devices_case":
case_data.append({
"config": self._get_ut_config(self._get_case_attr(case, "Test App")),
"name": self._get_case_attr(case, "summary"),
"child case num": self._get_case_attr(case, "child case num")
})
else:
case_data.append({
"config": self._get_ut_config(self._get_case_attr(case, "Test App")),
"name": self._get_case_attr(case, "summary"),
"reset": self._get_case_attr(case, "reset") ,
})
one_case_data = {
"config": self._get_ut_config(self._get_case_attr(case, "Test App")),
"name": self._get_case_attr(case, "summary"),
"reset": self._get_case_attr(case, "reset"),
}
if test_function in ["run_multiple_devices_cases", "run_multiple_stage_cases"]:
try:
one_case_data["child case num"] = self._get_case_attr(case, "child case num")
except KeyError as e:
print("multiple devices/stages cases must contains at least two test functions")
print("case name: {}".format(one_case_data["name"]))
raise e
case_data.append(one_case_data)
return case_data
def _map_test_function(self):
"""
determine which test function to use according to current test case
:return: test function name to use
"""
if self.filters["multi_device"] == "Yes":
test_function = "run_multiple_devices_cases"
elif self.filters["multi_stage"] == "Yes":
test_function = "run_multiple_stage_cases"
else:
test_function = "run_unit_test_cases"
return test_function
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
test_function = self._map_test_function()
output_data = {
# we don't need filter for test function, as UT uses a few test functions for all cases
"CaseConfig": [
{
"name": self.case_list[0]["cmd set"] if isinstance(self.case_list[0]["cmd set"], str) else self.case_list[0]["cmd set"][0],
"extra_data": self._create_extra_data(),
"name": test_function,
"extra_data": self._create_extra_data(test_function),
}
]
}

View file

@ -53,11 +53,18 @@ class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ["env_tag"]
# Matching CI job rules could be different from the way we want to group test cases.
# For example, when assign unit test cases, different test cases need to use different test functions.
# We need to put them into different groups.
# But these groups can be assigned to jobs with same tags, as they use the same test environment.
CI_JOB_MATCH_KEYS = SORT_KEYS
def __init__(self, case):
    # accumulated execution time of the cases assigned to this group
    self.execution_time = 0
    self.case_list = [case]
    # filters: SORT_KEYS attribute values of the first case; a new case may
    # only join this group when all its filter values match
    self.filters = dict(zip(self.SORT_KEYS, [self._get_case_attr(case, x) for x in self.SORT_KEYS]))
    # ci_job_match_keys: subset of attributes used to match this group to a
    # CI job's tags (can be looser than the grouping keys above)
    self.ci_job_match_keys = dict(zip(self.CI_JOB_MATCH_KEYS,
                                      [self._get_case_attr(case, x) for x in self.CI_JOB_MATCH_KEYS]))
@staticmethod
def _get_case_attr(case, attr):

View file

@ -41,7 +41,7 @@ class Job(dict):
if "case group" in self:
# this job is already assigned
break
for value in group.filters.values():
for value in group.ci_job_match_keys.values():
if value not in self["tags"]:
break
else:

View file

@ -106,6 +106,34 @@ void unity_run_all_tests();
static void UNITY_TEST_UID(test_func_) (void)
/*
* Multiple stages test cases will handle the case that test steps are separated by DUT reset.
* e.g: we want to verify some function after SW reset, WDT reset or deep sleep reset.
*
* First argument is a free-form description,
* second argument is (by convention) a list of identifiers, each one in square brackets.
* subsequent arguments are names of the test functions, separated by reset.
* e.g:
* TEST_CASE_MULTIPLE_STAGES("run light sleep after deep sleep","[sleep]", goto_deepsleep, light_sleep_after_deep_sleep_wakeup);
* */
#define TEST_CASE_MULTIPLE_STAGES(name_, desc_, ...) \
UNITY_TEST_FN_SET(__VA_ARGS__); \
static void __attribute__((constructor)) UNITY_TEST_UID(test_reg_helper_) () \
{ \
static struct test_desc_t UNITY_TEST_UID(test_desc_) = { \
.name = name_, \
.desc = desc_"[multi_stage]", \
.fn = UNITY_TEST_UID(test_functions), \
.file = __FILE__, \
.line = __LINE__, \
.test_fn_count = PP_NARG(__VA_ARGS__), \
.test_fn_name = UNITY_TEST_UID(test_fn_name), \
.next = NULL \
}; \
unity_testcase_register( & UNITY_TEST_UID(test_desc_) ); \
}
/*
* First argument is a free-form description,
* second argument is (by convention) a list of identifiers, each one in square brackets.
@ -120,7 +148,7 @@ void unity_run_all_tests();
{ \
static struct test_desc_t UNITY_TEST_UID(test_desc_) = { \
.name = name_, \
.desc = desc_, \
.desc = desc_"[multi_device]", \
.fn = UNITY_TEST_UID(test_functions), \
.file = __FILE__, \
.line = __LINE__, \
@ -130,6 +158,7 @@ void unity_run_all_tests();
}; \
unity_testcase_register( & UNITY_TEST_UID(test_desc_) ); \
}
/**
* Note: initialization of test_desc_t fields above has to be done exactly
* in the same order as the fields are declared in the structure.

View file

@ -145,13 +145,13 @@ void unity_testcase_register(struct test_desc_t* desc)
}
}
/* print the multiple devices case name and its sub-menu
/* print the multiple function case name and its sub-menu
* e.g:
* (1) spi master/slave case
* (1)master case
* (2)slave case
* */
static void print_multiple_devices_test_menu(const struct test_desc_t* test_ms)
static void print_multiple_function_test_menu(const struct test_desc_t* test_ms)
{
unity_printf("%s\n", test_ms->name);
for (int i = 0; i < test_ms->test_fn_count; i++)
@ -160,12 +160,12 @@ static void print_multiple_devices_test_menu(const struct test_desc_t* test_ms)
}
}
void multiple_devices_option(const struct test_desc_t* test_ms)
void multiple_function_option(const struct test_desc_t* test_ms)
{
int selection;
char cmdline[256] = {0};
print_multiple_devices_test_menu(test_ms);
print_multiple_function_test_menu(test_ms);
while(strlen(cmdline) == 0)
{
/* Flush anything already in the RX buffer */
@ -175,7 +175,7 @@ void multiple_devices_option(const struct test_desc_t* test_ms)
UartRxString((uint8_t*) cmdline, sizeof(cmdline) - 1);
if(strlen(cmdline) == 0) {
/* if input was newline, print a new menu */
print_multiple_devices_test_menu(test_ms);
print_multiple_function_test_menu(test_ms);
}
}
selection = atoi((const char *) cmdline) - 1;
@ -194,7 +194,7 @@ static void unity_run_single_test(const struct test_desc_t* test)
if(test->test_fn_count == 1) {
UnityDefaultTestRun(test->fn[0], test->name, test->line);
} else {
multiple_devices_option(test);
multiple_function_option(test);
}
}

View file

@ -8,4 +8,10 @@ test_env:
omitted: "UT_T1_1"
reset:
default: "POWERON_RESET"
omitted: " "
omitted: " "
multi_device:
default: "Yes"
omitted: "No"
multi_stage:
default: "Yes"
omitted: "No"

View file

@ -103,9 +103,7 @@ class Parser(object):
self.test_env_tags.update({tc["test environment"]: [tc["ID"]]})
if function_count > 1:
tc.update({"cmd set": "multiple_devices_case",
"child case num": function_count})
del tc['reset']
tc.update({"child case num": function_count})
# only add cases need to be executed
test_cases.append(tc)
@ -191,7 +189,9 @@ class Parser(object):
"test environment": prop["test_env"],
"reset": prop["reset"],
"sub module": self.module_map[prop["module"]]['sub module'],
"summary": name})
"summary": name,
"multi_device": prop["multi_device"],
"multi_stage": prop["multi_stage"]})
return test_case
def dump_test_cases(self, test_cases):

View file

@ -23,8 +23,13 @@ from IDF.IDFApp import UT
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-eA-E\d]{8} on core \d)")
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
UT_TIMEOUT = 30
def format_test_case_config(test_case_data):
"""
convert the test case data to unified format.
@ -101,7 +106,7 @@ def format_test_case_config(test_case_data):
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="unit_test",
execution_time=1, env_tag="UT_T1_1")
def test_unit_test_case(env, extra_data):
def run_unit_test_cases(env, extra_data):
"""
extra_data can be three types of value
1. as string:
@ -119,12 +124,6 @@ def test_unit_test_case(env, extra_data):
case_config = format_test_case_config(extra_data)
# compile the patterns for expect only once
reset_pattern = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
exception_pattern = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
abort_pattern = re.compile(r"(abort\(\) was called at PC 0x[a-eA-E\d]{8} on core \d)")
finish_pattern = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
# we don't want stop on failed case (unless some special scenarios we can't handle)
# this flag is used to log if any of the case failed during executing
# Before exit test function this flag is used to log if the case fails
@ -199,11 +198,11 @@ def test_unit_test_case(env, extra_data):
while not test_finish:
try:
dut.expect_any((reset_pattern, handle_exception_reset), # reset pattern
(exception_pattern, handle_exception_reset), # exception pattern
(abort_pattern, handle_exception_reset), # abort pattern
(finish_pattern, handle_test_finish), # test finish pattern
(UT_APP_BOOT_UP_DONE, handle_reset_finish), # reboot finish pattern
dut.expect_any((RESET_PATTERN, handle_exception_reset),
(EXCEPTION_PATTERN, handle_exception_reset),
(ABORT_PATTERN, handle_exception_reset),
(FINISH_PATTERN, handle_test_finish),
(UT_APP_BOOT_UP_DONE, handle_reset_finish),
timeout=UT_TIMEOUT)
except ExpectTimeout:
Utility.console_log("Timeout in expect", color="orange")
@ -340,7 +339,7 @@ def case_run(duts, ut_config, env, one_case, failed_cases):
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="master_slave_test_case", execution_time=1,
env_tag="UT_T2_1")
def multiple_devices_case(env, extra_data):
def run_multiple_devices_cases(env, extra_data):
"""
extra_data can be two types of value
1. as dict:
@ -374,11 +373,139 @@ def multiple_devices_case(env, extra_data):
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="unit_test",
                    execution_time=1, env_tag="UT_T1_1")
def run_multiple_stage_cases(env, extra_data):
    """
    Run multi-stage unit test cases, where the stages are separated by DUT resets.

    extra_data can be 2 types of value
    1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
           [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

    :param env: test environment, used to acquire the unit-test-app DUT
    :param extra_data: the case name or case list or case dictionary
    :return: None
    :raises AssertionError: if any case in the batch failed
    """
    case_config = format_test_case_config(extra_data)

    # we don't want stop on failed case (unless some special scenarios we can't handle)
    # this flag is used to log if any of the case failed during executing
    # Before exit test function this flag is used to log if the case fails
    failed_cases = []

    for ut_config in case_config:
        dut = env.get_dut("unit-test-app", app_path=ut_config)
        dut.start_app()

        for one_case in case_config[ut_config]:
            # fresh boot for every case; "-" asks the app to print an empty
            # result line so we know it's ready for input
            dut.reset()
            dut.write("-", flush=False)
            dut.expect_any(UT_APP_BOOT_UP_DONE,
                           "0 Tests 0 Failures 0 Ignored")

            # resets/exceptions observed between stages, checked at case finish
            exception_reset_list = []

            for test_stage in range(one_case["child case num"]):
                # select multi stage test case name
                dut.write("\"{}\"".format(one_case["name"]))
                dut.expect("Running " + one_case["name"] + "...")
                # select test function for current stage (menu is 1-based)
                dut.write(str(test_stage + 1))

                # we want to set this flag in callbacks (inner functions)
                # use list here so we can use append to set this flag
                stage_finish = list()

                def last_stage():
                    # True when the current stage is the final one of this case
                    return test_stage == one_case["child case num"] - 1

                def check_reset():
                    # verify the observed resets match the expected "reset"
                    # sequence; an empty expectation accepts anything
                    if one_case["reset"]:
                        assert exception_reset_list  # reboot but no exception/reset logged. should never happen
                        result = False
                        if len(one_case["reset"]) == len(exception_reset_list):
                            for i, exception in enumerate(exception_reset_list):
                                if one_case["reset"][i] not in exception:
                                    break
                            else:
                                result = True
                        if not result:
                            Utility.console_log("""Reset Check Failed: \r\n\tExpected: {}\r\n\tGet: {}"""
                                                .format(one_case["reset"], exception_reset_list),
                                                color="orange")
                    else:
                        # we allow omit reset in multi stage cases
                        result = True
                    return result

                # expect callbacks
                def one_case_finish(result):
                    """ one test finished, let expect loop break and log result """
                    # handle test finish
                    result = result and check_reset()
                    if result:
                        Utility.console_log("Success: " + one_case["name"], color="green")
                    else:
                        failed_cases.append(one_case["name"])
                        Utility.console_log("Failed: " + one_case["name"], color="red")
                    stage_finish.append("break")

                def handle_exception_reset(data):
                    """
                    just append data to exception list.
                    exception list will be checked in ``handle_reset_finish``, once reset finished.
                    """
                    exception_reset_list.append(data[0])

                def handle_test_finish(data):
                    """ test finished without reset """
                    # in this scenario reset should not happen
                    if int(data[1]):
                        # case ignored
                        Utility.console_log("Ignored: " + one_case["name"], color="orange")
                    # only passed in last stage will be regarded as real pass
                    if last_stage():
                        one_case_finish(not int(data[0]))
                    else:
                        Utility.console_log("test finished before enter last stage", color="orange")
                        one_case_finish(False)

                def handle_next_stage(data):
                    """ reboot finished. we goto next stage """
                    if last_stage():
                        # already last stage, should never goto next stage
                        Utility.console_log("didn't finish at last stage", color="orange")
                        one_case_finish(False)
                    else:
                        stage_finish.append("continue")

                # pump DUT output until a callback decides this stage is over
                while not stage_finish:
                    try:
                        dut.expect_any((RESET_PATTERN, handle_exception_reset),
                                       (EXCEPTION_PATTERN, handle_exception_reset),
                                       (ABORT_PATTERN, handle_exception_reset),
                                       (FINISH_PATTERN, handle_test_finish),
                                       (UT_APP_BOOT_UP_DONE, handle_next_stage),
                                       timeout=UT_TIMEOUT)
                    except ExpectTimeout:
                        Utility.console_log("Timeout in expect", color="orange")
                        one_case_finish(False)
                        break
                if stage_finish[0] == "break":
                    # test breaks on current stage
                    break

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise AssertionError("Unit Test Failed")
if __name__ == '__main__':
multiple_devices_case(extra_data={"name": "gpio master/slave test example",
"child case num": 2,
"config": "release",
"env_tag": "UT_T2_1"})
run_multiple_devices_cases(extra_data={"name": "gpio master/slave test example",
"child case num": 2,
"config": "release",
"env_tag": "UT_T2_1"})