diff --git a/components/idf_test/component.mk b/components/idf_test/component.mk
new file mode 100755
index 000000000..c2c4c03a1
--- /dev/null
+++ b/components/idf_test/component.mk
@@ -0,0 +1,5 @@
+#
+# Component Makefile
+#
+# (Uses default behaviour of compiling all source files in directory, adding 'include' to include path.)
+
diff --git a/components/idf_test/include/idf_performance.h b/components/idf_test/include/idf_performance.h
new file mode 100644
index 000000000..e4339a129
--- /dev/null
+++ b/components/idf_test/include/idf_performance.h
@@ -0,0 +1,15 @@
+
+/* @brief macro to print IDF performance
+ * @param item: performance item name, a string pointer.
+ * @param value_fmt: print format and unit of the value, for example: "%02fms", "%dKB"
+ * @param value: the performance value.
+*/
+#define IDF_LOG_PERFORMANCE(item, value_fmt, value) \
+    printf("[Performance][%s]: "value_fmt"\n", item, value)
+
+
+/* declare the performance pass standards here */
+#define IDF_PERFORMANCE_MAX_HTTPS_REQUEST_BIN_SIZE 610
+#define IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP 200
+#define IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP_UNICORE 130
+#define IDF_PERFORMANCE_MAX_ESP_TIMER_GET_TIME_PER_CALL 1000
diff --git a/tools/tiny-test-fw/DUT.py b/tools/tiny-test-fw/DUT.py
index abb0ce800..1c6526709 100644
--- a/tools/tiny-test-fw/DUT.py
+++ b/tools/tiny-test-fw/DUT.py
@@ -47,6 +47,8 @@ import functools
 import serial
 from serial.tools import list_ports
 
+import Utility
+
 if sys.version_info[0] == 2:
     import Queue as _queue
 else:
@@ -72,6 +74,17 @@ def _expect_lock(func):
     return handler
 
 
+def _decode_data(data):
+    """ for python3, if the data is bytes, then decode it to string """
+    if isinstance(data, bytes):
+        # convert bytes to string
+        try:
+            data = data.decode("utf-8", "ignore")
+        except UnicodeDecodeError:
+            data = data.decode("iso8859-1", )
+    return data
+
+
 class _DataCache(_queue.Queue):
     """
     Data cache based on Queue. Allow users to process data cache based on bytes instead of Queue.
     """
@@ -94,13 +107,7 @@ class _DataCache(_queue.Queue):
 
         try:
             data = self.get(timeout=timeout)
-            if isinstance(data, bytes):
-                # convert bytes to string
-                try:
-                    data = data.decode("utf-8", "ignore")
-                except UnicodeDecodeError:
-                    data = data.decode("iso8859-1",)
-            self.data_cache += data
+            self.data_cache += _decode_data(data)
         except _queue.Empty:
             # don't do anything when on update for cache
             pass
@@ -122,18 +129,48 @@ class _DataCache(_queue.Queue):
 
 
 class _RecvThread(threading.Thread):
+    PERFORMANCE_PATTERN = re.compile(r"\[Performance]\[(\w+)]: ([^\r\n]+)\r?\n")
+
     def __init__(self, read, data_cache):
         super(_RecvThread, self).__init__()
         self.exit_event = threading.Event()
         self.setDaemon(True)
         self.read = read
         self.data_cache = data_cache
+        # cache the last line of recv data for collecting performance
+        self._line_cache = str()
+
+    def collect_performance(self, data):
+        """ collect performance """
+        if data:
+            decoded_data = _decode_data(data)
+
+            matches = self.PERFORMANCE_PATTERN.findall(self._line_cache + decoded_data)
+            for match in matches:
+                Utility.console_log("[Performance][{}]: {}".format(match[0], match[1]),
+                                    color="orange")
+
+            # cache incomplete line to later process
+            lines = decoded_data.splitlines(True)
+            last_line = lines[-1]
+
+            if last_line[-1] != "\n":
+                if len(lines) == 1:
+                    # only one line and the line is not finished, then append this to cache
+                    self._line_cache += lines[-1]
+                else:
+                    # more than one line and not finished, replace line cache
+                    self._line_cache = lines[-1]
+            else:
+                # line finishes, flush cache
+                self._line_cache = str()
 
     def run(self):
         while not self.exit_event.isSet():
             data = self.read(1000)
             if data:
                 self.data_cache.put(data)
+                self.collect_performance(data)
 
     def exit(self):
         self.exit_event.set()
@@ -522,11 +559,7 @@ class SerialDUT(BaseDUT):
         timestamp = time.time()
         timestamp = "{}:{}".format(time.strftime("%m-%d %H:%M:%S", time.localtime(timestamp)),
                                    str(timestamp % 1)[2:5])
-        try:
-            formatted_data = "[{}]:\r\n{}\r\n".format(timestamp, data.decode("utf-8", "ignore"))
-        except UnicodeDecodeError:
-            # if utf-8 fail, use iso-8859-1 (single char codec with range 0-255)
-            formatted_data = "[{}]:\r\n{}\r\n".format(timestamp, data.decode("iso8859-1",))
+        formatted_data = "[{}]:\r\n{}\r\n".format(timestamp, _decode_data(data))
         return formatted_data
 
     def _port_open(self):
diff --git a/tools/tiny-test-fw/IDF/__init__.py b/tools/tiny-test-fw/IDF/__init__.py
index 8975a5299..5e1a4d6fc 100644
--- a/tools/tiny-test-fw/IDF/__init__.py
+++ b/tools/tiny-test-fw/IDF/__init__.py
@@ -11,9 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
+import re
 import TinyFW
-from IDF.IDFApp import Example, UT
+import Utility
+from IDF.IDFApp import IDFApp, Example, UT
 from IDF.IDFDUT import IDFDUT
 
 
@@ -34,3 +37,43 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32",
     # not use partial function as define as function support auto generating document
     return TinyFW.test_method(app=app, dut=dut, chip=chip, module=module,
                               execution_time=execution_time, **kwargs)
+
+
+def log_performance(item, value):
+    """
+    print the performance item to console with the pre-defined format
+
+    :param item: performance item name
+    :param value: performance value
+    """
+    Utility.console_log("[Performance][{}]: {}".format(item, value), "orange")
+
+
+def check_performance(item, value):
+    """
+    check if the IDF performance item meets the pass standard
+
+    :param item: performance item name
+    :param value: performance item value
+    :raise: AssertionError: if check fails
+    """
+    ret = True
+    standard_value = 0
+
+    idf_path = IDFApp.get_sdk_path()
+    performance_file = os.path.join(idf_path, "components", "idf_test", "include", "idf_performance.h")
+
+    if os.path.exists(performance_file):
+        with open(performance_file, "r") as f:
+            data = f.read()
+        match = re.search(r"#define\s+IDF_PERFORMANCE_(MIN|MAX)_{}\s+([\d.]+)".format(item.upper()), data)
+        if match:
+            op = match.group(1)
+            standard_value = float(match.group(2))
+            if op == "MAX":
+                ret = value <= standard_value
+            else:
+                ret = value >= standard_value
+    if not ret:
+        raise AssertionError("[Performance] {} value is {}, doesn't meet pass standard {}"
+                             .format(item, value, standard_value))
diff --git a/tools/tiny-test-fw/TinyFW.py b/tools/tiny-test-fw/TinyFW.py
index 5eace4e4a..09b950c58 100644
--- a/tools/tiny-test-fw/TinyFW.py
+++ b/tools/tiny-test-fw/TinyFW.py
@@ -25,6 +25,7 @@ import xunitgen
 import Env
 import DUT
 import App
+import Utility
 
 
 XUNIT_FILE_NAME = "XUNIT_RESULT.xml"
@@ -32,40 +33,6 @@ XUNIT_RECEIVER = xunitgen.EventReceiver()
 XUNIT_DEFAULT_TEST_SUITE = "test-suite"
 
-
-_COLOR_CODES = {
-    "white": '\033[0m',
-    "red": '\033[31m',
-    "green": '\033[32m',
-    "orange": '\033[33m',
-    "blue": '\033[34m',
-    "purple": '\033[35m',
-    "W": '\033[0m',
-    "R": '\033[31m',
-    "G": '\033[32m',
-    "O": '\033[33m',
-    "B": '\033[34m',
-    "P": '\033[35m'
-}
-
-
-def console_log(data, color="white"):
-    """
-    log data to console.
-    (if not flush console log, Gitlab-CI won't update logs during job execution)
-
-    :param data: data content
-    :param color: color
-    """
-    if color not in _COLOR_CODES:
-        color = "white"
-    color_codes = _COLOR_CODES[color]
-    print(color_codes + data)
-    if color not in ["white", "W"]:
-        # reset color to white for later logs
-        print(_COLOR_CODES["white"] + "\r")
-    sys.stdout.flush()
-
-
 class DefaultEnvConfig(object):
     """
     default test configs.
     There're 3 places to set configs, priority is (high -> low):
@@ -187,7 +154,7 @@ def test_method(**kwargs):
                                                  XUNIT_FILE_NAME)
             XUNIT_RECEIVER.begin_case(test_func.__name__, time.time(), test_func_file_name)
             try:
-                console_log("starting running test: " + test_func.__name__, color="green")
+                Utility.console_log("starting running test: " + test_func.__name__, color="green")
                 # execute test function
                 test_func(env_inst, extra_data)
                 # if finish without exception, test result is True
@@ -208,9 +175,9 @@
                                            XUNIT_DEFAULT_TEST_SUITE))
 
             if result:
-                console_log("Test Succeed: " + test_func.__name__, color="green")
+                Utility.console_log("Test Succeed: " + test_func.__name__, color="green")
             else:
-                console_log(("Test Fail: " + test_func.__name__), color="red")
+                Utility.console_log(("Test Fail: " + test_func.__name__), color="red")
             TestResult.set_result(result, test_func.__name__)
             return result
diff --git a/tools/tiny-test-fw/Utility/__init__.py b/tools/tiny-test-fw/Utility/__init__.py
index e69de29bb..5480694c5 100644
--- a/tools/tiny-test-fw/Utility/__init__.py
+++ b/tools/tiny-test-fw/Utility/__init__.py
@@ -0,0 +1,35 @@
+import sys
+
+
+_COLOR_CODES = {
+    "white": '\033[0m',
+    "red": '\033[31m',
+    "green": '\033[32m',
+    "orange": '\033[33m',
+    "blue": '\033[34m',
+    "purple": '\033[35m',
+    "W": '\033[0m',
+    "R": '\033[31m',
+    "G": '\033[32m',
+    "O": '\033[33m',
+    "B": '\033[34m',
+    "P": '\033[35m'
+}
+
+
+def console_log(data, color="white"):
+    """
+    log data to console.
+    (if not flush console log, Gitlab-CI won't update logs during job execution)
+
+    :param data: data content
+    :param color: color
+    """
+    if color not in _COLOR_CODES:
+        color = "white"
+    color_codes = _COLOR_CODES[color]
+    print(color_codes + data)
+    if color not in ["white", "W"]:
+        # reset color to white for later logs
+        print(_COLOR_CODES["white"] + "\r")
+    sys.stdout.flush()
\ No newline at end of file
diff --git a/tools/unit-test-app/components/unity/include/unity.h b/tools/unit-test-app/components/unity/include/unity.h
index 3ffc14c0b..596c806c7 100644
--- a/tools/unit-test-app/components/unity/include/unity.h
+++ b/tools/unit-test-app/components/unity/include/unity.h
@@ -16,6 +16,9 @@ extern "C"
 #define UNITY_INCLUDE_CONFIG_H
 #include "unity_internals.h"
 
+/* include performance pass standards header file */
+#include "idf_performance.h"
+
 void setUp(void);
 void tearDown(void);
 
@@ -285,6 +288,20 @@ void tearDown(void);
 #define TEST_ASSERT_DOUBLE_IS_NOT_NAN_MESSAGE(actual, message) UNITY_TEST_ASSERT_DOUBLE_IS_NOT_NAN((actual), __LINE__, (message))
 #define TEST_ASSERT_DOUBLE_IS_NOT_DETERMINATE_MESSAGE(actual, message) UNITY_TEST_ASSERT_DOUBLE_IS_NOT_DETERMINATE((actual), __LINE__, (message))
 
+/* For performance check with unity test on IDF */
+/* These macros should only be used with ESP-IDF.
+ * To use the performance check, first define the pass standard in idf_performance.h.
+ */
+#define TEST_PERFORMANCE_LESS_THAN(name, value_fmt, value) do { \
+    printf("[Performance]["#name"]: "value_fmt"\n", value); \
+    TEST_ASSERT(value < IDF_PERFORMANCE_MAX_##name); \
+} while(0)
+
+#define TEST_PERFORMANCE_GREATER_THAN(name, value_fmt, value) do { \
+    printf("[Performance]["#name"]: "value_fmt"\n", value); \
+    TEST_ASSERT(value > IDF_PERFORMANCE_MIN_##name); \
+} while(0)
+
 /* end of UNITY_FRAMEWORK_H */
 #ifdef __cplusplus
 }
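
For reference, here is a minimal sketch of how a target-side unit test could feed the new macros, checking against the IDF_PERFORMANCE_MAX_ESP_TIMER_GET_TIME_PER_CALL threshold declared in idf_performance.h above. This example is not part of the patch: the test name, the iteration count, and the 240 MHz cycles-per-microsecond conversion are illustrative assumptions, and TEST_CASE comes from ESP-IDF's unity component.

/* Hypothetical test sketch: measures the average cost of esp_timer_get_time()
 * and reports it through TEST_PERFORMANCE_LESS_THAN. */
#include <stdio.h>
#include <stdint.h>
#include "unity.h"
#include "esp_timer.h"

TEST_CASE("example: esp_timer_get_time performance", "[example_perf]")
{
    const int iterations = 10000;

    int64_t start_us = esp_timer_get_time();          /* current time in microseconds */
    for (int i = 0; i < iterations; i++) {
        (void)esp_timer_get_time();
    }
    int64_t elapsed_us = esp_timer_get_time() - start_us;

    /* rough cost per call in CPU cycles, assuming a 240 MHz clock (illustrative only) */
    int cycles_per_call = (int)(elapsed_us * 240 / iterations);

    /* prints "[Performance][ESP_TIMER_GET_TIME_PER_CALL]: <n> cycles", which the host-side
     * _RecvThread.collect_performance() picks up, and asserts the value stays below
     * IDF_PERFORMANCE_MAX_ESP_TIMER_GET_TIME_PER_CALL from idf_performance.h */
    TEST_PERFORMANCE_LESS_THAN(ESP_TIMER_GET_TIME_PER_CALL, "%d cycles", cycles_per_call);
}

Host-side test scripts get the equivalent hooks through IDF.log_performance() and IDF.check_performance(), which look up the matching IDF_PERFORMANCE_MIN_/IDF_PERFORMANCE_MAX_ define in idf_performance.h, while _RecvThread.collect_performance() echoes every "[Performance][item]: value" line from the DUT output to the console log.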