twister: add support for levels and test configuration

Add support for test levels and the ability to assign a specific test to one
or more levels. Using command line options of twister it is then possible
to select a level and just execute the tests included in this level.

Additionally, a test configuration allows defining level
dependencies and additional inclusion of tests into a specific level if
the test itself does not have this information already.

In the configuration file you can include complete components using
regular expressions and you can specify which test level to import from
the same file, making management of levels easier.

To help with testing outside of upstream CI infrastructure, additional
options are available in the configuration file, which can be hosted
locally. As of now, those options are available:

- Ability to ignore default platforms as defined in board definitions
  (Those are mostly emulation platforms used to run tests in upstream
  CI)
- Option to specify your own list of default platforms overriding what
  upstream defines.
- Ability to override build_on_all options used in some testcases.
  This will treat tests or samples as any other and just build for the
  default platforms you specify in the configuration file or on the
  command line.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
Anas Nashif 2022-11-21 15:17:21 -05:00 committed by Carles Cufí
parent 4c97dd546a
commit 14d88f8425
9 changed files with 224 additions and 48 deletions

View file

@ -65,6 +65,7 @@ class TwisterConfigParser:
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"levels": {"type": "list", "default": []},
"harness": {"type": "str", "default": "test"},
"harness_config": {"type": "map", "default": {}},
"seed": {"type": "int", "default": 0},

View file

@ -268,6 +268,13 @@ structure in the main Zephyr tree: boards/<arch>/<board_name>/""")
"Default to html. "
"Valid options are html, xml, csv, txt, coveralls, sonarqube.")
parser.add_argument("--test-config", action="store", default=os.path.join(ZEPHYR_BASE, "tests", "test_config.yaml"),
help="Path to file with plans and test configurations.")
parser.add_argument("--level", action="store",
help="Test level to be used. By default, no levels are used for filtering"
"and do the selection based on existing filters.")
parser.add_argument(
"-D", "--all-deltas", action="store_true",
help="Show all footprint deltas, positive or negative. Implies "
@ -744,6 +751,8 @@ class TwisterEnv:
self.hwm = None
self.test_config = options.test_config
def discover(self):
self.check_zephyr_version()
self.get_toolchain()

View file

@ -59,6 +59,11 @@ class Filters:
SKIP = 'Skip filter'
class TestLevel:
    """A named test level grouping scenarios, optionally inheriting others.

    Fix: the original declared ``name``/``levels``/``scenarios`` as class
    attributes, so the two lists were shared by every instance — any
    in-place mutation (e.g. ``tl.levels.append(...)``) would leak into all
    other levels.  Instance attributes set in ``__init__`` remove that
    hazard while keeping ``TestLevel()`` construction and attribute access
    backward-compatible.
    """

    def __init__(self, name=None, levels=None, scenarios=None):
        # Level identifier (the 'name' key from the test configuration).
        self.name = name
        # Names of levels this level inherits scenarios from.
        self.levels = [] if levels is None else levels
        # Scenario identifiers selected into this level.
        self.scenarios = [] if scenarios is None else scenarios
class TestPlan:
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
@ -70,6 +75,8 @@ class TestPlan:
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
tc_schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "test-config-schema.yaml")
SAMPLE_FILENAME = 'sample.yaml'
TESTSUITE_FILENAME = 'testcase.yaml'
@ -90,12 +97,54 @@ class TestPlan:
self.instances = dict()
self.warnings = 0
self.scenarios = []
self.hwm = env.hwm
# used during creating shorter build paths
self.link_dir_counter = 0
self.modules = []
self.run_individual_testsuite = []
self.levels = []
self.test_config = {}
def get_level(self, name):
    """Return the configured TestLevel whose name matches, or None."""
    for lvl in self.levels:
        if lvl.name == name:
            return lvl
    return None
def parse_configuration(self, config_file):
    """Load and validate the twister test configuration file.

    Populates ``self.test_config`` from the YAML file (validated against
    ``self.tc_schema_path``) and builds ``self.levels`` from its 'levels'
    section, expanding 'adds' regexes against the collected scenarios and
    then resolving level inheritance.

    Raises TwisterRuntimeError when config_file does not exist.
    """
    if os.path.exists(config_file):
        tc_schema = scl.yaml_load(self.tc_schema_path)
        self.test_config = scl.yaml_load_verify(config_file, tc_schema)
    else:
        raise TwisterRuntimeError(f"File {config_file} not found.")

    levels = self.test_config.get('levels', [])

    # Do first pass on levels to get initial data.
    for level in levels:
        adds = []
        for s in level.get('adds', []):
            # Each 'adds' entry is a regex matched in full against the
            # scenario identifiers gathered before this call.
            r = re.compile(s)
            adds.extend(list(filter(r.fullmatch, self.scenarios)))

        tl = TestLevel()
        tl.name = level['name']
        tl.scenarios = adds
        # Store inherited level names now; their scenarios are merged in
        # the second pass, once every level object exists.
        tl.levels = level.get('inherits', [])
        self.levels.append(tl)

    # Go over levels again to resolve inheritance.
    for level in levels:
        inherit = level.get('inherits', [])
        _level = self.get_level(level['name'])
        if inherit:
            for inherted_level in inherit:
                # NOTE(review): assumes every inherited name refers to a
                # defined level; get_level() returns None otherwise and
                # the next line would raise AttributeError — confirm the
                # schema/config guarantees this.
                _inherited = self.get_level(inherted_level)
                _inherited_scenarios = _inherited.scenarios
                level_scenarios = _level.scenarios
                level_scenarios.extend(_inherited_scenarios)
def find_subtests(self):
sub_tests = self.options.sub_test
@ -122,6 +171,11 @@ class TestPlan:
raise TwisterRuntimeError("No test cases found at the specified location...")
self.find_subtests()
# get list of scenarios we have parsed into one list
for _, ts in self.testsuites.items():
self.scenarios.append(ts.id)
self.parse_configuration(config_file=self.env.test_config)
self.add_configurations()
if self.load_errors:
@ -251,10 +305,7 @@ class TestPlan:
return 1
def report_duplicates(self):
all_identifiers = []
for _, ts in self.testsuites.items():
all_identifiers.append(ts.id)
dupes = [item for item, count in collections.Counter(all_identifiers).items() if count > 1]
dupes = [item for item, count in collections.Counter(self.scenarios).items() if count > 1]
if dupes:
print("Tests with duplicate identifiers:")
for dupe in dupes:
@ -358,6 +409,7 @@ class TestPlan:
logger.debug("Reading platform configuration files under %s..." %
board_root)
platform_config = self.test_config.get('platforms', {})
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
@ -365,37 +417,47 @@ class TestPlan:
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if not platform.twister:
continue
self.platforms.append(platform)
if not platform_config.get('override_default_platforms', False):
if platform.default:
logger.debug(f"adding {platform.name} to default platforms")
self.default_platforms.append(platform.name)
else:
if platform.name in platform_config.get('default_platforms', []):
logger.debug(f"adding {platform.name} to default platforms")
self.default_platforms.append(platform.name)
# support board@revision
# if there is already an existed <board>_<revision>.yaml, then use it to
# load platform directly, otherwise, iterate the directory to
# get all valid board revision based on each <board>_<revision>.conf.
if not "@" in platform.name:
tmp_dir = os.listdir(os.path.dirname(file))
for item in tmp_dir:
# Need to make sure the revision matches
# the permitted patterns as described in
# cmake/modules/extensions.cmake.
revision_patterns = ["[A-Z]",
"[0-9]+",
"(0|[1-9][0-9]*)(_[0-9]+)*(_[0-9]+)*"]
for pattern in revision_patterns:
result = re.match(f"{platform.name}_(?P<revision>{pattern})\\.conf", item)
if result:
revision = result.group("revision")
yaml_file = f"{platform.name}_{revision}.yaml"
if yaml_file not in tmp_dir:
platform_revision = copy.deepcopy(platform)
revision = revision.replace("_", ".")
platform_revision.name = f"{platform.name}@{revision}"
platform_revision.default = False
self.platforms.append(platform_revision)
# support board@revision
# if there is already an existed <board>_<revision>.yaml, then use it to
# load platform directly, otherwise, iterate the directory to
# get all valid board revision based on each <board>_<revision>.conf.
if not "@" in platform.name:
tmp_dir = os.listdir(os.path.dirname(file))
for item in tmp_dir:
# Need to make sure the revision matches
# the permitted patterns as described in
# cmake/modules/extensions.cmake.
revision_patterns = ["[A-Z]",
"[0-9]+",
"(0|[1-9][0-9]*)(_[0-9]+)*(_[0-9]+)*"]
break
for pattern in revision_patterns:
result = re.match(f"{platform.name}_(?P<revision>{pattern})\\.conf", item)
if result:
revision = result.group("revision")
yaml_file = f"{platform.name}_{revision}.yaml"
if yaml_file not in tmp_dir:
platform_revision = copy.deepcopy(platform)
revision = revision.replace("_", ".")
platform_revision.name = f"{platform.name}@{revision}"
platform_revision.default = False
self.platforms.append(platform_revision)
break
except RuntimeError as e:
@ -434,7 +496,6 @@ class TestPlan:
try:
parsed_data = TwisterConfigParser(suite_yaml_path, self.suite_schema)
parsed_data.load()
subcases, ztest_suite_names = scan_testsuite_path(suite_path)
for name in parsed_data.scenarios.keys():
@ -552,7 +613,6 @@ class TestPlan:
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
@ -572,7 +632,7 @@ class TestPlan:
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
_platforms = list(filter(lambda p: p.default, self.platforms))
_platforms = list(filter(lambda p: p.name in self.default_platforms, self.platforms))
platforms = []
# default platforms that can't be run are dropped from the list of
# the default platforms list. Default platforms should always be
@ -586,13 +646,13 @@ class TestPlan:
else:
platforms = self.platforms
platform_config = self.test_config.get('platforms', {})
logger.info("Building initial testsuite list...")
keyed_tests = {}
for ts_name, ts in self.testsuites.items():
if ts.build_on_all and not platform_filter:
if ts.build_on_all and not platform_filter and platform_config.get('increased_platform_scope', True):
platform_scope = self.platforms
elif ts.integration_platforms and self.options.integration:
self.verify_platforms_existence(
@ -606,15 +666,17 @@ class TestPlan:
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if ts.platform_allow and not platform_filter and not integration:
if ts.platform_allow and not platform_filter and not integration and platform_config.get('increased_platform_scope', True):
self.verify_platforms_existence(
ts.platform_allow, f"{ts_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
_platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
self.platforms))
if len(_platform_scope) > 0:
platform_scope = _platform_scope[:1]
# list of instances per testsuite, aka configurations.
@ -648,6 +710,12 @@ class TestPlan:
if not set(ts.modules).issubset(set(self.modules)):
instance.add_filter(f"one or more required modules not available: {','.join(ts.modules)}", Filters.TESTSUITE)
if self.options.level:
tl = self.get_level(self.options.level)
planned_scenarios = tl.scenarios
if ts.id not in planned_scenarios and not set(ts.levels).intersection(set(tl.levels)):
instance.add_filter("Not part of requested test plan", Filters.TESTSUITE)
if runnable and not instance.run:
instance.add_filter("Not runnable on device", Filters.PLATFORM)
@ -779,7 +847,7 @@ class TestPlan:
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda ts: ts.platform.default, instance_list))
instances = list(filter(lambda ts: ts.platform.name in self.default_platforms, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))

View file

@ -0,0 +1,44 @@
#
# Schema to validate a YAML file describing a Zephyr test configuration.
#
type: map
mapping:
"platforms":
type: map
required: false
mapping:
"override_default_platforms":
type: bool
required: false
"increased_platform_scope":
type: bool
required: false
"default_platforms":
type: seq
required: false
sequence:
- type: str
"levels":
type: seq
required: false
sequence:
- type: map
required: false
mapping:
"name":
type: str
required: true
"description":
type: str
required: false
"adds":
type: seq
required: false
sequence:
- type: str
"inherits":
type: seq
required: false
sequence:
- type: str

View file

@ -64,6 +64,12 @@ mapping:
"ignore_qemu_crash":
type: bool
required: false
"levels":
type: seq
required: false
sequence:
- type: str
enum: ["smoke", "unit", "integration", "acceptance", "system", "regression"]
"testcases":
type: seq
required: false
@ -237,6 +243,12 @@ mapping:
"filter":
type: str
required: false
"levels":
type: seq
required: false
sequence:
- type: str
enum: ["smoke", "unit", "integration", "acceptance", "system", "regression"]
"integration_platforms":
type: seq
required: false

View file

@ -38,8 +38,9 @@ def tesenv_obj(test_data, testsuites_dir, tmpdir_factory):
parser = add_parse_arguments()
options = parse_arguments(parser, [])
env = TwisterEnv(options)
env.board_roots = [test_data +"board_config/1_level/2_level/"]
env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
env.board_roots = [os.path.join(test_data, "board_config", "1_level", "2_level")]
env.test_roots = [os.path.join(testsuites_dir, 'tests', testsuites_dir, 'samples')]
env.test_config = os.path.join(test_data, "test_config.yaml")
env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
return env
@ -52,6 +53,7 @@ def testplan_obj(test_data, class_env, testsuites_dir, tmpdir_factory):
env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
plan = TestPlan(env)
plan.parse_configuration(config_file=env.test_config)
return plan
@pytest.fixture(name='all_testsuites_dict')
@ -67,8 +69,9 @@ def testsuites_dict(class_testplan):
def all_platforms_list(test_data, class_testplan):
""" Pytest fixture to call add_configurations function of
Testsuite class and return the Platforms list"""
class_testplan.env.board_roots = [os.path.abspath(test_data + "board_config")]
class_testplan.env.board_roots = [os.path.abspath(os.path.join(test_data, "board_config"))]
plan = TestPlan(class_testplan.env)
plan.parse_configuration(config_file=class_testplan.env.test_config)
plan.add_configurations()
return plan.platforms

View file

@ -0,0 +1,19 @@
platforms:
override_default_platforms: false
increased_platform_scope: true
levels:
- name: smoke
description: >
A plan to be used verifying basic zephyr features on hardware.
adds:
- kernel.threads.*
- kernel.timer.behavior
- arch.interrupt
- boards.*
- name: acceptance
description: >
More coverage
adds:
- kernel.*
- arch.interrupt
- boards.*

View file

@ -54,6 +54,7 @@ def test_add_configurations(test_data, class_env, board_root_dir):
"""
class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
plan = TestPlan(class_env)
plan.parse_configuration(config_file=class_env.test_config)
if board_root_dir == "board_config":
plan.add_configurations()
assert sorted(plan.default_platforms) == sorted(['demo_board_1', 'demo_board_3'])
@ -62,9 +63,9 @@ def test_add_configurations(test_data, class_env, board_root_dir):
assert sorted(plan.default_platforms) != sorted(['demo_board_1'])
def test_get_all_testsuites(class_env, all_testsuites_dict):
def test_get_all_testsuites(class_testplan, all_testsuites_dict):
""" Testing get_all_testsuites function of TestPlan class in Twister """
plan = TestPlan(class_env)
plan = class_testplan
plan.testsuites = all_testsuites_dict
expected_tests = ['sample_test.app', 'test_a.check_1.1a',
'test_a.check_1.1c',
@ -79,9 +80,9 @@ def test_get_all_testsuites(class_env, all_testsuites_dict):
'test_d.check_1.unit_1b', 'test_config.main']
assert sorted(plan.get_all_tests()) == sorted(expected_tests)
def test_get_platforms(class_env, platforms_list):
def test_get_platforms(class_testplan, platforms_list):
""" Testing get_platforms function of TestPlan class in Twister """
plan = TestPlan(class_env)
plan = class_testplan
plan.platforms = platforms_list
platform = plan.get_platform("demo_board_1")
assert isinstance(platform, Platform)
@ -106,13 +107,13 @@ TESTDATA_PART1 = [
@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
TESTDATA_PART1)
def test_apply_filters_part1(class_env, all_testsuites_dict, platforms_list,
def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
""" Testing apply_filters function of TestPlan class in Twister
Part 1: Response of apply_filters function have
appropriate values according to the filters
"""
plan = TestPlan(class_env)
plan = class_testplan
if tc_attribute is None and plat_attribute is None:
plan.apply_filters()

19
tests/test_config.yaml Normal file
View file

@ -0,0 +1,19 @@
platforms:
override_default_platforms: false
increased_platform_scope: true
levels:
- name: smoke
description: >
A plan to be used verifying basic zephyr features on hardware.
adds:
- kernel.threads.*
- kernel.timer.behavior
- arch.interrupt
- boards.*
- name: acceptance
description: >
More coverage
adds:
- kernel.*
- arch.interrupt
- boards.*