sanitycheck: support testcases in yaml instead of ini

This commit changes the syntax of the testcase files and the behaviour
and configuration of the sanitycheck script.

To avoid having multiple files with different syntax for boards,
samples, and tests, this change unifies the syntax and uses YAML instead
of INI.

We keep the keywords used in the old syntax and retain the flexibility
of defining multiple tests with different configurations by using YAML
lists. On top of that, the following features are added (a sketch of the
new file formats follows this list):

- We now scan for board configurations in the boards directory and look
for a YAML file describing a board and how it should be tested. This
eliminates the need for listing boards per architecture in a special ini
file under scripts/.

- We define hardware characteristics in the board YAML file that help
identify whether a certain test should run on that board. For example,
we can specify the available RAM of the board and filter out tests that
would require more RAM than the board provides.

- Boards can be marked as default for testing, meaning that we always
run a test case on them (build, and run if possible) when sanitycheck is
called without any arguments. Previously this was done only by selecting
the first board defined for a specific architecture.

- Tests can be configured to run on all possible boards, to make sure
we always build some basic tests for every board and catch issues with
core kernel features.
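
For illustration, the new formats might look roughly like the sketches
below. The keys mirror what this patch reads from the board and testcase
files; the concrete identifiers and values (board name, RAM/flash sizes,
tags) are placeholders only.

A board description, i.e. a YAML file found under boards/ (the new
default --arch-root):

    identifier: some_board
    arch: x86
    type: qemu
    ram: 256
    flash: 512
    toolchain:
      - zephyr
    supported:
      - gpio
    testing:
      default: true
      ignore_tags:
        - net

A testcase.yaml (or sample.yaml) holding a list of test blocks:

    tests:
      - test:
          tags: samples
          min_ram: 16
          depends_on: gpio
      - test_slow:
          tags: samples
          slow: true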

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif 2017-04-04 18:47:49 -04:00 committed by Anas Nashif
parent 295897c191
commit a792a3d410
9 changed files with 207 additions and 229 deletions

scripts/ini2yaml.py Executable file

@@ -0,0 +1,37 @@
#!/usr/bin/env python
import ConfigParser, os
import yaml
import sys
sample = False
in_file = sys.argv[1]
if sys.argv[2] == 'sample':
    sample = True
out_file = os.path.join(os.path.dirname(in_file), sys.argv[2] + ".yaml")
config = ConfigParser.ConfigParser()
config.readfp(open(sys.argv[1]))
y = {'tests': 'tests'}
tests = []
for section in config.sections():
    tc = {}
    for opt in config.options(section):
        value = config.get(section, opt)
        if value in ['false', 'true']:
            tc[opt] = True if value == 'true' else False
        else:
            tc[opt] = value
    test = { section : tc}
    tests.append(test)
y['tests'] = tests
if sample:
    y['sample'] = { 'name': "TBD", 'description': "TBD" }
with open(out_file, "w") as f:
    yaml.dump(y, f, width=50, indent=4, default_flow_style=False)
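
As a usage sketch (the input path here is hypothetical):

    python ini2yaml.py tests/kernel/foo/testcase.ini testcase

The second argument doubles as the output file's base name, so this
writes a testcase.yaml next to the input .ini; passing 'sample' instead
also adds the placeholder 'sample' block and writes sample.yaml. For an
INI section such as [test] containing tags = kernel, the converted
output would look roughly like:

    tests:
    -   test:
            tags: kernel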


@@ -1,4 +0,0 @@
[arch]
name = arc
platforms = arduino_101_sss quark_se_c1000_ss_devboard em_starterkit panther_ss
supported_toolchains = issm zephyr


@@ -1,17 +0,0 @@
[arch]
name = arm
platforms = qemu_cortex_m3 frdm_k64f arduino_due nucleo_f103rb stm32_mini_a15
            olimexino_stm32 96b_nitrogen nrf52_pca10040 hexiwear_k64
            nrf51_pca10028 nucleo_f401re 96b_carbon nrf51_blenano
            arduino_101_ble cc3200_launchxl quark_se_c1000_ble bbc_microbit
            v2m_beetle nucleo_l476rg nrf52840_pca10056 nucleo_f411re
            stm3210c_eval nucleo_f334r8 stm32373c_eval mps2_an385 frdm_kw41z
            sam_e70_xplained curie_ble nrf52_blenano2 hexiwear_kw40z
            cc3220sf_launchxl frdm_kl25z disco_l475_iot1 nucleo_l432kc
            nucleo_f413zh stm32l496g_disco stm32f4_disco 96b_carbon_nrf51
            stm32f469i_disco nucleo_f412zg sam4s_xplained olimex_stm32_e407
supported_toolchains = zephyr gccarmemb
[qemu_cortex_m3]
qemu_support = true


@@ -1,7 +0,0 @@
[arch]
name = nios2
platforms = qemu_nios2 altera_max10
supported_toolchains = zephyr
[qemu_nios2]
qemu_support = true


@@ -1,8 +0,0 @@
[arch]
name = riscv32
platforms = qemu_riscv32 zedboard_pulpino
supported_toolchains = zephyr
[qemu_riscv32]
qemu_support = true


@@ -1,7 +0,0 @@
[arch]
name = unit
platforms = unit_testing
supported_toolchains = zephyr
[unit_testing]
qemu_support = false


@@ -1,20 +0,0 @@
[arch]
name = x86
platforms = qemu_x86_iamcu arduino_101 qemu_x86 minnowboard galileo quark_d2000_crb quark_se_c1000_devboard
            tinytile panther arduino_101_mcuboot
supported_toolchains = zephyr
[qemu_x86]
qemu_support = true
[qemu_x86_iamcu]
qemu_support = true
[quark_d2000_crb]
supported_toolchains = issm zephyr
[arduino_101]
supported_toolchains = issm zephyr
[quark_se_c1000_devboard]
supported_toolchains = issm zephyr


@@ -1,33 +0,0 @@
[arch]
name = xtensa
platforms = qemu_xtensa xt-sim xt-sim_XRC_FUSION_AON_ALL_LM
            xt-sim_hifi2_std xt-sim_hifi3_bd5 xt-sim_D_233L
            xt-sim_D_212GP xt-sim_D_108mini
supported_toolchains = xcc
[qemu_xtensa]
qemu_support = true
supported_toolchains = zephyr
[xt-sim]
qemu_support = true
[xt-sim_XRC_FUSION_AON_ALL_LM]
qemu_support = true
[xt-sim_hifi2_std]
qemu_support = true
[xt-sim_hifi3_bd5]
qemu_support = true
[xt-sim_D_233L]
qemu_support = true
[xt-sim_D_212GP]
qemu_support = true
[xt-sim_D_108mini]
qemu_support = true


@@ -1,4 +1,5 @@
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
"""Zephyr Sanity Tests
This script scans for the set of unit test applications in the git
@@ -7,59 +8,74 @@ build each test case on one platform per architecture, using a precedence
list defined in an architecture configuration file, and if possible
run the tests in the QEMU emulator.
Test cases are detected by the presence of a 'testcase.ini' file in
the application's project directory. This file may contain one or
more blocks, each identifying a test scenario. The title of the block
is a name for the test case, which only needs to be unique for the
test cases specified in that testcase.ini file. The full canonical
name for each test case is <path to test case>/<ini block>.
Test cases are detected by the presence of a 'testcase.yaml' or a 'sample.yaml'
file in the application's project directory. This file may contain one or more
blocks, each identifying a test scenario. The title of the block is a name for
the test case, which only needs to be unique for the test cases specified in
that testcase meta-data. The full canonical name for each test case is <path to
test case>/<block>.
Each testcase.ini block can define the following key/value pairs:
Each test block in the testcase meta-data can define the following key/value pairs:
tags = <list of tags> (required)
tags: <list of tags> (required)
A set of string tags for the testcase. Usually pertains to
functional domains but can be anything. Command line invocations
of this script can filter the set of tests to run based on tag.
skip = <True|False> (default False)
skip: <True|False> (default False)
skip testcase unconditionally. This can be used for broken tests.
slow = <True|False> (default False)
slow: <True|False> (default False)
Don't run this test case unless --enable-slow was passed in on the
command line. Intended for time-consuming test cases that are only
run under certain circumstances, like daily builds. These test cases
are still compiled.
extra_args = <list of extra arguments>
extra_args: <list of extra arguments>
Extra arguments to pass to Make when building or running the
test case.
build_only = <True|False> (default False)
build_only: <True|False> (default False)
If true, don't try to run the test under QEMU even if the
selected platform supports it.
timeout = <number of seconds>
build_on_all: <True|False> (default False)
If true, attempt to build test on all available platforms.
depends_on: <list of features>
A board or platform can announce what features it supports; this option
will enable the test only on those platforms that provide these features.
min_ram: <integer>
minimum amount of RAM needed for this test to build and run. This is
compared with information provided by the board metadata.
min_flash: <integer>
minimum amount of ROM needed for this test to build and run. This is
compared with information provided by the board metadata.
timeout: <number of seconds>
Length of time to run test in QEMU before automatically killing it.
Defaults to 60 seconds.
arch_whitelist = <list of arches, such as x86, arm, arc>
arch_whitelist: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should only be run for.
arch_exclude = <list of arches, such as x86, arm, arc>
arch_exclude: <list of arches, such as x86, arm, arc>
Set of architectures that this test case should not run on.
platform_whitelist = <list of platforms>
platform_whitelist: <list of platforms>
Set of platforms that this test case should only be run for.
platform_exclude = <list of platforms>
platform_exclude: <list of platforms>
Set of platforms that this test case should not run on.
extra_sections = <list of extra binary sections>
extra_sections: <list of extra binary sections>
When computing sizes, sanitycheck will report errors if it finds
extra, unexpected sections in the Zephyr binary unless they are named
here. They will not be included in the size calculation.
filter = <expression>
filter: <expression>
Filter whether the testcase should be run by evaluating an expression
against an environment containing the following values:
@@ -121,28 +137,10 @@ Each testcase.ini block can define the following key/value pairs:
Would match it.
Architectures and platforms are defined in an architecture configuration
file which are stored by default in scripts/sanity_chk/arches/. These
each define an [arch] block with the following key/value pairs:
name = <arch name>
The name of the arch. Example: x86
platforms = <list of supported platforms in order of precedence>
List of supported platforms for this arch. The ordering here
is used to select a default platform to build for that arch.
For every platform defined, there must be a corresponding block for it
in the arch configuration file. This block can be empty if there are
no special definitions for that arch. Options are:
qemu_support = <True|False> (default False)
Indicates whether binaries for this platform can run under QEMU
The set of test cases that actually run depends on directives in the
testcase and architecture .ini file and options passed in on the command
line. If there is any confusion, running with -v or --discard-report
can help show why particular test cases were skipped.
The set of test cases that actually run depends on directives in the testcase
files and options passed in on the command line. If there is any confusion,
running with -v or --discard-report can help show why particular test cases
were skipped.
Metrics (such as pass/fail state and binary size) for the last code
release are stored in scripts/sanity_chk/sanity_last_release.csv.
@@ -176,6 +174,7 @@ import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from collections import OrderedDict
from itertools import islice
import yaml
if "ZEPHYR_BASE" not in os.environ:
sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
@@ -803,7 +802,7 @@ class MakeGenerator:
by execute() will be keyed by its .name field.
"""
args = ti.test.extra_args[:]
args.extend(["ARCH=%s" % ti.platform.arch.name,
args.extend(["ARCH=%s" % ti.platform.arch,
"BOARD=%s" % ti.platform.name])
args.extend(extra_args)
if (ti.platform.qemu_support and (not ti.build_only) and
@@ -916,9 +915,13 @@ testcase_valid_keys = {"tags" : {"type" : "set", "required" : True},
"type" : {"type" : "str", "default": "integration"},
"extra_args" : {"type" : "list"},
"build_only" : {"type" : "bool", "default" : False},
"build_on_all" : {"type" : "bool", "default" : False},
"skip" : {"type" : "bool", "default" : False},
"slow" : {"type" : "bool", "default" : False},
"timeout" : {"type" : "int", "default" : 60},
"min_ram" : {"type" : "int", "default" : 8},
"depends_on": {"type" : "set"},
"min_flash" : {"type" : "int", "default" : 32},
"arch_whitelist" : {"type" : "set"},
"arch_exclude" : {"type" : "set"},
"extra_sections" : {"type" : "list", "default" : []},
@@ -928,37 +931,33 @@ testcase_valid_keys = {"tags" : {"type" : "set", "required" : True},
class SanityConfigParser:
"""Class to read architecture and test case .ini files with semantic checking
"""Class to read test case files with semantic checking
"""
def __init__(self, filename):
"""Instantiate a new SanityConfigParser object
@param filename Source .ini file to read
@param filename Source .yaml file to read
"""
cp = configparser.SafeConfigParser()
cp.readfp(open(filename))
with open(filename, 'r') as stream:
cp = yaml.load(stream)
self.filename = filename
self.cp = cp
def _cast_value(self, value, typestr):
if type(value) is str:
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(v)
return float(value)
elif typestr == "int":
return int(v)
return int(value)
elif typestr == "bool":
v = v.lower()
if v == "true" or v == "1":
return True
elif v == "" or v == "false" or v == "0":
return False
raise ConfigurationError(self.filename,
"bad value for boolean: '%s'" % value)
return value
elif typestr.startswith("list"):
vs = v.split()
@@ -977,20 +976,24 @@ class SanityConfigParser:
else:
raise ConfigurationError(self.filename, "unknown type '%s'" % value)
def section(self,name):
for s in self.sections():
if name in s:
return s.get(name, {})
def sections(self):
"""Get the set of sections within the .ini file
"""Get the set of test sections within the .yaml file
@return a list of string section names"""
return self.cp.sections()
return self.cp['tests']
def get_section(self, section, valid_keys):
"""Get a dictionary representing the keys/values within a section
@param section The section in the .ini file to retrieve data from
@param section The section in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this section. Each key in this dictionary is a key that could
be specified, if a key is given in the .ini file which isn't in
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
@@ -1010,13 +1013,7 @@ class SanityConfigParser:
"""
d = {}
cp = self.cp
if not cp.has_section(section):
# Just fill it with defaults
cp.add_section(section)
for k, v in cp.items(section):
for k, v in self.section(section).items():
if k not in valid_keys:
raise ConfigurationError(self.filename,
"Unknown config key '%s' in definition for '%s'"
@@ -1055,7 +1052,7 @@ class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
def __init__(self, arch, name, plat_dict):
def __init__(self, cfile):
"""Constructor.
@param arch Architecture object for this platform
@@ -1064,41 +1061,40 @@ class Platform:
in the architecture configuration file which has lots of metadata.
See the Architecture class.
"""
self.name = name
self.qemu_support = plat_dict["qemu_support"]
self.arch = arch
self.supported_toolchains = arch.supported_toolchains
if plat_dict["supported_toolchains"]:
self.supported_toolchains = plat_dict["supported_toolchains"]
# Gets populated in a separate step
scp = SanityConfigParser(cfile)
cp = scp.cp
self.name = cp['identifier']
# if no RAM size is specified by the board, take a default of 128K
self.ram = cp.get("ram", 128)
testing = cp.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = cp.get("flash", 512)
self.supported = set(cp.get("supported", []))
self.qemu_support = True if cp.get('type', "na") == 'qemu' else False
self.arch = cp['arch']
self.supported_toolchains = cp.get("toolchain", [])
self.defconfig = None
pass
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch.name)
return "<%s on %s>" % (self.name, self.arch)
class Architecture:
"""Class representing metadata for a particular architecture
"""
def __init__(self, cfile):
def __init__(self, name, platforms):
"""Architecture constructor
@param cfile Path to Architecture configuration file, which gives
info about the arch and all the platforms for it
"""
cp = SanityConfigParser(cfile)
self.platforms = []
self.platforms = platforms
arch = cp.get_section("arch", arch_valid_keys)
self.name = arch["name"]
self.supported_toolchains = arch["supported_toolchains"]
for plat_name in arch["platforms"]:
verbose("Platform: %s" % plat_name)
plat_dict = cp.get_section(plat_name, platform_valid_keys)
self.platforms.append(Platform(self, plat_name, plat_dict))
self.name = name
def __repr__(self):
return "<arch %s>" % self.name
@@ -1110,12 +1106,12 @@ class TestCase:
def __init__(self, testcase_root, workdir, name, tc_dict, inifile):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads testcase.ini files.
Multiple TestCase instances may be generated from a single testcase.ini,
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to a section within that file.
We need to have a unique name for every single test case. Since
a testcase.ini can define multiple tests, the canonical name for
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root Absolute path to the root directory where
@@ -1126,9 +1122,9 @@ class TestCase:
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.ini defines multiple tests
the testcase.yaml defines multiple tests
@param tc_dict Dictionary with section values for this test case
from the testcase.ini file
from the testcase.yaml file
"""
self.code_location = os.path.join(testcase_root, workdir)
self.type = tc_dict["type"]
@@ -1142,7 +1138,11 @@ class TestCase:
self.tc_filter = tc_dict["filter"]
self.timeout = tc_dict["timeout"]
self.build_only = tc_dict["build_only"]
self.build_on_all = tc_dict["build_on_all"]
self.slow = tc_dict["slow"]
self.min_ram = tc_dict["min_ram"]
self.depends_on = tc_dict["depends_on"]
self.min_flash = tc_dict["min_flash"]
self.extra_sections = tc_dict["extra_sections"]
self.path = os.path.join(os.path.basename(os.path.abspath(testcase_root)),
workdir, name)
@@ -1231,41 +1231,40 @@ class TestSuite:
for dirpath, dirnames, filenames in os.walk(testcase_root,
topdown=True):
verbose("scanning %s" % dirpath)
if "testcase.ini" in filenames:
verbose("Found test case in " + dirpath)
if "sample.yaml" in filenames or "testcase.yaml" in filenames:
verbose("Found possible test case in " + dirpath)
dirnames[:] = []
ini_path = os.path.join(dirpath, "testcase.ini")
cp = SanityConfigParser(ini_path)
if "sample.yaml" in filenames:
yaml_path = os.path.join(dirpath, "sample.yaml")
else:
yaml_path = os.path.join(dirpath, "testcase.yaml")
cp = SanityConfigParser(yaml_path)
workdir = os.path.relpath(dirpath, testcase_root)
for section in cp.sections():
tc_dict = cp.get_section(section, testcase_valid_keys)
tc = TestCase(testcase_root, workdir, section, tc_dict,
ini_path)
name = list(section.keys())[0]
tc_dict = cp.get_section(name, testcase_valid_keys)
tc = TestCase(testcase_root, workdir, name, tc_dict,
yaml_path)
self.testcases[tc.name] = tc
debug("Reading architecture configuration files under %s..." % arch_root)
debug("Reading platform configuration files under %s..." % arch_root)
for dirpath, dirnames, filenames in os.walk(arch_root):
for filename in filenames:
if filename.endswith(".ini"):
if filename.endswith(".yaml"):
fn = os.path.join(dirpath, filename)
verbose("Found arch configuration " + fn)
arch = Architecture(fn)
self.arches[arch.name] = arch
self.platforms.extend(arch.platforms)
verbose("Found plaform configuration " + fn)
platform = Platform(fn)
self.platforms.append(platform)
arches = []
for p in self.platforms:
arches.append(p.arch)
for a in list(set(arches)):
aplatforms = [ p for p in self.platforms if p.arch == a ]
arch = Architecture(a, aplatforms)
self.arches[a] = arch
# Build up a list of boards based on the presence of
# boards/*/*_defconfig files. We want to make sure that the arch.ini
# files are not missing any boards
all_plats = [plat.name for plat in self.platforms]
for dirpath, dirnames, filenames in os.walk(os.path.join(ZEPHYR_BASE,
"boards")):
for filename in filenames:
if filename.endswith("_defconfig"):
board_name = filename.replace("_defconfig", "")
if board_name not in all_plats:
error("Platform '%s' not specified in any arch .ini file and will not be tested"
% board_name)
self.instances = {}
def get_last_failed(self):
@@ -1311,13 +1310,15 @@ class TestSuite:
dlist = {}
for tc_name, tc in self.testcases.items():
for arch_name, arch in self.arches.items():
instance_list = []
for plat in arch.platforms:
instance = TestInstance(tc, plat, self.outdir)
if (arch_name == "unit") != (tc.type == "unit"):
continue
if tc.build_on_all:
platform_filter = []
if tc.skip:
continue
@@ -1348,12 +1349,24 @@ class TestSuite:
if platform_filter and plat.name not in platform_filter:
continue
if plat.ram <= tc.min_ram:
continue
if set(plat.ignore_tags) & tc.tags:
continue
if not tc.depends_on.issubset(set(plat.supported)):
continue
if plat.flash < tc.min_flash:
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
continue
if tc.tc_filter and (plat in arch.platforms[:platform_limit] or all_plats or platform_filter):
if tc.tc_filter and (plat.default or all_plats or platform_filter):
args = tc.extra_args[:]
args.extend(["ARCH=" + plat.arch.name,
args.extend(["ARCH=" + plat.arch,
"BOARD=" + plat.name, "config-sanitycheck"])
args.extend(extra_args)
# FIXME would be nice to use a common outdir for this so that
@@ -1400,6 +1413,9 @@ class TestSuite:
discards[instance] = "Skip filter"
continue
if tc.build_on_all:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
@@ -1444,6 +1460,22 @@ class TestSuite:
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram <= tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if not tc.depends_on.issubset(set(plat.supported)):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
defconfig = {"ARCH" : arch.name, "PLATFORM" : plat.name}
defconfig.update(os.environ)
for p, tdefconfig in tc.defconfig.items():
@@ -1468,10 +1500,15 @@ class TestSuite:
# Every platform in this arch was rejected already
continue
if default_platforms:
if default_platforms and not tc.build_on_all:
if not tc.platform_whitelist:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
else:
self.add_instances(instance_list[:platform_limit])
for instance in instance_list[platform_limit:]:
discards[instance] = "Not in first %d platform(s) for arch" % platform_limit
for instance in list(filter(lambda tc: not tc.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
self.discards = discards
@@ -1516,7 +1553,7 @@ class TestSuite:
cw.writeheader()
for instance, reason in self.discards.items():
rowdict = {"test" : i.test.name,
"arch" : i.platform.arch.name,
"arch" : i.platform.arch,
"platform" : i.platform.name,
"reason" : reason}
cw.writerow(rowdict)
@@ -1636,7 +1673,7 @@ class TestSuite:
for name, goal in self.goals.items():
i = self.instances[name]
rowdict = {"test" : i.test.name,
"arch" : i.platform.arch.name,
"arch" : i.platform.arch,
"platform" : i.platform.name,
"extra_args" : " ".join(i.test.extra_args),
"qemu" : i.platform.qemu_support}
@@ -1661,9 +1698,9 @@ def parse_arguments():
parser.add_argument("-p", "--platform", action="append",
help="Platform filter for testing. This option may be used multiple "
"times. Testcases will only be built/run on the platforms "
"specified. If this option is not used, then N platforms will "
"automatically be chosen from each arch to build and test, "
"where N is provided by the --platform-limit option.")
"specified. If this option is not used, then platforms marked "
"as default in the platform metadata file will be chosen "
"to build and test. ")
parser.add_argument("-L", "--platform-limit", action="store", type=int,
metavar="N", default=1,
help="Controls what platforms are tested if --platform or "
@@ -1769,7 +1806,7 @@ def parse_arguments():
"called multiple times. Defaults to the 'samples' and "
"'tests' directories in the Zephyr tree.")
parser.add_argument("-A", "--arch-root",
default="%s/scripts/sanity_chk/arches" % ZEPHYR_BASE,
default="%s/boards" % ZEPHYR_BASE,
help="Directory to search for arch configuration files. All .ini "
"files in the directory will be processed.")
parser.add_argument("-z", "--size", action="append",