#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

"""Zephyr Sanity Tests

Also check the "User and Developer Guides" at https://docs.zephyrproject.org/

This script scans for the set of unit test applications in the git
repository and attempts to execute them. By default, it tries to
build each test case on one platform per architecture, using a precedence
list defined in an architecture configuration file, and if possible
run the tests in any available emulators or simulators on the system.

Test cases are detected by the presence of a 'testcase.yaml' or a
'sample.yaml' file in the application's project directory. This file may
contain one or more blocks, each identifying a test scenario. The title of
the block is a name for the test case, which only needs to be unique for
the test cases specified in that testcase metadata. The full canonical name
for each test case is <path to test case>/<block>.
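
For example, a minimal testcase.yaml with a single scenario block (an
illustrative sketch; the block name is made up) could look like:

    tests:
      kernel.common:
        tags: kernel
        min_ram: 16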

Each test block in the testcase metadata can define the following key/value
pairs:

  tags: <list of tags> (required)
    A set of string tags for the testcase. Usually pertains to
    functional domains but can be anything. Command line invocations
    of this script can filter the set of tests to run based on tag.

  skip: <True|False> (default False)
    Skip the testcase unconditionally. This can be used for broken tests.

  slow: <True|False> (default False)
    Don't build or run this test case unless --enable-slow was passed
    in on the command line. Intended for time-consuming test cases
    that are only run under certain circumstances, like daily
    builds.

  extra_args: <list of extra arguments>
    Extra cache entries to pass to CMake when building or running the
    test case.

  extra_configs: <list of extra configurations>
    Extra configuration options to be merged with a master prj.conf
    when building or running the test case.
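
    For example (an illustrative sketch), two Kconfig overrides merged on
    top of the application's prj.conf:

        extra_configs:
          - CONFIG_ASSERT=y
          - CONFIG_MAIN_STACK_SIZE=2048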

  build_only: <True|False> (default False)
    If true, don't try to run the test even if the selected platform
    supports it.

  build_on_all: <True|False> (default False)
    If true, attempt to build the test on all available platforms.

  depends_on: <list of features>
    A board or platform can announce what features it supports; this option
    will enable the test only on those platforms that provide this feature.
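
    For example, an entry such as "depends_on: gpio" (illustrative) limits
    the test to platforms whose metadata lists gpio as a supported feature.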

  min_ram: <integer>
    Minimum amount of RAM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  min_flash: <integer>
    Minimum amount of ROM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  timeout: <number of seconds>
    Length of time to run test in emulator before automatically killing it.
    Defaults to 60 seconds.

  arch_whitelist: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should only be run for.

  arch_exclude: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should not run on.

  platform_whitelist: <list of platforms>
    Set of platforms that this test case should only be run for.

  platform_exclude: <list of platforms>
    Set of platforms that this test case should not run on.

  extra_sections: <list of extra binary sections>
    When computing sizes, sanitycheck will report errors if it finds
    extra, unexpected sections in the Zephyr binary unless they are named
    here. They will not be included in the size calculation.

  filter: <expression>
    Filter whether the testcase should be run by evaluating an expression
    against an environment containing the following values:

            { ARCH : <architecture>,
              PLATFORM : <platform>,
              <all CONFIG_* key/value pairs in the test's generated defconfig>,
              <all DT_* key/value pairs in the test's generated device tree file>,
              <all CMake key/value pairs in the test's generated CMakeCache.txt file>,
              *<env>: any environment variable available
            }

    The grammar for the expression language is as follows:

    expression ::= expression "and" expression
                 | expression "or" expression
                 | "not" expression
                 | "(" expression ")"
                 | symbol "==" constant
                 | symbol "!=" constant
                 | symbol "<" number
                 | symbol ">" number
                 | symbol ">=" number
                 | symbol "<=" number
                 | symbol "in" list
                 | symbol ":" string
                 | symbol

    list ::= "[" list_contents "]"

    list_contents ::= constant
                    | list_contents "," constant

    constant ::= number
               | string

    For the case where expression ::= symbol, it evaluates to true
    if the symbol is defined to a non-empty string.

    Operator precedence, starting from lowest to highest:

        or (left associative)
        and (left associative)
        not (right associative)
        all comparison operators (non-associative)
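
    For example, an illustrative filter combining these operators (a bare
    symbol such as CONFIG_ASSERT is true when defined to a non-empty string):

        filter = PLATFORM != "native_posix" and CONFIG_ASSERT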

    arch_whitelist, arch_exclude, platform_whitelist, platform_exclude
    are all syntactic sugar for these expressions. For instance:

        arch_exclude = x86 arc

    is the same as:

        filter = not ARCH in ["x86", "arc"]

    The ':' operator compiles the string argument as a regular expression,
    and then returns a true value only if the symbol's value in the environment
    matches. For example, if CONFIG_SOC="stm32f107xc" then

        filter = CONFIG_SOC : "stm.*"

    would match it.

The set of test cases that actually run depends on directives in the
testcase file and options passed in on the command line. If there is any
confusion, running with -v or examining the discard report
(sanitycheck_discard.csv) can help show why particular test cases were
skipped.

Metrics (such as pass/fail state and binary size) for the last code
release are stored in scripts/sanity_chk/sanity_last_release.csv.
To update this, pass the --all --release options.

To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line breaks instead of whitespace.
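
For example, an illustrative arguments file args.txt, with each argument on
its own line:

    --testcase-root
    tests/kernel
    -v

can then be passed as: ./scripts/sanitycheck +args.txt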

Most everyday users will run with no arguments.

"""

import os
import argparse
import sys
import logging
import time
import itertools
import shutil
import multiprocessing
import csv
from collections import OrderedDict
from itertools import islice
from colorama import Fore
from pathlib import Path

ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    # This file has been zephyr/scripts/sanitycheck for years,
    # and that is not going to change anytime soon. Let the user
    # run this script as ./scripts/sanitycheck without making them
    # set ZEPHYR_BASE.
    ZEPHYR_BASE = str(Path(__file__).resolve().parents[1])

    # Propagate this decision to child processes.
    os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE

    print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"')

try:
    from anytree import RenderTree, Node, find
except ImportError:
    print("Install the anytree module to use the --test-tree option")

try:
    from tabulate import tabulate
except ImportError:
    print("Install tabulate python module with pip to use --device-testing option.")

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))

from sanitylib import HardwareMap, TestSuite, SizeCalculator, CoverageTool

logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)


def size_report(sc):
    logger.info(sc.filename)
    logger.info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE")
    for i in range(len(sc.sections)):
        v = sc.sections[i]

        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
                     v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")


def export_tests(filename, tests):
    with open(filename, "wt") as csvfile:
        fieldnames = ['section', 'subsection', 'title', 'reference']
        cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        for test in tests:
            # Test identifiers are dotted: the first component becomes the
            # section, the second (underscores spelled out) the subsection.
            data = test.split(".")
            if len(data) > 1:
                subsec = " ".join(data[1].split("_")).title()
                rowdict = {
                    "section": data[0].capitalize(),
                    "subsection": subsec,
                    "title": test,
                    "reference": test
                }
                cw.writerow(rowdict)
            else:
                logger.error("{} can't be exported".format(test))


def parse_arguments():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.fromfile_prefix_chars = "+"

    case_select = parser.add_argument_group("Test case selection",
                                            """
Artificially long but functional example:
    $ ./scripts/sanitycheck -v \\
      --testcase-root tests/ztest/base \\
      --testcase-root tests/kernel \\
      --test tests/ztest/base/testing.ztest.verbose_0 \\
      --test tests/kernel/fifo/fifo_api/kernel.fifo.poll

    "kernel.fifo.poll" is one of the test section names in
    __/fifo_api/testcase.yaml
""")

    parser.add_argument("--force-toolchain", action="store_true",
                        help="Do not filter based on toolchain, use the set "
                             "toolchain unconditionally")
    parser.add_argument(
        "-p", "--platform", action="append",
        help="Platform filter for testing. This option may be used multiple "
             "times. Testcases will only be built/run on the platforms "
             "specified. If this option is not used, then platforms marked "
             "as default in the platform metadata file will be chosen "
             "to build and test. ")

    parser.add_argument("-P", "--exclude-platform", action="append", default=[],
                        help="""Exclude platforms and do not build or run any tests
                        on those platforms. This option can be called multiple times.
                        """
                        )

    parser.add_argument(
        "-a", "--arch", action="append",
        help="Arch filter for testing. Takes precedence over --platform. "
             "If unspecified, test all arches. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument(
        "-t", "--tag", action="append",
        help="Specify tags to restrict which tests to run by tag value. "
             "Default is to not do any tag filtering. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument("-e", "--exclude-tag", action="append",
                        help="Specify tags of tests that should not run. "
                             "Default is to run all tests with all tags.")
    case_select.add_argument(
        "-f",
        "--only-failed",
        action="store_true",
        help="Run only those tests that failed the previous sanity check "
             "invocation.")

    parser.add_argument(
        "--retry-failed", type=int, default=0,
        help="Retry failing tests again, up to the number of times specified.")

    parser.add_argument(
        "--retry-interval", type=int, default=60,
        help="Retry failing tests after specified period of time.")

    test_xor_subtest = case_select.add_mutually_exclusive_group()

    test_xor_subtest.add_argument(
        "-s", "--test", action="append",
        help="Run only the specified test cases. These are named by "
             "<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")

    test_xor_subtest.add_argument(
        "--sub-test", action="append",
        help="""Recursively find sub-test functions and run the entire
        test section where they were found, including all sibling test
        functions. Sub-tests are named by:
        section.name.in.testcase.yaml.function_name_without_test_prefix
        Example: kernel.fifo.poll.fifo_loop
        """)

    parser.add_argument(
        "-l", "--all", action="store_true",
        help="Build/test on all platforms. Any --platform arguments "
             "ignored.")

    parser.add_argument(
        "-o", "--report-dir",
        help="""Output reports containing results of the test run into the
        specified directory.
        The output will be both in CSV and JUNIT format
        (sanitycheck.csv and sanitycheck.xml).
        """)

    parser.add_argument(
        "--report-name",
        help="""Create a report with a custom name.
        """)

    parser.add_argument(
        "--report-suffix",
        help="""Add a suffix to all generated file names, for example to add a
        version or a commit ID.
        """)

    parser.add_argument("--report-excluded",
                        action="store_true",
                        help="""List all tests that are never run based on current scope and
                        coverage. If you are looking for accurate results, run this with
                        --all, but this will take a while...""")

    parser.add_argument("--compare-report",
                        help="Use this report file for size comparison")

    parser.add_argument(
        "-B", "--subset",
        help="Only run a subset of the tests, 1/4 for running the first 25%%, "
             "3/5 means run the 3rd fifth of the total. "
             "This option is useful when running a large number of tests on "
             "different hosts to speed up execution time.")

    parser.add_argument(
        "-N", "--ninja", action="store_true",
        help="Use the Ninja generator with CMake")

    parser.add_argument(
        "-y", "--dry-run", action="store_true",
        help="""Create the filtered list of test cases, but don't actually
        run them. Useful if you're just interested in the discard report
        generated for every run and saved in the specified output
        directory (sanitycheck_discard.csv).
        """)

    parser.add_argument("--list-tags", action="store_true",
                        help="list all tags in selected tests")

    case_select.add_argument("--list-tests", action="store_true",
                             help="""List of all sub-test functions recursively found in
                             all --testcase-root arguments. Note different sub-tests can share
                             the same section name and come from different directories.
                             The output is flattened and reports --sub-test names only,
                             not their directories. For instance net.socket.getaddrinfo_ok
                             and net.socket.fd_set belong to different directories.
                             """)

    case_select.add_argument("--test-tree", action="store_true",
                             help="""Output the testsuite in a tree form""")

    case_select.add_argument("--list-test-duplicates", action="store_true",
                             help="""List tests with duplicate identifiers.
                             """)

    parser.add_argument("--export-tests", action="store",
                        metavar="FILENAME",
                        help="Export test case metadata to a file in CSV format. "
                             "Test instances can be exported per target by supplying "
                             "the platform name using the --platform option (tests for "
                             "only one platform can be exported at a time).")

    parser.add_argument("--timestamps",
                        action="store_true",
                        help="Print all messages with time stamps")

    parser.add_argument(
        "-r", "--release", action="store_true",
        help="Update the benchmark database with the results of this test "
             "run. Intended to be run by CI when tagging an official "
             "release. This database is used as a basis for comparison "
             "when looking for deltas in metrics such as footprint")

    parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true",
                        help="Do not treat warning conditions as errors")

    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Emit debugging information, call multiple times to increase "
             "verbosity")
    parser.add_argument(
        "-i", "--inline-logs", action="store_true",
        help="Upon test failure, print relevant log data to stdout "
             "instead of just a path to it")
    parser.add_argument("--log-file", metavar="FILENAME", action="store",
                        help="log also to file")
    parser.add_argument(
        "-m", "--last-metrics", action="store_true",
        help="Instead of comparing metrics from the last --release, "
             "compare with the results of the previous sanity check "
             "invocation")
    parser.add_argument(
        "-u",
        "--no-update",
        action="store_true",
        help="do not update the results of the last run of the sanity "
             "checks")

    parser.add_argument(
        "-G",
        "--integration",
        action="store_true",
        help="Run integration tests")

    case_select.add_argument(
        "-F",
        "--load-tests",
        metavar="FILENAME",
        action="store",
        help="Load list of tests and platforms to be run from file.")

    case_select.add_argument(
        "-E",
        "--save-tests",
        metavar="FILENAME",
        action="store",
        help="Append list of tests and platforms to be run to file.")

    test_or_build = parser.add_mutually_exclusive_group()
    test_or_build.add_argument(
        "-b", "--build-only", action="store_true",
        help="Only build the code, do not execute any of it in QEMU")

    test_or_build.add_argument(
        "--test-only", action="store_true",
        help="""Only run device tests with current artifacts, do not build
        the code""")
    parser.add_argument(
        "--cmake-only", action="store_true",
        help="Only run cmake, do not build or run.")

    parser.add_argument(
        "-M", "--runtime-artifact-cleanup", action="store_true",
        help="Delete artifacts of passing tests.")

    parser.add_argument(
        "-j", "--jobs", type=int,
        help="Number of jobs for building, defaults to number of CPU threads, "
             "overcommitted by factor 2 when --build-only")

    parser.add_argument(
        "--show-footprint", action="store_true",
        help="Show footprint statistics and deltas since last release."
    )
    parser.add_argument(
        "-H", "--footprint-threshold", type=float, default=5,
        help="When checking test case footprint sizes, warn the user if "
             "the new app size is greater than the specified percentage "
             "from the last release. Default is 5. 0 to warn on any "
             "increase in app size")
    parser.add_argument(
        "-D", "--all-deltas", action="store_true",
        help="Show all footprint deltas, positive or negative. Implies "
             "--footprint-threshold=0")
    parser.add_argument(
        "-O", "--outdir",
        default=os.path.join(os.getcwd(), "sanity-out"),
        help="Output directory for logs and binaries. "
             "Default is 'sanity-out' in the current directory. "
             "This directory will be cleaned unless '--no-clean' is set. "
             "The '--clobber-output' option controls what cleaning does.")
    parser.add_argument(
        "-c", "--clobber-output", action="store_true",
        help="Cleaning the output directory will simply delete it instead "
             "of the default policy of renaming.")
    parser.add_argument(
        "-n", "--no-clean", action="store_true",
        help="Re-use the outdir before building. Will result in "
             "faster compilation since builds will be incremental.")
    case_select.add_argument(
        "-T", "--testcase-root", action="append", default=[],
        help="Base directory to recursively search for test cases. All "
             "testcase.yaml files under here will be processed. May be "
             "called multiple times. Defaults to the 'samples/' and "
             "'tests/' directories at the base of the Zephyr tree.")

    board_root_list = ["%s/boards" % ZEPHYR_BASE,
                       "%s/scripts/sanity_chk/boards" % ZEPHYR_BASE]

    parser.add_argument(
        "-A", "--board-root", action="append", default=board_root_list,
        help="""Directory to search for board configuration files. All .yaml
files in the directory will be processed. The directory should have the same
structure as in the main Zephyr tree: boards/<arch>/<board_name>/""")

    parser.add_argument(
        "-z", "--size", action="append",
        help="Don't run sanity checks. Instead, produce a report to "
             "stdout detailing RAM/ROM sizes on the specified filenames. "
             "All other command line arguments ignored.")
    parser.add_argument(
        "-S", "--enable-slow", action="store_true",
        help="Execute time-consuming test cases that have been marked "
             "as 'slow' in testcase.yaml. Normally these are only built.")

    parser.add_argument(
        "-K", "--force-platform", action="store_true",
        help="""Force testing on selected platforms,
        even if they are excluded in the test configuration"""
    )

    parser.add_argument(
        "--disable-unrecognized-section-test", action="store_true",
        default=False,
        help="Skip the 'unrecognized section' test.")
    parser.add_argument("-R", "--enable-asserts", action="store_true",
                        default=True,
                        help="deprecated, left for compatibility")
    parser.add_argument("--disable-asserts", action="store_false",
                        dest="enable_asserts",
                        help="deprecated, left for compatibility")
    parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
                        help="Error on deprecation warnings.")
    parser.add_argument("--enable-size-report", action="store_true",
                        help="Enable expensive computation of RAM/ROM segment sizes.")

    parser.add_argument(
        "-x", "--extra-args", action="append", default=[],
        help="""Extra CMake cache entries to define when building test cases.
        May be called multiple times. The key-value entries will be
        prefixed with -D before being passed to CMake.

        E.g.
        "sanitycheck -x=USE_CCACHE=0"
        will translate to
        "cmake -DUSE_CCACHE=0"

        which will ultimately disable ccache.
        """
    )

    parser.add_argument(
        "--device-testing", action="store_true",
        help="Test on device directly. Specify the serial device to "
             "use with the --device-serial option.")

    parser.add_argument(
        "-X", "--fixture", action="append", default=[],
        help="Specify a fixture that a board might support")

    serial = parser.add_mutually_exclusive_group()
    serial.add_argument("--device-serial",
                        help="""Serial device for accessing the board
                        (e.g., /dev/ttyACM0)
                        """)

    serial.add_argument("--device-serial-pty",
                        help="""Script for controlling pseudoterminal.
                        Sanitycheck believes that it interacts with a terminal
                        when it actually interacts with the script.

                        E.g. "sanitycheck --device-testing
                        --device-serial-pty <script>"
                        """)

    parser.add_argument("--generate-hardware-map",
                        help="""Probe serial devices connected to this platform
                        and create a hardware map file to be used with
                        --device-testing
                        """)

    parser.add_argument("--persistent-hardware-map", action='store_true',
                        help="""With --generate-hardware-map, tries to use
                        persistent names for serial devices on platforms
                        that support this feature (currently only Linux).
                        """)

    parser.add_argument("--hardware-map",
                        help="""Load hardware map from a file. This will be used
                        for testing on hardware that is listed in the file.
                        """)

    parser.add_argument(
        "--west-flash", nargs='?', const=[],
        help="""Uses west instead of ninja or make to flash when running with
        --device-testing. Supports comma-separated argument list.

        E.g. "sanitycheck --device-testing --device-serial /dev/ttyACM0
        --west-flash="--board-id=foobar,--erase"
        will translate to "west flash -- --board-id=foobar --erase"

        NOTE: device-testing must be enabled to use this option.
        """
    )
    parser.add_argument(
        "--west-runner",
        help="""Uses the specified west runner instead of default when running
        with --west-flash.

        E.g. "sanitycheck --device-testing --device-serial /dev/ttyACM0
        --west-flash --west-runner=pyocd"
        will translate to "west flash --runner pyocd"

        NOTE: west-flash must be enabled to use this option.
        """
    )

    valgrind_asan_group = parser.add_mutually_exclusive_group()

    valgrind_asan_group.add_argument(
        "--enable-valgrind", action="store_true",
        help="""Run binary through valgrind and check for several memory access
        errors. Valgrind needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-asan.
        """)

    valgrind_asan_group.add_argument(
        "--enable-asan", action="store_true",
        help="""Enable address sanitizer to check for several memory access
        errors. Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-valgrind.
        """)

    parser.add_argument(
        "--enable-lsan", action="store_true",
        help="""Enable leak sanitizer to check for heap memory leaks.
        Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and when --enable-asan is given.
        """)

    parser.add_argument(
        "--enable-ubsan", action="store_true",
        help="""Enable undefined behavior sanitizer to check for undefined
        behavior during program execution. It uses an optional runtime library
        to provide better error diagnostics. This option only works with host
        binaries such as those generated for the native_posix configuration.
        """)

    parser.add_argument("--enable-coverage", action="store_true",
                        help="Enable code coverage using gcov.")

    parser.add_argument("-C", "--coverage", action="store_true",
                        help="Generate coverage reports. Implies "
                             "--enable-coverage.")

    parser.add_argument("--coverage-platform", action="append", default=[],
                        help="Platforms to run coverage reports on. "
                             "This option may be used multiple times. "
                             "Defaults to what was selected with --platform.")

    parser.add_argument("--gcov-tool", default=None,
                        help="Path to the gcov tool to use for code coverage "
                             "reports")

    parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov',
                        help="Tool to use to generate coverage report.")

    return parser.parse_args()


def main():
    start_time = time.time()

    options = parse_arguments()

    # Cleanup
    if options.no_clean or options.only_failed or options.test_only:
        if os.path.exists(options.outdir):
            print("Keeping artifacts untouched")
    elif os.path.exists(options.outdir):
        if options.clobber_output:
            print("Deleting output directory {}".format(options.outdir))
            shutil.rmtree(options.outdir)
        else:
            for i in range(1, 100):
                new_out = options.outdir + ".{}".format(i)
                if not os.path.exists(new_out):
                    print("Renaming output directory to {}".format(new_out))
                    shutil.move(options.outdir, new_out)
                    break

    os.makedirs(options.outdir, exist_ok=True)

    # create file handler which logs even debug messages
    if options.log_file:
        fh = logging.FileHandler(options.log_file)
    else:
        fh = logging.FileHandler(os.path.join(options.outdir, "sanitycheck.log"))

    fh.setLevel(logging.DEBUG)

    # create console handler with a higher log level
    ch = logging.StreamHandler()

    VERBOSE = options.verbose
    if VERBOSE > 1:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.INFO)

    # create formatter and add it to the handlers
    if options.timestamps:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    else:
        formatter = logging.Formatter('%(levelname)-7s - %(message)s')

    formatter_file = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter_file)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    hwm = HardwareMap()
    if options.generate_hardware_map:
        hwm.scan_hw(persistent=options.persistent_hardware_map)
        hwm.write_map(options.generate_hardware_map)
        return

    if not options.device_testing and options.hardware_map:
        hwm.load_hardware_map(options.hardware_map)

        logger.info("Available devices:")
        hwm.dump(hwmap=hwm.connected_hardware, connected_only=True)
        return

    if options.west_runner and not options.west_flash:
        logger.error("west-runner requires west-flash to be enabled")
        sys.exit(1)

    if options.west_flash and not options.device_testing:
        logger.error("west-flash requires device-testing to be enabled")
        sys.exit(1)

    if options.coverage:
        options.enable_coverage = True

    if not options.coverage_platform:
        options.coverage_platform = options.platform

    if options.size:
        for fn in options.size:
            size_report(SizeCalculator(fn, []))
        sys.exit(0)

    if options.subset:
        subset, sets = options.subset.split("/")
        if int(subset) > 0 and int(sets) >= int(subset):
            logger.info("Running only a subset: %s/%s" % (subset, sets))
        else:
            logger.error("You have provided a wrong subset value: %s." % options.subset)
            return

    if not options.testcase_root:
        options.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"),
                                 os.path.join(ZEPHYR_BASE, "samples")]

    if options.show_footprint or options.compare_report or options.release:
        options.enable_size_report = True

    suite = TestSuite(options.board_root, options.testcase_root, options.outdir)

    # Set testsuite options from command line.
    suite.build_only = options.build_only
    suite.cmake_only = options.cmake_only
    suite.cleanup = options.runtime_artifact_cleanup
    suite.test_only = options.test_only
    suite.enable_slow = options.enable_slow
    suite.device_testing = options.device_testing
    suite.fixtures = options.fixture
    suite.enable_asan = options.enable_asan
    suite.enable_lsan = options.enable_lsan
    suite.enable_ubsan = options.enable_ubsan
    suite.enable_coverage = options.enable_coverage
    suite.enable_valgrind = options.enable_valgrind
    suite.coverage_platform = options.coverage_platform
    suite.inline_logs = options.inline_logs
    suite.enable_size_report = options.enable_size_report
    suite.extra_args = options.extra_args
    suite.west_flash = options.west_flash
    suite.west_runner = options.west_runner
    suite.verbose = VERBOSE
    suite.warnings_as_errors = not options.disable_warnings_as_errors
    suite.integration = options.integration

    if options.ninja:
        suite.generator_cmd = "ninja"
        suite.generator = "Ninja"
    else:
        suite.generator_cmd = "make"
        suite.generator = "Unix Makefiles"

    # Set number of jobs
    if options.jobs:
        suite.jobs = options.jobs
    elif options.build_only:
        suite.jobs = multiprocessing.cpu_count() * 2
    else:
        suite.jobs = multiprocessing.cpu_count()
    logger.info("JOBS: %d" % suite.jobs)

    run_individual_tests = []

    if options.test:
        run_individual_tests = options.test

    suite.add_testcases(testcase_filter=run_individual_tests)
    suite.add_configurations()

    if options.device_testing:
        if options.hardware_map:
            hwm.load_hardware_map(options.hardware_map)
            suite.connected_hardware = hwm.connected_hardware
            if not options.platform:
                options.platform = []
                for platform in hwm.connected_hardware:
                    if platform['connected']:
                        options.platform.append(platform['platform'])

        elif options.device_serial or options.device_serial_pty:
            if options.platform and len(options.platform) == 1:
                if options.device_serial:
                    hwm.load_device_from_cmdline(options.device_serial,
                                                 options.platform[0],
                                                 False)
                else:
                    hwm.load_device_from_cmdline(options.device_serial_pty,
                                                 options.platform[0],
                                                 True)

                suite.connected_hardware = hwm.connected_hardware
            else:
                logger.error("""When --device-testing is used with
                             --device-serial or --device-serial-pty,
                             only one platform is allowed""")

    if suite.load_errors:
        sys.exit(1)

    if options.list_tags:
        tags = set()
        for _, tc in suite.testcases.items():
            tags = tags.union(tc.tags)

        for t in tags:
            print("- {}".format(t))

        return

    if not options.platform and (options.list_tests or options.test_tree or options.list_test_duplicates \
                                 or options.sub_test or options.export_tests):
        cnt = 0
        all_tests = suite.get_all_tests()

        if options.export_tests:
            export_tests(options.export_tests, all_tests)
            return

        if options.list_test_duplicates:
            import collections
            dupes = [item for item, count in collections.Counter(all_tests).items() if count > 1]
            if dupes:
                print("Tests with duplicate identifiers:")
                for dupe in dupes:
                    print("- {}".format(dupe))
                    for dc in suite.get_testcase(dupe):
                        print("  - {}".format(dc))
            else:
                print("No duplicates found.")
            return

        if options.sub_test:
            for st in options.sub_test:
                subtests = suite.get_testcase(st)
                for sti in subtests:
                    run_individual_tests.append(sti.name)

            if run_individual_tests:
                logger.info("Running the following tests:")
                for test in run_individual_tests:
                    print(" - {}".format(test))
            else:
                logger.info("Tests not found")
                return

        elif options.list_tests or options.test_tree:
            if options.test_tree:
                testsuite = Node("Testsuite")
                samples = Node("Samples", parent=testsuite)
                tests = Node("Tests", parent=testsuite)

            for test in sorted(all_tests):
                cnt = cnt + 1
                if options.list_tests:
                    print(" - {}".format(test))

                if options.test_tree:
                    if test.startswith("sample."):
                        sec = test.split(".")
                        area = find(samples, lambda node: node.name == sec[1] and node.parent == samples)
                        if not area:
                            area = Node(sec[1], parent=samples)

                        t = Node(test, parent=area)
                    else:
                        sec = test.split(".")
                        area = find(tests, lambda node: node.name == sec[0] and node.parent == tests)
                        if not area:
                            area = Node(sec[0], parent=tests)

                        if area and len(sec) > 2:
                            subarea = find(area, lambda node: node.name == sec[1] and node.parent == area)
                            if not subarea:
                                subarea = Node(sec[1], parent=area)

                            t = Node(test, parent=subarea)

            if options.list_tests:
                print("{} total.".format(cnt))

            if options.test_tree:
                for pre, _, node in RenderTree(testsuite):
                    print("%s%s" % (pre, node.name))
            return

    discards = []

    if options.report_suffix:
        last_run = os.path.join(options.outdir, "sanitycheck_{}.csv".format(options.report_suffix))
    else:
        last_run = os.path.join(options.outdir, "sanitycheck.csv")

    if options.only_failed:
        suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.load_tests:
        suite.load_from_file(options.load_tests, filter_status=['skipped'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.test_only:
        suite.load_from_file(last_run, filter_status=['skipped'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    else:
        discards = suite.apply_filters(
            build_only=options.build_only,
            enable_slow=options.enable_slow,
            platform=options.platform,
            exclude_platform=options.exclude_platform,
            arch=options.arch,
            tag=options.tag,
            exclude_tag=options.exclude_tag,
            force_toolchain=options.force_toolchain,
            all=options.all,
            run_individual_tests=run_individual_tests,
            device_testing=options.device_testing,
            force_platform=options.force_platform
        )

    if (options.export_tests or options.list_tests) and options.platform:
        if len(options.platform) > 1:
            logger.error("When exporting tests, only one platform "
                         "should be specified.")
            return

        for p in options.platform:
            inst = suite.get_platform_instances(p)
            if options.export_tests:
                tests = [x.testcase.cases for x in inst.values()]
                merged = list(itertools.chain(*tests))
                export_tests(options.export_tests, merged)
                return

            count = 0
            for i in inst.values():
                for c in i.testcase.cases:
                    print(f"- {c}")
                    count += 1

            print(f"Tests found: {count}")
            return

    if VERBOSE > 1 and discards:
        # if we are using command line platform filter, no need to list every
        # other platform as excluded, we know that already.
        # Show only the discards that apply to the selected platforms on the
        # command line
        for i, reason in discards.items():
            if options.platform and i.platform.name not in options.platform:
                continue
            logger.debug(
                "{:<25} {:<50} {}SKIPPED{}: {}".format(
                    i.platform.name,
                    i.testcase.name,
                    Fore.YELLOW,
                    Fore.RESET,
                    reason))

    if options.report_excluded:
        all_tests = suite.get_all_tests()
        to_be_run = set()
        for i, p in suite.instances.items():
            to_be_run.update(p.testcase.cases)

        if all_tests - to_be_run:
            print("Tests that never build or run:")
            for not_run in all_tests - to_be_run:
                print("- {}".format(not_run))

        return

    if options.subset:
        suite.instances = OrderedDict(sorted(suite.instances.items(),
                                             key=lambda x: x[0][x[0].find("/") + 1:]))

        subset, sets = options.subset.split("/")
        subset = int(subset)
        sets = int(sets)
        total = len(suite.instances)
        per_set = int(total / sets)
        num_extra_sets = total - (per_set * sets)

        # Try and be more fair for rounding error with integer division
        # so the last subset doesn't get overloaded, we add 1 extra to
        # subsets 1..num_extra_sets.
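        # For example (illustrative numbers): splitting 10 instances into
        # 3 sets gives per_set=3 and num_extra_sets=1, so subset 1 gets 4
        # instances and subsets 2 and 3 get 3 each.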
        if subset <= num_extra_sets:
            start = (subset - 1) * (per_set + 1)
            end = start + per_set + 1
        else:
            base = num_extra_sets * (per_set + 1)
            start = ((subset - num_extra_sets - 1) * per_set) + base
            end = start + per_set

        sliced_instances = islice(suite.instances.items(), start, end)
        suite.instances = OrderedDict(sliced_instances)

    if options.save_tests:
        suite.csv_report(options.save_tests)
        return

    logger.info("%d test configurations selected, %d configurations discarded due to filters." %
                (len(suite.instances), len(discards)))

    if options.device_testing:
        print("\nDevice testing on:")
        hwm.dump(suite.connected_hardware, suite.selected_platforms)
        print("")

    if options.dry_run:
        duration = time.time() - start_time
        logger.info("Completed in %d seconds" % (duration))
        return

    retries = options.retry_failed + 1
    completed = 0

    suite.update_counting()
    suite.start_time = start_time

    while True:
        completed += 1

        if completed > 1:
            logger.info("%d Iteration:" % (completed))
            time.sleep(options.retry_interval)  # waiting for the system to settle down
            suite.total_done = suite.total_tests - suite.total_failed
            suite.total_failed = suite.total_errors

        suite.execute()
        print("")

        retries = retries - 1
        if retries == 0 or suite.total_failed == suite.total_errors:
            break

    suite.misc_reports(options.compare_report, options.show_footprint,
                       options.all_deltas, options.footprint_threshold, options.last_metrics)

    suite.duration = time.time() - start_time
    suite.update_counting()
    suite.summary(options.disable_unrecognized_section_test)

    if options.coverage:
        if not options.gcov_tool:
            use_system_gcov = False

            for plat in options.coverage_platform:
                ts_plat = suite.get_platform(plat)
                if ts_plat and (ts_plat.type in {"native", "unit"}):
                    use_system_gcov = True

            if use_system_gcov or "ZEPHYR_SDK_INSTALL_DIR" not in os.environ:
                options.gcov_tool = "gcov"
            else:
                options.gcov_tool = os.path.join(os.environ["ZEPHYR_SDK_INSTALL_DIR"],
                                                 "x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gcov")

        logger.info("Generating coverage files...")
        coverage_tool = CoverageTool.factory(options.coverage_tool)
        coverage_tool.gcov_tool = options.gcov_tool
        coverage_tool.base_dir = ZEPHYR_BASE
        coverage_tool.add_ignore_file('generated')
        coverage_tool.add_ignore_directory('tests')
        coverage_tool.add_ignore_directory('samples')
        coverage_tool.generate(options.outdir)

    if options.device_testing:
        print("\nHardware distribution summary:\n")
        table = []
        header = ['Board', 'ID', 'Counter']
        for p in hwm.connected_hardware:
            if p['connected'] and p['platform'] in suite.selected_platforms:
                row = [p['platform'], p.get('id', None), p['counter']]
                table.append(row)
        print(tabulate(table, headers=header, tablefmt="github"))

    suite.save_reports(options.report_name,
                       options.report_suffix,
                       options.report_dir,
                       options.no_update,
                       options.release,
                       options.only_failed)

    if suite.total_failed or (suite.warnings and options.warnings_as_errors):
        sys.exit(1)


if __name__ == "__main__":
    try:
        main()
    finally:
        if os.isatty(1):  # stdout is interactive
            os.system("stty sane")