New nextest-based test runner

We greatly improved the test health of crosvm, which allows us to
make better use of standard cargo tools instead of custom-tailored
implementations for running tests.

The new test runner is available at tools/run_tests2 until it
fully replaces tools/run_tests.

The key difference is that tests are packaged into a tarball and
executed remotely, instead of sending and executing every binary
separately via SSH.
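Roughly, the flow for a remote device under test is: package the tests
once, copy the archive over, then unpack and execute the generated
run.sh on the device. A minimal sketch of that flow using plain
subprocess calls (host name and paths below are placeholders; the real
implementation is in tools/run_tests2):

import subprocess

archive = "target/integration_tests_package.tar.zst"
host = "localhost"  # the built-in test VM is reached via SSH on localhost

# Package all test binaries and their runtime data into one archive.
subprocess.run(
    ["./tools/nextest_package", "-d", "target/integration_tests_package", "-o", archive],
    check=True,
)
# Copy the archive to the device under test, unpack it and run the bundled run.sh.
subprocess.run(["scp", archive, f"{host}:"], check=True)
subprocess.run(
    ["ssh", host,
     "tar xaf integration_tests_package.tar.zst && integration_tests_package/run.sh"],
    check=True,
)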

Improvements over the old test runner:

- Faster: nextest itself runs faster, and the new model for remote
  testing is a lot faster when running tests in a VM.
- Simpler: less custom code to maintain.
- More readable output: uses modern TUI functionality.
- Enables future workflows for testing on cloud devices running
  in our LUCI infrastructure.

How much faster?
- Running all tests on host: 1m -> 9s
- Running all aarch64 tests in vm: 3m20s -> 32s

BUG=b:261600801
TEST=./tools/run_tests2 with args:
-p x86_64
-p x86_64 --dut=host
-p x86_64 --dut=vm
-p aarch64
-p aarch64 --dut=vm
-p armhf
-p mingw64

Change-Id: I842a0ac6f7b288aeccb112b4e2f46a15f3baf54b
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4144641
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Zihan Chen <zihanchen@google.com>
Commit-Queue: Dennis Kempin <denniskempin@google.com>
Author: Dennis Kempin, 2023-01-06 14:25:50 -08:00 (committed by crosvm LUCI)
Parent: e905b4cf29
Commit: 5eea928ef9
7 changed files with 620 additions and 60 deletions

.config/nextest.toml (new file, 2 lines)

@@ -0,0 +1,2 @@
[profile.default]
slow-timeout = { period = "5s", terminate-after = 3 }
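Per nextest's slow-timeout semantics, this marks tests as slow after 5 seconds and terminates them after three periods (15 seconds), so a hung test cannot stall the run indefinitely.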

tools/impl/common.py

@@ -12,8 +12,10 @@ our command line tools.
Refer to the scripts in ./tools for example usage.
"""
import datetime
import functools
import json
import shlex
import sys
import subprocess
@@ -62,7 +64,20 @@ from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import DEVNULL, PIPE, STDOUT # type: ignore
from tempfile import gettempdir
from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Optional, TypeVar, Union, cast
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from rich.console import Console
import argh # type: ignore
import argparse
import contextlib
@@ -72,6 +87,13 @@ import os
import re
import shutil
import traceback
from rich.console import Group
from rich.text import Text
from rich.live import Live
from rich.spinner import Spinner
# File where to store http headers for gcloud authentication
AUTH_HEADERS_FILE = Path(gettempdir()) / f"crosvm_gcloud_auth_headers_{getpass.getuser()}"
PathLike = Union[Path, str]
@@ -111,6 +133,9 @@ GERRIT_URL = "https://chromium-review.googlesource.com"
# Ensure that we really found the crosvm root directory
assert 'name = "crosvm"' in CROSVM_TOML.read_text()
# List of times recorded by `record_time` which will be printed if --timing-info is provided.
global_time_records: List[Tuple[str, datetime.timedelta]] = []
class CommandResult(NamedTuple):
"""Results of a command execution as returned by Command.run()"""
@@ -315,7 +340,13 @@ class Command(object):
### Executing programs in the foreground
def run_foreground(self, quiet: bool = False, check: bool = True, dry_run: bool = False):
def run_foreground(
self,
quiet: bool = False,
check: bool = True,
dry_run: bool = False,
style: Optional[Callable[["subprocess.Popen[str]"], None]] = None,
):
"""
Runs a program in the foreground with output streamed to the user.
@@ -341,9 +372,17 @@ class Command(object):
This will hide the program's stdout and stderr unless the program fails.
More sophisticated means of outputting stdout/err are available via `Styles`:
>>> Command("echo foo").fg(style=Styles.live_truncated())
Will output the results of the command but truncate output after a few lines. See `Styles`
for more options.
Arguments:
quiet: Do not show stdout/stderr unless the program failed.
check: Raise an exception if the program returned an error code.
style: A function to present the output of the program. See `Styles`
Returns: The return code of the program.
"""
@@ -351,28 +390,34 @@ class Command(object):
print(f"Not running: {self}")
return 0
if quiet:
style = Styles.quiet
if verbose():
print(f"$ {self}")
if quiet:
result = self.__run(stdout=PIPE, stderr=STDOUT, check=False)
if style is None or verbose():
return self.__run(stdout=None, stderr=None, check=False).returncode
else:
result = self.__run(stdout=None, stderr=None, check=False)
process = self.__popen(stderr=STDOUT)
style(process)
returncode = process.wait()
if returncode != 0 and check:
assert process.stdout
raise subprocess.CalledProcessError(returncode, process.args)
return returncode
# If stdout was captured, print it if the program failed (or verbose is enabled).
# Skip this if very_verbose is enabled since __run will print captured output already.
if result.stdout and (verbose() or result.returncode != 0) and not very_verbose():
print(result.stdout)
if result.returncode != 0:
if check:
raise subprocess.CalledProcessError(result.returncode, str(self), result.stdout)
return result.returncode
def fg(self, quiet: bool = False, check: bool = True, dry_run: bool = False):
def fg(
self,
quiet: bool = False,
check: bool = True,
dry_run: bool = False,
style: Optional[Callable[["subprocess.Popen[str]"], None]] = None,
):
"""
Shorthand for Command.run_foreground()
"""
return self.run_foreground(quiet, check, dry_run)
return self.run_foreground(quiet, check, dry_run, style)
def write_to(self, filename: Path):
"""
@@ -414,6 +459,18 @@ class Command(object):
print(f"$ {self}")
return self.__run(stdout=PIPE, stderr=PIPE, check=check).stdout.strip()
def json(self, check: bool = True) -> Any:
"""
Runs a program and returns stdout parsed as json.
The program will not be visible to the user unless --very-verbose is specified.
"""
stdout = self.stdout(check=check)
if stdout:
return json.loads(stdout)
else:
return None
def lines(self, check: bool = True):
"""
Runs a program and returns stdout line by line.
@@ -425,16 +482,10 @@ class Command(object):
### Utilities
def __str__(self):
def fmt_arg(arg: str):
# Quote arguments containing spaces.
if re.search(r"\s", arg):
return f'"{arg}"'
return arg
stdin = ""
if self.stdin_cmd:
stdin = str(self.stdin_cmd) + " | "
return stdin + " ".join(fmt_arg(a) for a in self.args)
return stdin + shlex.join(self.args)
def __repr__(self):
stdin = ""
@@ -494,15 +545,6 @@ class Command(object):
text=True,
)
@staticmethod
def __shell_like_split(value: str):
"""Splits a string by spaces, accounting for escape characters and quoting."""
# Re-use csv parses to split by spaces and new lines, while accounting for quoting.
for line in csv.reader(StringIO(value), delimiter=" ", quotechar='"'):
for arg in line:
if arg:
yield arg
@staticmethod
def __parse_cmd(args: Iterable[Any]) -> List[str]:
"""Parses command line arguments for Command."""
@@ -517,11 +559,64 @@ class Command(object):
elif isinstance(arg, QuotedString):
return [arg.value]
elif isinstance(arg, Command):
return [*Command.__shell_like_split(arg.stdout())]
return [*shlex.split(arg.stdout())]
elif arg is None or arg is False:
return []
else:
return [*Command.__shell_like_split(str(arg))]
return [*shlex.split(str(arg))]
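For reference, the stdlib functions used here (shlex.split and shlex.join) handle shell-compatible splitting and quoting; a quick illustration:

import shlex

shlex.split('printf "(%s)" "a b c"')     # ['printf', '(%s)', 'a b c']
shlex.join(["printf", "(%s)", "a b c"])  # "printf '(%s)' 'a b c'"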
class Styles(object):
"A collection of methods that can be passed to `Command.fg(style=)`"
@staticmethod
def quiet(process: "subprocess.Popen[str]"):
"Won't print anything unless the command failed."
assert process.stdout
stdout = process.stdout.read()
if process.wait() != 0:
print(stdout, end="")
@staticmethod
def live_truncated(num_lines: int = 8):
"Prints only the last `num_lines` of output while the program is running; prints the full output on failure."
def output(process: "subprocess.Popen[str]"):
assert process.stdout
spinner = Spinner("dots")
lines: List[Text] = []
stdout: List[str] = []
with Live(refresh_per_second=30, transient=True) as live:
for line in iter(process.stdout.readline, ""):
stdout.append(line.strip())
lines.append(Text.from_ansi(line.strip(), no_wrap=True))
while len(lines) > num_lines:
lines.pop(0)
live.update(Group(Text(""), *lines, spinner))
if process.wait() == 0:
console.print(Group(Text(""), *lines))
else:
for line in stdout:
print(line)
return output
@staticmethod
def quiet_with_progress(title: str):
"Shows a spinner with `title` while the program is running; prints output only on failure."
def output(process: "subprocess.Popen[str]"):
assert process.stdout
with Live(Spinner("dots", title), refresh_per_second=30, transient=True):
stdout = process.stdout.read()
if process.wait() == 0:
console.print(f"[green]OK[/green] {title}")
else:
print(stdout)
console.print(f"[red]ERR[/red] {title}")
return output
class ParallelCommands(object):
@@ -551,6 +646,52 @@ class ParallelCommands(object):
return all(result == 0 for result in results)
class Remote(object):
"""
Wrapper around the cmd() API to allow execution of commands via SSH.
>>> remote = Remote("foobar", {"opt": "value"})
>>> remote.cmd('printf "(%s)"', quoted("a b c"))
Command('ssh', 'foobar', '-T', '-oopt=value', 'bash -O huponexit -c \\'printf (%s) "a b c"\\'')
A remote working directory can be set:
>>> remote.cmd('printf "(%s)"', quoted("a b c")).with_cwd(Path("target_dir"))
Command('ssh', 'foobar', '-T', '-oopt=value', 'cd target_dir && bash -O huponexit -c \\'printf (%s) "a b c"\\'')
"""
def __init__(self, host: str, opts: Dict[str, str]):
self.host = host
ssh_opts = [f"-o{k}={v}" for k, v in opts.items()]
self.ssh_cmd = cmd("ssh", host, "-T", *ssh_opts)
self.scp_cmd = cmd("scp", *ssh_opts)
def ssh(self, cmd: Command, remote_cwd: Optional[Path] = None):
# Use huponexit to ensure the process is killed if the connection is lost.
# Use shlex to properly quote the command.
wrapped_cmd = f"bash -O huponexit -c {shlex.quote(str(cmd))}"
if remote_cwd is not None:
wrapped_cmd = f"cd {remote_cwd} && {wrapped_cmd}"
# The whole command to pass it to SSH for remote execution.
return self.ssh_cmd.with_args(quoted(wrapped_cmd))
def scp(self, sources: List[Path], target: str, quiet: bool = False):
return self.scp_cmd.with_args(*sources, f"{self.host}:{target}").fg(quiet=quiet)
@contextlib.contextmanager
def record_time(title: str):
"""
Records wall-time of how long this context lasts.
The results will be printed at the end of script execution if --timing-info is specified.
"""
start_time = datetime.datetime.now()
try:
yield
finally:
global_time_records.append((title, datetime.datetime.now() - start_time))
@contextlib.contextmanager
def cwd_context(path: PathLike):
"""Context for temporarily changing the cwd.
@@ -629,8 +770,15 @@ def run_commands(
Allow the user to call the provided functions with command line arguments translated to
function arguments via argh: https://pythonhosted.org/argh
"""
exit_code = 0
try:
parser = argparse.ArgumentParser(usage=usage)
parser = argparse.ArgumentParser(
description=usage,
# Docstrings are used as the description in argparse, preserve their formatting.
formatter_class=argparse.RawDescriptionHelpFormatter,
# Do not allow implied abbreviations. Abbreviations should be manually specified.
allow_abbrev=False,
)
add_common_args(parser)
# Add provided commands to parser. Do not use sub-commands if we just got one function.
@@ -639,14 +787,29 @@ def run_commands(
if default_fn:
argh.set_default_command(parser, default_fn) # type: ignore
# Call main method
argh.dispatch(parser) # type: ignore
with record_time("Total Time"):
# Call main method
argh.dispatch(parser) # type: ignore
except Exception as e:
if verbose():
traceback.print_exc()
else:
print(e)
sys.exit(1)
exit_code = 1
if parse_common_args().timing_info:
print_timing_info()
sys.exit(exit_code)
def print_timing_info():
console.print()
console.print("Timing info:")
console.print()
for title, delta in global_time_records:
console.print(f" {title:20} {delta.total_seconds():.2f}s")
@functools.lru_cache(None)
@@ -684,6 +847,12 @@ def add_common_args(parser: argparse.ArgumentParser):
default=False,
help="Print more debug output",
)
parser.add_argument(
"--timing-info",
action="store_true",
default=False,
help="Print info on how long each part of the command takes",
)
def verbose():
@@ -898,6 +1067,15 @@ def kiwi_repo_root():
return (CROSVM_ROOT / "../..").resolve()
def sudo_is_passwordless():
# Run with --askpass but no askpass set, succeeds only if passwordless sudo
# is available.
(ret, _) = subprocess.getstatusoutput("SUDO_ASKPASS=false sudo --askpass true")
return ret == 0
console = Console()
if __name__ == "__main__":
import doctest

tools/impl/test_config.py

@@ -5,6 +5,47 @@
import enum
from typing import List, Dict
BUILD_FEATURES: Dict[str, str] = {
"x86_64-unknown-linux-gnu": "linux-x86_64",
"aarch64-unknown-linux-gnu": "linux-aarch64",
"armv7-unknown-linux-gnueabihf": "linux-armhf",
"x86_64-pc-windows-gnu": "win64",
"x86_64-pc-windows-msvc": "win64",
}
# Configuration of integration tests
#
# The configuration below only applies to integration tests to fine tune which tests can be run
# on which platform (e.g. aarch64 emulation does not pass kvm tests).
#
# This configuration does NOT apply to unit tests.
# List of integration tests that will ask for root privileges.
ROOT_TESTS = [
"package(net_util) & binary(unix_tap)",
]
# Do not run these tests on any platform.
DO_NOT_RUN = [
"package(io_uring)",
]
# Do not run these tests for aarch64 builds
DO_NOT_RUN_AARCH64 = [
"package(hypervisor)",
"package(e2e_tests)",
"package(kvm)",
]
# Do not run these tests for win64 builds
DO_NOT_RUN_WIN64 = [
"package(e2e_tests)",
]
# Deprecated test configuration for tools/run_tests
#
# This will eventually be fully replaced by the above configuration
class TestOption(enum.Enum):
# Do not build tests for all, or just some platforms.
@@ -70,11 +111,3 @@ CRATE_OPTIONS: Dict[str, List[TestOption]] = {
"sandbox": [TestOption.DO_NOT_RUN],
"net_util": [TestOption.REQUIRES_ROOT],
}
BUILD_FEATURES: Dict[str, str] = {
"x86_64-unknown-linux-gnu": "linux-x86_64",
"aarch64-unknown-linux-gnu": "linux-aarch64",
"armv7-unknown-linux-gnueabihf": "linux-armhf",
"x86_64-pc-windows-gnu": "win64",
"x86_64-pc-windows-msvc": "win64",
}


@@ -48,6 +48,15 @@ runcmd:
- mkdir /var/empty
# Install cargo-nextest using prebuilt binaries and validate checksum.
{% if v1.machine == 'aarch64' %}
- curl -LsSf https://get.nexte.st/0.9.43/linux-arm | tar zxf - -C /usr/bin
- echo "59374b208807b5002f96c815a04f6bcc80d16707c2e8046471bb41043324363e /usr/bin/cargo-nextest" | sha256sum -c -
{% else %}
- curl -LsSf https://get.nexte.st/0.9.43/linux | tar zxf - -C /usr/bin
- echo "0b100bd45bbae6e7a4fa05f8f2cee0ca231997875ec43f984cd5ffaa2e6e9a98 /usr/bin/cargo-nextest" | sha256sum -c -
{% endif %}
# Enable core dumps for debugging crashes
- echo "* soft core unlimited" > /etc/security/limits.conf


@@ -1 +1 @@
r0007
r0008

tools/nextest_package

@@ -5,6 +5,7 @@
import argparse
import json
from multiprocessing.pool import ThreadPool
import shlex
import shutil
from fnmatch import fnmatch
@@ -17,6 +18,9 @@ from impl.common import (
cmd,
cwd_context,
parallel,
print_timing_info,
quoted,
record_time,
)
# List of globs matching files in the source tree required by tests at runtime.
@@ -52,7 +56,7 @@ def collect_rust_libs():
def collect_test_binaries(metadata: Any, strip: bool):
"Collect all test binaries that are needed to run the tests."
target_dir = metadata["rust-build-meta"]["target-directory"]
target_dir = Path(metadata["rust-build-meta"]["target-directory"])
test_binaries = [
Path(suite["binary-path"]).relative_to(target_dir)
for suite in metadata["rust-binaries"].values()
@@ -64,15 +68,23 @@
for binary in crate
]
for binary_path in test_binaries + non_test_binaries:
def process_binary(binary_path: Path):
source_path = target_dir / binary_path
destination_path = binary_path
if strip:
stripped_path = source_path.with_suffix(".stripped")
rust_strip(f"--strip-all {source_path} -o {stripped_path}").fg()
yield (stripped_path, destination_path)
if (
not stripped_path.exists()
or source_path.stat().st_ctime > stripped_path.stat().st_ctime
):
rust_strip(f"--strip-all {source_path} -o {stripped_path}").fg()
return (stripped_path, destination_path)
else:
yield (source_path, destination_path)
return (source_path, destination_path)
# Parallelize rust_strip calls.
pool = ThreadPool()
return pool.map(process_binary, test_binaries + non_test_binaries)
def collect_test_data_files():
@@ -186,6 +198,7 @@ def main():
parser.add_argument("--output-directory", "-d")
parser.add_argument("--output-archive", "-o")
parser.add_argument("--clean", action="store_true")
parser.add_argument("--timing-info", action="store_true")
(args, nextest_list_args) = parser.parse_known_args()
chdir(CROSVM_ROOT)
@@ -201,16 +214,27 @@
if args.clean and output_directory.exists():
shutil.rmtree(output_directory)
metadata = cargo(
"nextest list --list-type binaries-only --message-format json",
*nextest_list_args,
).json()
with record_time("Listing tests"):
cargo(
"nextest list",
*(quoted(a) for a in nextest_list_args),
).fg()
with record_time("Listing tests metadata"):
metadata = cargo(
"nextest list --list-type binaries-only --message-format json",
*(quoted(a) for a in nextest_list_args),
).json()
collect_files(metadata, output_directory, strip_binaries=not args.no_strip)
generate_run_script(metadata, output_directory)
with record_time("Collecting files"):
collect_files(metadata, output_directory, strip_binaries=not args.no_strip)
generate_run_script(metadata, output_directory)
if output_archive:
generate_archive(output_directory, output_archive)
with record_time("Generating archive"):
generate_archive(output_directory, output_archive)
if args.timing_info:
print_timing_info()
if __name__ == "__main__":

tools/run_tests2 (new executable file, 314 lines)

@@ -0,0 +1,314 @@
#!/usr/bin/env python3
# Copyright 2023 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from pathlib import Path
import sys
from typing import Any, Iterable, List, Optional, Union
from impl.common import (
CROSVM_ROOT,
TOOLS_ROOT,
Command,
Remote,
quoted,
Styles,
argh,
console,
chdir,
cmd,
record_time,
run_main,
sudo_is_passwordless,
verbose,
)
from impl.test_target import Triple
from impl.test_config import (
ROOT_TESTS,
DO_NOT_RUN,
DO_NOT_RUN_AARCH64,
DO_NOT_RUN_WIN64,
)
from impl import testvm
rsync = cmd("rsync")
cargo = cmd("cargo")
# Name of the directory used to package all test files.
PACKAGE_NAME = "integration_tests_package"
def join_filters(items: Iterable[str], op: str):
return op.join(f"({i})" for i in items)
class TestFilter(object):
"""
Utility structure to join user-provided filter expressions with additional filters
See https://nexte.st/book/filter-expressions.html
"""
def __init__(self, expression: str):
self.expression = expression
def exclude(self, *exclude_exprs: str):
return self.subset(f"not ({join_filters(exclude_exprs, '|')})")
def subset(self, *subset_exprs: str):
subset_expr = join_filters(subset_exprs, "|")
if not self.expression:
return TestFilter(subset_expr)
return TestFilter(f"({self.expression}) & ({subset_expr})")
def to_args(self):
if not self.expression:
return
yield "--filter-expr"
yield quoted(self.expression)
def configure_cargo(cmd: Command, triple: Triple):
"Configures the provided cmd with cargo arguments and environment needed to build for triple."
return (
cmd.with_args(
"--workspace",
f"--features={triple.feature_flag}",
)
.with_color_flag()
.with_envs(triple.get_cargo_env())
)
class HostTarget(object):
def __init__(self, package_dir: Path):
self.run_cmd = cmd(package_dir / "run.sh").with_color_flag()
def run_tests(self, extra_args: List[Any]):
self.run_cmd.with_args(*extra_args).fg(style=Styles.live_truncated())
class SshTarget(object):
def __init__(self, package_archive: Path, remote: Remote):
console.print("Transferring integration tests package...")
with record_time("Transferring"):
remote.scp([package_archive], "")
with record_time("Unpacking"):
remote.ssh(cmd("tar xaf", package_archive.name)).fg(style=Styles.live_truncated())
self.remote_run_cmd = cmd(f"{PACKAGE_NAME}/run.sh").with_color_flag()
self.remote = remote
def run_tests(self, extra_args: List[Any]):
self.remote.ssh(self.remote_run_cmd.with_args(*extra_args)).fg(
style=Styles.live_truncated()
)
def check_host_prerequisites(run_root_tests: bool):
"Check various prerequisites for executing test binaries."
if os.name == "nt":
return
if run_root_tests:
console.print("Running tests that require root privileges. Refreshing sudo now.")
cmd("sudo true").fg()
for device in ["/dev/kvm", "/dev/vhost-vsock"]:
if not os.access(device, os.R_OK | os.W_OK):
console.print(f"{device} access is required", style="red")
sys.exit(1)
def get_vm_arch(triple: Triple):
if str(triple) == "x86_64-unknown-linux-gnu":
return "x86_64"
elif str(triple) == "aarch64-unknown-linux-gnu":
return "aarch64"
else:
raise Exception(f"{triple} is not supported for running tests in a VM.")
@argh.arg("--filter-expr", "-E", type=str, action="append", help="Nextest filter expression.")
@argh.arg("--platform", "-p", help="Which platform to test. (x86_64, aarch64, armhw, mingw64)")
@argh.arg("--dut", help="Which device to test on. (vm or host)")
@argh.arg("--no-run", help="Build only, do not run any tests.")
@argh.arg("--no-unit-tests", help="Do not run unit tests.")
@argh.arg("--no-integration-tests", help="Do not run integration tests.")
@argh.arg("--no-strip", help="Do not strip test binaries of debug info.")
@argh.arg("--run-root-tests", help="Enables integration tests that require root privileges.")
def main(
filter_expr: List[str] = [],
platform: Optional[str] = None,
dut: Optional[str] = None,
no_run: bool = False,
no_unit_tests: bool = False,
no_integration_tests: bool = False,
no_strip: bool = False,
run_root_tests: bool = False,
):
"""
Runs all crosvm tests
For details on how crosvm tests are organized, see https://crosvm.dev/book/testing.html
# Basic Usage
To run all unit tests for the host's native architecture:
$ ./tools/run_tests2
To run all unit tests for another supported architecture using an emulator (e.g. wine64,
qemu user space emulation):
$ ./tools/run_tests2 -p aarch64
$ ./tools/run_tests2 -p armhw
$ ./tools/run_tests2 -p mingw64
# Integration Tests
Integration tests can be run on a built-in virtual machine:
$ ./tools/run_tests2 --dut=vm
$ ./tools/run_tests2 --dut=vm -p aarch64
The virtual machine is automatically started for the test process and can be managed via the
`./tools/x86vm` or `./tools/aarch64vm` tools.
Integration tests can be run on the host machine as well, but cannot be guaranteed to work on
all configurations.
$ ./tools/run_tests2 --dut=host
# Test Filtering
This script supports nextest filter expressions: https://nexte.st/book/filter-expressions.html
For example to run all tests in `my-crate` and all crates that depend on it:
$ ./tools/run_tests2 [--dut=] -E 'rdeps(my-crate)'
"""
chdir(CROSVM_ROOT)
if not cmd("which cargo-nextest").success():
raise Exception("Cannot find cargo-nextest. Please re-run `./tools/install-deps`")
triple = Triple.from_shorthand(platform) if platform else Triple.host_default()
test_filter = TestFilter(join_filters(filter_expr, "|"))
# Disable the DUT if integration tests are not run.
if no_integration_tests:
dut = None
# Automatically enable tests that require root if sudo is passwordless
if not run_root_tests:
if dut == "host":
run_root_tests = sudo_is_passwordless()
elif dut == "vm":
# The test VMs have passwordless sudo configured.
run_root_tests = True
# Print summary of tests and where they will be executed.
if dut == "host":
dut_str = "Run on host"
elif dut == "vm" and os.name == "posix":
dut_str = f"Run on built-in {get_vm_arch(triple)} vm"
elif dut is None:
dut_str = "[yellow]Skip[/yellow]"
else:
raise Exception(
f"--dut={dut} is not supported. Options are --dut=host or --dut=vm (linux only)"
)
skip_str = "[yellow]skip[/yellow]"
unit_test_str = "Run on host" if not no_unit_tests else skip_str
integration_test_str = dut_str if dut else skip_str
console.print(f"Running tests for [green]{triple}[/green]")
console.print(f"With features: [green]{triple.feature_flag}[/green]")
console.print()
console.print(f" Unit tests: [bold]{unit_test_str}[/bold]")
console.print(f" Integration tests: [bold]{integration_test_str}[/bold]")
console.print()
# Print tips in certain configurations.
if dut and not run_root_tests:
console.print(
"[green]Tip:[/green] Skipping tests that require root privileges. "
+ "Use [bold]--run-root-tests[/bold] to enable them."
)
if not dut:
console.print(
"[green]Tip:[/green] To run integration tests on a built-in VM: "
+ "Use [bold]--dut=vm[/bold]"
)
console.print(
"[green]Tip:[/green] To run integration tests on the host: Use "
+ "[bold]--dut=host[/bold]"
)
# Prepare the dut for test execution
if dut == "host":
check_host_prerequisites(run_root_tests)
if dut == "vm":
testvm.build_if_needed(get_vm_arch(triple))
testvm.up(get_vm_arch(triple))
console.print()
console.rule("Building tests")
nextest_run = configure_cargo(cmd("cargo nextest run"), triple)
with record_time("Build"):
nextest_run.with_args("--no-run").fg(style=Styles.live_truncated())
if no_run:
return
if not no_unit_tests:
console.print()
console.rule("Running unit tests")
with record_time("Unit Tests"):
nextest_run.with_args("--lib --bins --benches", *test_filter.to_args()).fg(
style=Styles.live_truncated()
)
if dut:
package_dir = triple.target_dir / PACKAGE_NAME
package_archive = package_dir.with_suffix(".tar.zst")
nextest_package = configure_cargo(cmd(TOOLS_ROOT / "nextest_package"), triple)
test_exclusions = [*DO_NOT_RUN]
if not run_root_tests:
test_exclusions += ROOT_TESTS
if triple == Triple.from_shorthand("mingw64"):
test_exclusions += DO_NOT_RUN_WIN64
if triple == Triple.from_shorthand("aarch64"):
test_exclusions += DO_NOT_RUN_AARCH64
test_filter = test_filter.exclude(*test_exclusions)
console.print()
console.rule("Packaging integration tests")
with record_time("Packing"):
nextest_package(
"--test *",
f"-d {package_dir}",
f"-o {package_archive}" if dut != "host" else None,
"--no-strip" if no_strip else None,
*test_filter.to_args(),
"--verbose" if verbose() else None,
).fg(style=Styles.live_truncated())
target: Union[HostTarget, SshTarget]
if dut == "host":
target = HostTarget(package_dir)
elif dut == "vm":
remote = Remote("localhost", testvm.ssh_opts(get_vm_arch(triple)))
target = SshTarget(package_archive, remote)
console.print()
console.rule("Running integration tests")
with record_time("Integration tests"):
target.run_tests([*test_filter.to_args()])
if __name__ == "__main__":
run_main(main)