tools/presubmit: New presubmit checks

This new script merges tools/health-check with tools/presubmit. It
can execute checks in parallel and now also runs crosvm tests and
clippy for all platforms.

This reduces presubmit check time for "everything" on a clean state
from 6m30s to 2m.

Time to run checks after a no-op change to the base crate is 1m,
down from 3m.

The default set of checks is optimized to cover all platforms while
avoiding blocking on the build cache. This allows it to complete in
~30s on incremental builds.

LUCI build recipes will be updated to use this script, to ensure
that CI and local checks are always in sync.

The old health-check script forwards to the new presubmit script
until CI is updated to use the new script directly.

BUG=b:254317173
TEST=tools/presubmit

Change-Id: Iafde602bab61dbd2ee2f5f74918dc182b2832d47
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4277625
Commit-Queue: Dennis Kempin <denniskempin@google.com>
Reviewed-by: Zihan Chen <zihanchen@google.com>
Reviewed-by: Vikram Auradkar <auradkar@google.com>

@@ -30,13 +30,19 @@ def main(
# Note: Clippy checks are configured in .cargo/config.toml
common_args = [
"--fix" if fix else None,
"--message-format=json" if json else None,
"--locked" if locked else None,
"--all-targets",
"--",
"-Dwarnings",
]
if fix:
common_args = [
"--fix",
"--allow-dirty",
"--allow-staged",
*common_args,
]
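# Note: `cargo clippy --fix` refuses to modify a dirty or staged working tree
# unless --allow-dirty / --allow-staged are passed.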
print("Clippy crosvm workspace")
clippy(
"--workspace",

tools/custom_checks (new executable file, 201 lines)

@@ -0,0 +1,201 @@
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Contains custom presubmit checks implemented in python.
#
# These are implemented as a separate CLI tool from tools/presubmit as the presubmit
# framework needs to call a subprocess to execute checks.
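# Example invocations (as issued by tools/presubmit; file arguments are illustrative):
#   ./tools/custom_checks check-line-endings tools/presubmit README.md
#   ./tools/custom_checks check-copyright-header --fix some_new_file.rs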
from fnmatch import fnmatch
import os
import re
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, Generator, List, cast
from impl.common import (
cmd,
cwd_context,
run_commands,
)
def check_platform_independent(*files: str):
"Checks the provided files to ensure they are free of platform-dependent code."
cfg_unix = "cfg.*unix"
cfg_linux = "cfg.*linux"
cfg_windows = "cfg.*windows"
cfg_android = "cfg.*android"
target_os = "target_os = "
target_os_pattern = re.compile(
"%s|%s|%s|%s|%s" % (cfg_android, cfg_linux, cfg_unix, cfg_windows, target_os)
)
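# e.g. lines such as `#[cfg(target_os = "linux")]`, `#[cfg(unix)]` or
# `cfg!(windows)` match this pattern and fail the check.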
for file in files:
for line_number, line in enumerate(open(file, encoding="utf8"), start=1):
if re.search(target_os_pattern, line):
raise Exception(f"Found unexpected platform-dependent code in {file}:{line_number}")
CRLF_LINE_ENDING_FILES: List[str] = [
"**.bat",
"**.ps1",
"e2e_tests/tests/goldens/backcompat_test_simple_lspci_win.txt",
]
def is_crlf_file(file: str):
for glob in CRLF_LINE_ENDING_FILES:
if fnmatch(file, glob):
return True
return False
def check_line_endings(*files: str):
"Checks line endings. Windows-only files use crlf; all others use lf."
for line in cmd("git ls-files --eol", *files).lines():
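# A typical line of `git ls-files --eol` output looks like:
#   i/lf    w/lf    attr/                   vhost/Cargo.toml
# parts[0] is the index ("i/") ending, parts[1] the working tree ("w/") ending.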
parts = line.split()
file = parts[3]
index_endings = parts[0][2:]
wdir_endings = parts[1][2:]
def check_endings(endings: str):
if is_crlf_file(file):
if endings not in ("crlf", "mixed"):
raise Exception(f"{file} Expected crlf file endings. Found {endings}")
else:
if endings in ("crlf", "mixed"):
raise Exception(f"{file} Expected lf file endings. Found {endings}")
check_endings(index_endings)
check_endings(wdir_endings)
def check_rust_lockfiles(*_files: str):
"Verifies that none of the Cargo.lock files require updates."
lockfiles = [Path("Cargo.lock"), *Path("common").glob("*/Cargo.lock")]
for path in lockfiles:
with cwd_context(path.parent):
if not cmd("cargo update --workspace --locked").success():
print(f"{path} is not up-to-date.")
print()
print("You may need to rebase your changes and run `cargo update --workspace`")
print("(or ./tools/run_tests) to ensure the Cargo.lock file is current.")
raise Exception("Cargo.lock out of date")
# These crosvm features are currently not built upstream. Do not add to this list.
KNOWN_DISABLED_FEATURES = [
"default-no-sandbox",
"direct",
"libvda",
"whpx",
]
def check_rust_features(*_files: str):
"Verifies that all cargo features are included in the list of features we compile upstream."
metadata = json.loads(cmd("cargo metadata --format-version=1").stdout())
crosvm_metadata = next(p for p in metadata["packages"] if p["name"] == "crosvm")
features = cast(Dict[str, List[str]], crosvm_metadata["features"])
def collect_features(feature_name: str) -> Generator[str, None, None]:
yield feature_name
for feature in features[feature_name]:
name = feature.split("/")[0]
if name in features:
yield from collect_features(name)
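# e.g. with features = {"all-x86_64": ["audio", "gpu/vulkan"], "audio": []},
# collect_features("all-x86_64") yields "all-x86_64" and "audio"; "gpu/vulkan"
# names a dependency's feature, so its "gpu" prefix is only followed if "gpu"
# is itself a crosvm feature. (Hypothetical feature names for illustration.)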
all_platform_features = set(
(
*collect_features("all-x86_64"),
*collect_features("all-aarch64"),
*collect_features("all-armhf"),
*collect_features("all-mingw64"),
*collect_features("all-msvc64"),
)
)
disabled_features = [
feature
for feature in features
if feature not in all_platform_features and feature not in KNOWN_DISABLED_FEATURES
]
if disabled_features:
raise Exception(
f"The features {', '.join(disabled_features)} are not enabled in upstream crosvm builds."
)
LICENSE_HEADER_RE = (
r".*Copyright (?P<year>20[0-9]{2})(?:-20[0-9]{2})? The ChromiumOS Authors\n"
r".*Use of this source code is governed by a BSD-style license that can be\n"
r".*found in the LICENSE file\.\n"
r"( *\*/\n)?" # allow the end of a C-style comment before the blank line
r"\n"
)
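# Accepts headers such as:
#   // Copyright 2023 The ChromiumOS Authors
#   // Use of this source code is governed by a BSD-style license that can be
#   // found in the LICENSE file.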
NEW_LICENSE_HEADER = [
f"Copyright {datetime.now().year} The ChromiumOS Authors",
"Use of this source code is governed by a BSD-style license that can be",
"found in the LICENSE file.",
]
def new_licence_header(file_suffix: str):
if file_suffix in (".py", "", ".policy", ".sh"):
prefix = "#"
else:
prefix = "//"
return "\n".join(f"{prefix} {line}" for line in NEW_LICENSE_HEADER) + "\n\n"
def check_copyright_header(*files: str, fix: bool = False):
"Checks copyright header. Can 'fix' them if needed by adding the header."
license_re = re.compile(LICENSE_HEADER_RE, re.MULTILINE)
for file_path in (Path(f) for f in files):
header = file_path.open("r").read(512)
license_match = license_re.search(header)
if license_match:
continue
# Generated files do not need a copyright header.
if "generated by" in header:
continue
if fix:
print(f"Adding copyright header: {file_path}")
contents = file_path.read_text()
file_path.write_text(new_licence_header(file_path.suffix) + contents)
else:
raise Exception(f"Bad copyright header: {file_path}")
def check_file_ends_with_newline(*files: str, fix: bool = False):
"Checks if files end with a newline."
for file_path in (Path(f) for f in files):
with file_path.open("rb") as file:
# Skip empty files
file.seek(0, os.SEEK_END)
if file.tell() == 0:
continue
# Check last byte of the file
file.seek(-1, os.SEEK_END)
file_end = file.read(1)
if file_end.decode("utf-8") != "\n":
if fix:
file_path.write_text(file_path.read_text() + "\n")
else:
raise Exception(f"File does not end with a newline {file_path}")
if __name__ == "__main__":
run_commands(
check_file_ends_with_newline,
check_copyright_header,
check_rust_features,
check_rust_lockfiles,
check_line_endings,
check_platform_independent,
)

@@ -72,6 +72,7 @@ if sys.platform == "linux":
[
f"--env OUTSIDE_UID={os.getuid()}",
f"--env OUTSIDE_GID={os.getgid()}",
f"--env TERM={os.environ.get('TERM', 'xterm-256color')}",
]
)
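# TERM is forwarded so tools running inside the container can render colors
# and spinners correctly.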

@@ -27,9 +27,9 @@ from impl.common import (
def main(check: bool = False, nightly: bool = False):
chdir(CROSVM_ROOT)
cmd(
"./tools/health-check --all rust_format python_format markdown_format",
"./tools/presubmit format",
"--fix" if not check else None,
"--nightly" if nightly else None,
"--nightly-fmt" if nightly else None,
).fg()

@@ -3,396 +3,28 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, Generator, List, cast
from impl.check_code_hygiene import has_crlf_line_endings, has_lf_line_endings
from impl.common import (
CROSVM_ROOT,
TOOLS_ROOT,
argh,
chdir,
cmd,
cwd_context,
is_kiwi_repo,
parallel,
run_main,
)
from impl.health_check import Check, CheckContext, run_checks
python = cmd("python3")
mypy = cmd("mypy").with_color_env("MYPY_FORCE_COLOR")
black = cmd("black").with_color_arg(always="--color", never="--no-color")
mdformat = cmd("mdformat")
lucicfg = cmd("third_party/depot_tools/lucicfg")
def check_python_tests(_: CheckContext):
"No matter which python files have changed, run all available python tests."
PYTHON_TESTS = [
"tests.cl_tests",
"impl.common",
]
with cwd_context(TOOLS_ROOT):
parallel(*cmd("python3 -m").foreach(PYTHON_TESTS)).fg(quiet=True)
def check_python_types(context: CheckContext):
"Run mypy on all python files to type-check."
parallel(*mypy("--pretty").foreach(context.all_files)).fg(quiet=True)
def check_python_format(context: CheckContext):
parallel(*black("--check" if not context.fix else None).foreach(context.modified_files)).fg(
quiet=not context.fix
)
def check_crlf_line_endings(context: CheckContext):
"Checks for crlf line endings."
crlf_endings = has_crlf_line_endings(context.all_files)
if crlf_endings:
print("Error: Following files have crlf(dos) line endings")
print(*crlf_endings)
raise Exception("Files with crlf line endings.")
def check_lf_line_endings(context: CheckContext):
"Checks for lf line endings."
lf_endings = has_lf_line_endings(context.all_files)
if lf_endings:
print("Error: Following files have lf(unix) line endings")
print(*lf_endings)
raise Exception("Files with lf line endings.")
def check_markdown_format(context: CheckContext):
"Runs mdformat on all markdown files."
if "blaze" in mdformat("--version").stdout():
raise Exception(
"You are using google's mdformat. "
+ "Please update your PATH to ensure the pip installed mdformat is available."
)
parallel(
*mdformat("--wrap 100", "--check" if not context.fix else "").foreach(
context.modified_files
)
).fg(quiet=not context.fix)
def check_rust_clippy(_: CheckContext):
"Runs clippy on the whole project, no matter which rs files were touched."
cmd("./tools/clippy --locked").with_color_flag().fg(quiet=True)
def check_cargo_doc(_: CheckContext):
"Runs cargo-doc on the whole project and verifies that no warnings are emitted."
cmd("./tools/cargo-doc").with_env("RUSTDOCFLAGS", "-D warnings").with_color_flag().fg(
quiet=True
)
def check_rust_format(context: CheckContext):
"Runs rustfmt on all modified files."
if context.nightly:
rustfmt = cmd(
cmd("rustup +nightly which rustfmt"),
"--config imports_granularity=item,group_imports=StdExternalCrate",
)
else:
rustfmt = cmd(cmd("rustup which rustfmt"))
parallel(
*rustfmt("--check" if not context.fix else "")
.with_color_flag()
.foreach(context.modified_files)
).fg(quiet=not context.fix)
def check_rust_lockfiles(_: CheckContext):
"Verifies that none of the Cargo.lock files require updates."
lockfiles = [Path("Cargo.lock"), *Path("common").glob("*/Cargo.lock")]
for path in lockfiles:
with cwd_context(path.parent):
if not cmd("cargo update --workspace --locked").success():
print(f"{path} is not up-to-date.")
print()
print("You may need to rebase your changes and run `cargo update --workspace`")
print("(or ./tools/run_tests) to ensure the Cargo.lock file is current.")
raise Exception("Cargo.lock out of date")
# These crosvm features are currently not built upstream. Do not add to this list.
KNOWN_DISABLED_FEATURES = [
"default-no-sandbox",
"direct",
"libvda",
"whpx",
]
# List of python files and globs on which we run checks.
PYTHON_FILES = [
"tools/**.py",
]
def check_rust_features(_: CheckContext):
"Verifies that all cargo features are included in the list of features we compile upstream."
metadata = json.loads(cmd("cargo metadata --format-version=1").stdout())
crosvm_metadata = next(p for p in metadata["packages"] if p["name"] == "crosvm")
features = cast(Dict[str, List[str]], crosvm_metadata["features"])
def collect_features(feature_name: str) -> Generator[str, None, None]:
yield feature_name
for feature in features[feature_name]:
name = feature.split("/")[0]
if name in features:
yield from collect_features(name)
all_platform_features = set(
(
*collect_features("all-x86_64"),
*collect_features("all-aarch64"),
*collect_features("all-armhf"),
*collect_features("all-mingw64"),
*collect_features("all-msvc64"),
)
)
disabled_features = [
feature
for feature in features
if feature not in all_platform_features and feature not in KNOWN_DISABLED_FEATURES
]
if disabled_features:
raise Exception(
f"The features {', '.join(disabled_features)} are not enabled in upstream crosvm builds."
)
LICENSE_HEADER_RE = (
r".*Copyright (?P<year>20[0-9]{2})(?:-20[0-9]{2})? The ChromiumOS Authors\n"
r".*Use of this source code is governed by a BSD-style license that can be\n"
r".*found in the LICENSE file\.\n"
r"( *\*/\n)?" # allow the end of a C-style comment before the blank line
r"\n"
cmd,
chdir,
)
NEW_LICENSE_HEADER = [
f"Copyright {datetime.now().year} The ChromiumOS Authors",
"Use of this source code is governed by a BSD-style license that can be",
"found in the LICENSE file.",
]
def new_licence_header(file_suffix: str):
if file_suffix in (".py", "", ".policy", ".sh"):
prefix = "#"
else:
prefix = "//"
return "\n".join(f"{prefix} {line}" for line in NEW_LICENSE_HEADER) + "\n\n"
def check_copyright_header(context: CheckContext):
"Checks copyright header. Can 'fix' them if needed by adding the header."
license_re = re.compile(LICENSE_HEADER_RE, re.MULTILINE)
for file in context.modified_files:
header = file.open("r").read(512)
license_match = license_re.search(header)
if license_match:
continue
# Generated files do not need a copyright header.
if "generated by" in header:
continue
if context.fix:
print(f"Adding copyright header: {file}")
contents = file.read_text()
file.write_text(new_licence_header(file.suffix) + contents)
else:
raise Exception(f"Bad copyright header: {file}")
def check_infra_configs(context: CheckContext):
"Validate luci configs by sending them to luci-config."
for file in context.modified_files:
if context.fix:
lucicfg("fmt", file).fg()
lucicfg("generate", file).fg()
lucicfg("fmt --dry-run", file).fg(quiet=True)
# TODO: Validate config files. Requires authentication with luci inside docker.
def check_infra_tests(context: CheckContext):
"Run recipe.py tests, all of them, regardless of which files were modified."
recipes = cmd("infra/recipes.py").with_path_env("third_party/depot_tools")
if context.fix:
recipes("test train --py3-only").fg()
recipes("test run --py3-only").fg(quiet=True)
def check_file_ends_with_newline(context: CheckContext):
"Checks if files end with a newline."
for file_path in context.modified_files:
with file_path.open("rb") as file:
# Skip empty files
file.seek(0, os.SEEK_END)
if file.tell() == 0:
continue
# Check last byte of the file
file.seek(-1, os.SEEK_END)
file_end = file.read(1)
if file_end.decode("utf-8") != "\n":
if context.fix:
file_path.write_text(file_path.read_text() + "\n")
else:
raise Exception(f"File does not end with a newline {file_path}")
# List of files have crlf line endings.
CRLF_LINE_ENDING_FILES: List[str] = [
"**.bat",
"**.ps1",
"e2e_tests/tests/goldens/backcompat_test_simple_lspci_win.txt",
]
# List of all checks and on which files they should run.
CHECKS: List[Check] = [
Check(
check_copyright_header,
files=["**.rs", "**.py", "**.c", "**.h", "**.policy", "**.sh"],
exclude=[
"infra/recipes.py",
"hypervisor/src/whpx/whpx_sys/*.h",
"third_party/vmm_vhost/*",
"net_sys/src/lib.rs",
"system_api/src/bindings/*",
],
python_tools=True,
),
Check(
check_rust_format,
files=["**.rs"],
exclude=["system_api/src/bindings/*"],
can_fix=True,
),
Check(
check_rust_lockfiles,
files=["**Cargo.toml"],
),
Check(
check_rust_features,
files=["Cargo.toml"],
),
Check(
check_rust_clippy,
files=["**.rs", "**Cargo.toml"],
),
Check(
check_cargo_doc,
files=["**.rs", "**Cargo.toml"],
),
Check(
check_python_tests,
files=PYTHON_FILES,
python_tools=True,
),
Check(
check_python_types,
files=PYTHON_FILES,
exclude=["tools/windows/*"],
python_tools=True,
),
Check(
check_python_format,
files=["**.py"],
python_tools=True,
exclude=["infra/recipes.py"],
can_fix=True,
),
Check(
check_markdown_format,
files=["**.md"],
exclude=[
"infra/README.recipes.md",
"docs/book/src/appendix/memory_layout.md",
],
can_fix=True,
),
Check(
check_file_ends_with_newline,
exclude=[
"**.h264",
"**.vp8",
"**.vp9",
"**.ivf",
"**.bin",
"**.png",
"**.min.js",
"**.drawio",
"**.json",
],
),
Check(check_crlf_line_endings, exclude=CRLF_LINE_ENDING_FILES),
Check(check_lf_line_endings, files=CRLF_LINE_ENDING_FILES),
]
# We disable LUCI infra related tests because Kokoro doesn't have the internet
# connectivity that the tests rely on.
if not is_kiwi_repo():
CHECKS.extend(
[
Check(
check_infra_configs,
files=["infra/config/**.star"],
can_fix=True,
),
Check(
check_infra_tests,
files=["infra/**.py"],
can_fix=True,
),
]
)
CHECKS_DICT = dict((c.name, c) for c in CHECKS)
@argh.arg("--list-checks", default=False, help="List names of available checks and exit.")
@argh.arg("--fix", default=False, help="Asks checks to fix problems where possible.")
@argh.arg("--all", default=False, help="Run on all files instead of just modified files.")
@argh.arg(
"checks",
choices=[*CHECKS_DICT.keys(), []],
help="Optional list of checks to run. Defaults to run all checks.",
)
def main(
list_checks: bool = False,
fix: bool = False,
all: bool = False,
nightly: bool = False,
*checks: str,
):
"""
Run health checks on crosvm. This includes formatting, linters and other various checks.
"""
def main(list_checks: bool = False, all: bool = False, *check_names: str):
chdir(CROSVM_ROOT)
if not checks:
checks_list = [*CHECKS_DICT.values()]
else:
checks_list = [CHECKS_DICT[check] for check in checks]
if list_checks:
for check in checks_list:
print(check.name)
return
success = run_checks(checks_list, fix=fix, run_on_all_files=all, nightly=nightly)
sys.exit(0 if success else 1)
if not list_checks:
print("Deprecated. Please use ./tools/presubmit instead")
if not check_names:
check_names = ("health_checks",)
cmd(
sys.executable,
"tools/presubmit",
"--no-delta" if all else None,
"--list-checks" if list_checks else None,
*check_names
).fg()
if __name__ == "__main__":

@@ -1,156 +0,0 @@
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from pathlib import Path
from typing import List
import re
import subprocess
import sys
USAGE = """\
Checks code hygiene of a given directory.
The tool verifies that
- code under given directory has no conditionally compiled platform specific code.
- crates in the current directory, excluding crates in ./common/, do not depend
on sys_util, sys_util_core or win_sys_util.
To check
$ ./tools/impl/check_code_hygiene ./common/sys_util_core
On finding platform specific code, the tool prints the file, line number and the
line containing conditional compilation.
On finding dependency on sys_util, sys_util_core or on win_sys_util, the tool prints
the names of crates.
"""
def has_platform_dependent_code(rootdir: Path):
"""Recursively searches for target os specific code in the given rootdir.
Returns False and the relative file path if target-specific code is found.
Returns False and rootdir if rootdir does not exist or is not a directory.
Otherwise returns True and an empty string.
Args:
rootdir: Base directory path to search for.
"""
if not rootdir.is_dir():
return False, "'" + str(rootdir) + "' does not exist or is not a directory"
cfg_unix = "cfg.*unix"
cfg_linux = "cfg.*linux"
cfg_windows = "cfg.*windows"
cfg_android = "cfg.*android"
target_os = "target_os = "
target_os_pattern = re.compile(
"%s|%s|%s|%s|%s" % (cfg_android, cfg_linux, cfg_unix, cfg_windows, target_os)
)
for file_path in rootdir.rglob("**/*.rs"):
for line_number, line in enumerate(open(file_path, encoding="utf8")):
if re.search(target_os_pattern, line):
return False, str(file_path) + ":" + str(line_number) + ":" + line
return True, ""
def is_sys_util_independent():
"""Recursively searches for crates that depend on sys_util, sys_util_core or win_sys_util.
Does not search crates in common/ as they are allowed to be platform specific.
Returns False and a list of crates that depend on those crates. Otherwise
returns True and an empty list.
"""
crates: list[str] = []
sys_util_crates = re.compile("sys_util|sys_util_core|win_sys_util")
files: list[Path] = list(Path(".").glob("**/Cargo.toml"))
files.extend(Path("src").glob("**/*.rs"))
# Exclude common as it is allowed to depend on sys_util and exclude Cargo.toml
# from root directory as it contains workspace related entries for sys_util.
files[:] = [
file for file in files if not file.is_relative_to("common") and str(file) != "Cargo.toml"
]
for file_path in files:
with open(file_path) as open_file:
for line in open_file:
if sys_util_crates.match(line):
crates.append(str(file_path))
return not crates, crates
def has_line_endings(file_pattern: str, line_ending_pattern: str):
"""Searches a git repo for files whose line endings match the given pattern.
Returns a list of matching files.
"""
process = subprocess.Popen(
f"git ls-files --eol {file_pattern}",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
shell=True,
)
stdout, _ = process.communicate()
dos_files: list[str] = []
if process.returncode != 0:
return dos_files
crlf_re = re.compile(line_ending_pattern)
assert process.stdout
for line in iter(stdout.splitlines()):
# A typical output of git ls-files --eol looks like below
# i/lf w/lf attr/ vhost/Cargo.toml
fields = line.split()
if fields and crlf_re.search(fields[0] + fields[1]):
dos_files.append(fields[3] + "\n")
return dos_files
def has_crlf_line_endings(files: List[Path]):
f = " ".join([str(file) for file in files])
return has_line_endings(f, "crlf|mixed")
def has_lf_line_endings(files: List[Path]):
f = " ".join([str(file) for file in files])
return has_line_endings(f, "\blf|mixed")
def main():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument("path", type=Path, help="Path of the directory to check.")
args = parser.parse_args()
hygiene, error = has_platform_dependent_code(args.path)
if not hygiene:
print("Error: Platform dependent code not allowed in sys_util_core crate.")
print("Offending line: " + error)
sys.exit(-1)
hygiene, crates = is_sys_util_independent()
if not hygiene:
print("Error: Following files depend on sys_util, sys_util_core or on win_sys_util")
print(crates)
sys.exit(-1)
crlf_endings = has_crlf_line_endings()
if crlf_endings:
print("Error: Following files have crlf(dos) line encodings")
print(*crlf_endings)
sys.exit(-1)
if __name__ == "__main__":
main()

@@ -30,6 +30,7 @@ import shutil
import subprocess
import sys
import traceback
from copy import deepcopy
from io import StringIO
from math import ceil
from multiprocessing.pool import ThreadPool
@@ -51,11 +52,12 @@ from typing import (
)
import argh # type: ignore
import rich
import rich.console
import rich.live
import rich.spinner
import rich.text
from rich.console import Console, Group
from rich.live import Live
from rich.spinner import Spinner
from rich.text import Text
# File where to store http headers for gcloud authentication
AUTH_HEADERS_FILE = Path(gettempdir()) / f"crosvm_gcloud_auth_headers_{getpass.getuser()}"
@@ -159,7 +161,7 @@ class Command(object):
In contrast to bash, globs are *not* evaluated, but can easily be provided using Path:
>>> Command('ls -l', *Path('.').glob('*.toml'))
>>> Command('ls -l', *Path(CROSVM_ROOT).glob('*.toml'))
Command('ls', '-l', ...)
None or False are ignored to make it easy to include conditional arguments:
@@ -203,10 +205,12 @@
*args: Any,
stdin_cmd: Optional["Command"] = None,
env_vars: Dict[str, str] = {},
cwd: Optional[Path] = None,
):
self.args = Command.__parse_cmd(args)
self.stdin_cmd = stdin_cmd
self.env_vars = env_vars
self.cwd = cwd
### Builder API to construct commands
@@ -217,9 +221,19 @@
>>> cargo.with_args('clippy')
Command('cargo', 'clippy')
"""
cmd = Command()
cmd = deepcopy(self)
cmd.args = [*self.args, *Command.__parse_cmd(args)]
cmd.env_vars = self.env_vars
return cmd
def with_cwd(self, cwd: Optional[Path]):
"""Changes the working directory the command is executed in.
>>> pwd = Command('pwd')
>>> pwd.with_cwd('/tmp').stdout()
'/tmp'
"""
cmd = deepcopy(self)
cmd.cwd = cwd
return cmd
def __call__(self, *args: Any):
@@ -240,9 +254,7 @@
The variable is removed if value is None.
"""
cmd = Command()
cmd.args = self.args
cmd.env_vars = self.env_vars
cmd = deepcopy(self)
for key, value in envs.items():
if value is not None:
cmd.env_vars[key] = value
@@ -254,10 +266,7 @@
def with_path_env(self, new_path: str):
"""Returns a command with a path added to the PATH variable."""
path_var = self.env_vars.get("PATH", os.environ.get("PATH", ""))
cmd = Command()
cmd.args = self.args
cmd.env_vars = {**self.env_vars, "PATH": f"{path_var}:{new_path}"}
return cmd
return self.with_env("PATH", f"{path_var}:{new_path}")
def with_color_arg(
self,
@@ -337,7 +346,7 @@
>>> Command('false').fg()
Traceback (most recent call last):
...
subprocess.CalledProcessError: Command 'false' returned non-zero exit status 1.
subprocess.CalledProcessError...
But can be disabled:
@@ -354,6 +363,9 @@
More sophisticated means of outputting stdout/err are available via `Styles`:
>>> Command("echo foo").fg(style=Styles.live_truncated())
foo
0
Will output the results of the command but truncate output after a few lines. See `Styles`
for more options.
@@ -378,7 +390,7 @@
if style is None or verbose():
return self.__run(stdout=None, stderr=None, check=check).returncode
else:
process = self.__popen(stderr=STDOUT)
process = self.popen(stderr=STDOUT)
style(process)
returncode = process.wait()
if returncode != 0 and check:
@@ -428,7 +440,7 @@
print(f"$ {self}")
return self.__run(stdout=PIPE, stderr=PIPE, check=False).returncode == 0
def stdout(self, check: bool = True):
def stdout(self, check: bool = True, stderr: int = PIPE):
"""
Runs a program and returns stdout.
@@ -436,7 +448,7 @@
"""
if very_verbose():
print(f"$ {self}")
return self.__run(stdout=PIPE, stderr=PIPE, check=check).stdout.strip()
return self.__run(stdout=PIPE, stderr=stderr, check=check).stdout.strip()
def json(self, check: bool = True) -> Any:
"""
@@ -450,13 +462,13 @@
else:
return None
def lines(self, check: bool = True):
def lines(self, check: bool = True, stderr: int = PIPE):
"""
Runs a program and returns stdout line by line.
The program will not be visible to the user unless --very-verbose is specified.
"""
return self.stdout(check=check).splitlines()
return self.stdout(check=check, stderr=stderr).splitlines()
### Utilities
@@ -487,6 +499,7 @@
print(f"env: {k}={v}")
result = subprocess.run(
self.args,
cwd=self.cwd,
stdout=stdout,
stderr=stderr,
stdin=self.__stdin_stream(),
@@ -508,15 +521,16 @@
def __stdin_stream(self):
if self.stdin_cmd:
return self.stdin_cmd.__popen().stdout
return self.stdin_cmd.popen().stdout
return None
def __popen(self, stderr: Optional[int] = PIPE) -> "subprocess.Popen[str]":
def popen(self, stderr: Optional[int] = PIPE) -> "subprocess.Popen[str]":
"""
Runs a program and returns the Popen object of the running process.
"""
return subprocess.Popen(
self.args,
cwd=self.cwd,
stdout=subprocess.PIPE,
stderr=stderr,
stdin=self.__stdin_stream(),
@@ -562,18 +576,18 @@
def output(process: "subprocess.Popen[str]"):
assert process.stdout
spinner = Spinner("dots")
lines: List[Text] = []
spinner = rich.spinner.Spinner("dots")
lines: List[rich.text.Text] = []
stdout: List[str] = []
with Live(refresh_per_second=30, transient=True) as live:
with rich.live.Live(refresh_per_second=30, transient=True) as live:
for line in iter(process.stdout.readline, ""):
stdout.append(line.strip())
lines.append(Text.from_ansi(line.strip(), no_wrap=True))
lines.append(rich.text.Text.from_ansi(line.strip(), no_wrap=True))
while len(lines) > num_lines:
lines.pop(0)
live.update(Group(Text(""), *lines, spinner))
live.update(rich.console.Group(rich.text.Text(""), *lines, spinner))
if process.wait() == 0:
console.print(Group(Text(""), *lines))
console.print(rich.console.Group(rich.text.Text(""), *lines))
else:
for line in stdout:
print(line)
@@ -586,7 +600,9 @@
def output(process: "subprocess.Popen[str]"):
assert process.stdout
with Live(Spinner("dots", title), refresh_per_second=30, transient=True):
with rich.live.Live(
rich.spinner.Spinner("dots", title), refresh_per_second=30, transient=True
):
stdout = process.stdout.read()
if process.wait() == 0:
@@ -626,16 +642,8 @@
class Remote(object):
""" "
Wrapper around the cmd() API that allows execution of commands via SSH.
>>> remote = Remote("foobar", {"opt": "value"})
>>> remote.cmd('printf "(%s)"', quoted("a b c"))
Command('ssh', 'foobar', '-T', '-oopt=value', 'bash -O huponexit -c \\'printf (%s) "a b c"\\'')
A remote working directory can be set:
>>> remote.cmd('printf "(%s)"', quoted("a b c")).with_cwd(Path("target_dir"))
Command('ssh', 'foobar', '-T', '-oopt=value', 'cd target_dir && bash -O huponexit -c \\'printf (%s) "a b c"\\'')
"""
Wrapper around the cmd() API that allows execution of commands via SSH.
"""
def __init__(self, host: str, opts: Dict[str, str]):
@@ -736,8 +744,8 @@ cwd = cwd_context
parallel = ParallelCommands
def run_main(main_fn: Callable[..., Any]):
run_commands(default_fn=main_fn)
def run_main(main_fn: Callable[..., Any], usage: Optional[str] = None):
run_commands(default_fn=main_fn, usage=usage)
def run_commands(
@@ -799,7 +807,7 @@ def parse_common_args():
These args are parsed separately of the run_main/run_commands method so we can access
verbose/etc before the commands arguments are parsed.
"""
parser = argparse.ArgumentParser()
parser = argparse.ArgumentParser(add_help=False)
add_common_args(parser)
return parser.parse_known_args()[0]
@@ -1140,9 +1148,10 @@ class Triple(NamedTuple):
return f"{self.arch}-{self.vendor}-{self.sys}-{self.abi}"
console = Console()
console = rich.console.Console()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
(failures, num_tests) = doctest.testmod(optionflags=doctest.ELLIPSIS)
sys.exit(1 if failures > 0 else 0)

@@ -1,166 +0,0 @@
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from dataclasses import dataclass
from fnmatch import fnmatch
from pathlib import Path
from time import time
from typing import Callable, List, NamedTuple
from impl.common import all_tracked_files, cmd, verbose
git = cmd("git")
@dataclass
class CheckContext(object):
"Information passed to each check when it's called."
# Whether or not --fix was set and checks should attempt to fix problems they encounter.
fix: bool
# Use rust nightly version for rust checks
nightly: bool
# All files that this check should cover (e.g. all python files on a python check).
all_files: List[Path]
# Those files of all_files that were modified locally.
modified_files: List[Path]
# Files that do not exist upstream and have been added locally.
new_files: List[Path]
class Check(NamedTuple):
"Metadata for each check, defining on which files it should run."
# Function to call for this check
check_function: Callable[[CheckContext], None]
# List of globs that this check should be triggered on
files: List[str] = []
python_tools: bool = False
# List of globs to exclude from this check
exclude: List[str] = []
# Whether or not this check can fix issues.
can_fix: bool = False
@property
def name(self):
name = self.check_function.__name__
if name.startswith("check_"):
return name[len("check_") :]
return name
def list_file_diff():
"""
Lists files that were modified compared to the upstream branch.
Falls back to all files tracked by git if there is no upstream branch.
"""
upstream = git("rev-parse @{u}").stdout(check=False)
if upstream:
for line in git("diff --name-status", upstream).lines():
parts = line.split("\t", 1)
file = Path(parts[1].strip())
if file.is_file():
yield (parts[0].strip(), file)
else:
print("WARNING: Not tracking a branch. Checking all files.")
for file in all_tracked_files():
yield ("M", file)
def should_run_check_on_file(check: Check, file: Path):
"Returns true if `check` should run on `file`."
# Skip third_party except vmm_vhost.
if str(file).startswith("third_party") and not str(file).startswith("third_party/vmm_vhost"):
return False
# Skip excluded files
for glob in check.exclude:
if fnmatch(str(file), glob):
return False
# Match python tools (no file-extension, but with a python shebang line)
if check.python_tools:
if fnmatch(str(file), "tools/*") and file.suffix == "" and file.is_file():
if file.open(errors="ignore").read(32).startswith("#!/usr/bin/env python3"):
return True
# If no constraint is specified, match all files.
if not check.files and not check.python_tools:
return True
# Otherwise, match only those specified by `files`.
for glob in check.files:
if fnmatch(str(file), glob):
return True
return False
def run_check(check: Check, context: CheckContext):
"Runs `check` using the information in `context`. Prints status updates."
start_time = time()
if verbose():
print(f"Checking {check.name}...")
try:
check.check_function(context)
success = True
except Exception as e:
print(e)
success = False
duration = time() - start_time
print(f"Check {check.name}", "OK" if success else "FAILED", f" ({duration:.2f} s)")
return success
def run_checks(
checks_list: List[Check],
fix: bool,
run_on_all_files: bool,
nightly: bool,
):
"""
Runs all checks in checks_list.
Arguments:
fix: Tell checks to fix issues if they can (e.g. run formatter).
run_on_all_files: Do not use git delta, but run on all files.
nightly: Use nightly version of rust tooling.
"""
all_files = [*all_tracked_files()]
file_diff = [*list_file_diff()]
new_files = [f for (s, f) in file_diff if s == "A"]
if run_on_all_files:
modified_files = all_files
else:
modified_files = [f for (s, f) in file_diff if s in ("M", "A")]
failed_checks: List[Check] = []
for check in checks_list:
context = CheckContext(
fix=fix,
nightly=nightly,
all_files=[f for f in all_files if should_run_check_on_file(check, f)],
modified_files=[f for f in modified_files if should_run_check_on_file(check, f)],
new_files=[f for f in new_files if should_run_check_on_file(check, f)],
)
if context.modified_files:
if not run_check(check, context):
failed_checks.append(check)
if any(c.can_fix for c in failed_checks):
print("")
print("Some of the issues above can be fixed automatically with:")
print("./tools/health-check --fix")
return len(failed_checks) == 0

tools/impl/presubmit.py (new file, 370 lines)

@@ -0,0 +1,370 @@
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from fnmatch import fnmatch
from pathlib import Path
from time import sleep
from typing import Callable, List, NamedTuple, Optional, Set, Union
from datetime import datetime, timedelta
from impl.common import Command, all_tracked_files, cmd, console, verbose
import rich
import rich.console
import rich.live
import rich.spinner
import rich.text
git = cmd("git")
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
@dataclass
class CheckContext(object):
"Information passed to each check when it's called."
# Whether or not --fix was set and checks should attempt to fix problems they encounter.
fix: bool
# Use rust nightly version for rust checks
nightly_fmt: bool
# All files that this check should cover (e.g. all python files on a python check).
all_files: List[Path]
# Those files of all_files that were modified locally.
modified_files: List[Path]
# Files that do not exist upstream and have been added locally.
new_files: List[Path]
class Check(NamedTuple):
"Metadata for each check, defining on which files it should run."
# Function to call for this check
check_function: Callable[[CheckContext], Union[Command, List[Command]]]
custom_name: Optional[str] = None
# List of globs that this check should be triggered on
files: List[str] = []
python_tools: bool = False
# List of globs to exclude from this check
exclude: List[str] = []
# Whether or not this check can fix issues.
can_fix: bool = False
# Which groups this check belongs to.
groups: List[str] = []
# Priority tasks usually take longer; they are started first and show preliminary output.
priority: bool = False
@property
def name(self):
if self.custom_name:
return self.custom_name
name = self.check_function.__name__
if name.startswith("check_"):
return name[len("check_") :]
return name
@property
def doc(self):
if self.check_function.__doc__:
return self.check_function.__doc__.strip()
else:
return None
class Group(NamedTuple):
"Metadata for a group of checks"
name: str
doc: str
checks: Set[str]
def list_file_diff():
"""
Lists files that were modified compared to the upstream branch.
Falls back to all files tracked by git if there is no upstream branch.
"""
upstream = git("rev-parse @{u}").stdout(check=False)
if upstream:
for line in git("diff --name-status", upstream).lines():
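# `git diff --name-status` prints one "<status>\t<path>" pair per line,
# e.g. "M\ttools/presubmit"; split on the first tab only.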
parts = line.split("\t", 1)
file = Path(parts[1].strip())
if file.is_file():
yield (parts[0].strip(), file)
else:
print("WARNING: Not tracking a branch. Checking all files.")
for file in all_tracked_files():
yield ("M", file)
def should_run_check_on_file(check: Check, file: Path):
"Returns true if `check` should run on `file`."
# Skip third_party except vmm_vhost.
if str(file).startswith("third_party") and not str(file).startswith("third_party/vmm_vhost"):
return False
# Skip excluded files
for glob in check.exclude:
if fnmatch(str(file), glob):
return False
# Match python tools (no file-extension, but with a python shebang line)
if check.python_tools:
if fnmatch(str(file), "tools/*") and file.suffix == "" and file.is_file():
if file.open(errors="ignore").read(32).startswith("#!/usr/bin/env python3"):
return True
# If no constraint is specified, match all files.
if not check.files and not check.python_tools:
return True
# Otherwise, match only those specified by `files`.
for glob in check.files:
if fnmatch(str(file), glob):
return True
return False
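# e.g. "tools/clippy" (no extension, python3 shebang) matches any check with
# python_tools=True, while files under third_party/ (except vmm_vhost) never match.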
class Task(object):
"""
Represents a task that needs to be executed to perform a `Check`.
The task can be executed via `Task.execute`, which will update the state variables with
status and progress information.
This information can then be rendered from a separate thread via `Task.status_widget()`
"""
def __init__(self, title: str, commands: List[Command], priority: bool):
"Display title."
self.title = title
"Commands to execute."
self.commands = commands
"Task is a priority check."
self.priority = priority
"List of log lines (stdout+stderr) produced by the task."
self.log_lines: List[str] = []
"Task has completed, but may or may not have been successful."
self.done = False
"True if the task completed successfully."
self.success = False
"Time the task was started."
self.start_time = datetime.min
"Duration the task took to execute. Only filled after completion."
self.duration = timedelta.max
"Spinner object for status_widget UI."
self.spinner = rich.spinner.Spinner("point", title)
def status_widget(self):
"Returns a rich console object showing the current status of the task."
duration = self.duration if self.done else datetime.now() - self.start_time
title = f"[{duration.total_seconds():6.2f}s] [bold]{self.title}[/bold]"
if self.done:
status: str = "[green]OK [/green]" if self.success else "[red]ERR[/red]"
title_widget = rich.text.Text.from_markup(f"{status} {title}")
else:
self.spinner.text = rich.text.Text.from_markup(title)
title_widget = self.spinner
if not self.priority:
return title_widget
last_lines = [
self.log_lines[-3] if len(self.log_lines) >= 3 else "",
self.log_lines[-2] if len(self.log_lines) >= 2 else "",
self.log_lines[-1] if len(self.log_lines) >= 1 else "",
]
return rich.console.Group(
*(
# Print last log lines without their original colors
rich.text.Text(
"" + ansi_escape.sub("", log_line),
style="light_slate_grey",
overflow="ellipsis",
no_wrap=True,
)
for log_line in last_lines
),
rich.text.Text("", end="", style="light_slate_grey"),
title_widget,
rich.text.Text(),
)
def execute(self):
"Execute the task while updating the status variables."
self.start_time = datetime.now()
success = True
for command in self.commands:
if verbose():
self.log_lines.append(f"$ {command}")
process = command.popen(stderr=subprocess.STDOUT)
assert process.stdout
for line in iter(process.stdout.readline, ""):
self.log_lines.append(line.strip())
if process.wait() != 0:
success = False
self.duration = datetime.now() - self.start_time
self.success = success
self.done = True
def print_logs(tasks: List[Task]):
"Prints logs of all failed or unfinished tasks."
for task in tasks:
if not task.done:
print()
console.rule(f"{task.title} did not finish", style="yellow")
for line in task.log_lines:
print(line)
if not task.log_lines:
print(f"{task.title} did not output any logs")
for task in tasks:
if task.done and not task.success:
console.rule(f"{task.title} failed", style="red")
for line in task.log_lines:
print(line)
if not task.log_lines:
print(f"{task.title} did not output any logs")
def print_summary(tasks: List[Task]):
"Prints a summary of all task results."
console.rule("Summary")
tasks.sort(key=lambda t: t.duration)
for task in tasks:
title = f"[{task.duration.total_seconds():6.2f}s] [bold]{task.title}[/bold]"
status: str = "[green]OK [/green]" if task.success else "[red]ERR[/red]"
console.print(f"{status} {title}")
def execute_tasks_parallel(tasks: List[Task]):
"Executes the list of tasks in parallel, while rendering live status updates."
with ThreadPoolExecutor() as executor:
try:
# Since tasks are executed in subprocesses, we can use a thread pool to parallelize
# despite the GIL.
task_futures = [executor.submit(t.execute) for t in tasks]
# Render task updates while they are executing in the background.
with rich.live.Live(refresh_per_second=30) as live:
while True:
live.update(
rich.console.Group(
*(t.status_widget() for t in tasks),
rich.text.Text(),
rich.text.Text.from_markup(
"[green]Tip:[/green] Press CTRL-C to abort execution and see all logs."
),
)
)
if all(future.done() for future in task_futures):
break
sleep(0.1)
except KeyboardInterrupt:
print_logs(tasks)
# Force exit to skip waiting for the executor to shutdown. This will kill all
# running subprocesses.
os._exit(1) # type: ignore
# Render error logs and summary after execution
print_logs(tasks)
print_summary(tasks)
if any(not t.success for t in tasks):
raise Exception("Some checks failed")
def execute_tasks_serial(tasks: List[Task]):
"Executes the list of tasks one-by-one"
for task in tasks:
console.rule(task.title)
for command in task.commands:
command.fg()
console.print()
def generate_plan(
checks_list: List[Check],
fix: bool,
run_on_all_files: bool,
nightly_fmt: bool,
):
"Generates a list of `Task`s to execute the checks provided in `checks_list`"
all_files = [*all_tracked_files()]
file_diff = [*list_file_diff()]
new_files = [f for (s, f) in file_diff if s == "A"]
if run_on_all_files:
modified_files = all_files
else:
modified_files = [f for (s, f) in file_diff if s in ("M", "A")]
tasks: List[Task] = []
for check in checks_list:
if fix and not check.can_fix:
continue
context = CheckContext(
fix=fix,
nightly_fmt=nightly_fmt,
all_files=[f for f in all_files if should_run_check_on_file(check, f)],
modified_files=[f for f in modified_files if should_run_check_on_file(check, f)],
new_files=[f for f in new_files if should_run_check_on_file(check, f)],
)
if context.modified_files:
commands = check.check_function(context)
if not isinstance(commands, list):
commands = [commands]
title = f"fixing {check.name}" if fix else check.name
tasks.append(Task(title, commands, check.priority))
# Sort so that priority tasks are launched (and rendered) first
tasks.sort(key=lambda t: (t.priority, t.title), reverse=True)
return tasks
def run_checks(
checks_list: List[Check],
fix: bool,
run_on_all_files: bool,
nightly_fmt: bool,
parallel: bool,
):
"""
Runs all checks in checks_list.
Arguments:
fix: Run fixes instead of checks on `Check`s that support it.
run_on_all_files: Do not use git delta, but run on all files.
nightly_fmt: Use nightly version of rust tooling.
parallel: Run tasks in parallel.
"""
tasks = generate_plan(checks_list, fix, run_on_all_files, nightly_fmt)
if parallel:
execute_tasks_parallel(list(tasks))
else:
execute_tasks_serial(list(tasks))

@@ -1,123 +1,507 @@
#!/bin/bash
# Copyright 2021 The ChromiumOS Authors
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
cd "$(dirname $0)/.."
import sys
import typing
from typing import List, Literal, Set, Tuple
HELP="This will run presubmit checks for crosvm.
from impl.common import (
CROSVM_ROOT,
TOOLS_ROOT,
argh,
chdir,
cmd,
cwd_context,
is_kiwi_repo,
run_main,
)
from impl.presubmit import Check, CheckContext, run_checks, Group
To run all checks just run
python = cmd("python3")
mypy = cmd("mypy").with_color_env("MYPY_FORCE_COLOR")
black = cmd("black").with_color_arg(always="--color", never="--no-color")
mdformat = cmd("mdformat")
lucicfg = cmd("third_party/depot_tools/lucicfg")
$ ./tools/presubmit
# All supported platforms as a type and a list.
Platform = Literal["x86_64", "aarch64", "mingw64", "armhf"]
PLATFORMS: Tuple[Platform, ...] = typing.get_args(Platform)
The checks can be run in parallel for faster execution:
$ ./tools/presubmit --tmux
####################################################################################################
# Check methods
#
# Each check returns a Command (or list of Commands) to be run to execute the check. They are
# registered and configured in the CHECKS list below.
#
# Some check functions are factories that return a separate check function for each
# supported platform.
This will open a tmux session to run all presubmit builds in parallel. It will
create a nested tmux session if you are already using it.
All tests are executed in the local development environment. If your host is not
set up for aarch64 builds, it will use './tools/dev_container' to run
those builds.
def check_python_tests(_: CheckContext):
"Runs unit tests for python dev tooling."
PYTHON_TESTS = [
"tests.cl_tests",
"impl.common",
]
return [python.with_cwd(TOOLS_ROOT).with_args("-m", file) for file in PYTHON_TESTS]
There are three levels of presubmit tests that can be run:
$ ./tools/presubmit --quick
$ ./tools/presubmit
$ ./tools/presubmit --all
def check_python_types(context: CheckContext):
"Run mypy type checks on python dev tooling."
return [mypy("--pretty", file) for file in context.all_files]
The quick mode will only cover x86 and does not require a dev_container. The
default mode will add aarch64 tests, and the all mode will test everything that
is also tested on Kokoro.
"
while [[ $# -gt 0 ]]; do
case "$1" in
-q | --quick)
QUICK=true
shift
;;
-a | --all)
ALL=true
shift
;;
--tmux)
RUN_IN_TMUX=true
shift
;;
-h | --help)
echo "$HELP"
exit 0
shift
;;
*)
echo "unknown argument $1"
exit 1
;;
esac
done
run_commands_in_tmux() {
local tmux_commands=(
set-option -g default-shell /bin/bash \;
new-session "$1; read -p 'Press enter to close.'" \;
def check_python_format(context: CheckContext):
"Runs the black formatter on python dev tooling."
return black.with_args(
"--check" if not context.fix else None,
*context.modified_files,
)
for cmd in "${@:2}"; do
tmux_commands+=(
split-window -h "$cmd; read -p 'Press enter to close.'" \;
def check_markdown_format(context: CheckContext):
"Runs mdformat on all markdown files."
if "blaze" in mdformat("--version").stdout():
raise Exception(
"You are using google's mdformat. "
+ "Please update your PATH to ensure the pip installed mdformat is available."
)
done
tmux_commands+=(
select-layout even-horizontal \;
return mdformat.with_args(
"--wrap 100",
"--check" if not context.fix else "",
*context.modified_files,
)
TMUX="" tmux "${tmux_commands[@]}"
}
run_commands() {
for cmd in "$@"; do
echo "$ ${cmd}"
bash -c "$cmd"
echo
done
}
aarch64_wrapper() {
if ! (rustup target list --installed | grep -q aarch64 &&
dpkg --print-foreign-architectures | grep -q arm64); then
echo "./tools/dev_container"
fi
}
def check_rust_format(context: CheckContext):
"Runs rustfmt on all modified files."
if context.nightly_fmt:
rustfmt = cmd(
cmd("rustup +nightly which rustfmt"),
"--config imports_granularity=item,group_imports=StdExternalCrate",
)
else:
rustfmt = cmd(cmd("rustup which rustfmt"))
return rustfmt.with_color_flag().with_args(
"--check" if not context.fix else "",
*context.modified_files,
)
commands=(
"./tools/health-check"
def check_cargo_doc(_: CheckContext):
"Runs cargo-doc and verifies that no warnings are emitted."
return cmd("./tools/cargo-doc").with_env("RUSTDOCFLAGS", "-D warnings").with_color_flag()
def check_crosvm_tests(platform: Platform):
def check(_: CheckContext):
dut = None
if platform == "x86_64":
dut = "--dut=host"
elif platform == "aarch64":
dut = "--dut=vm"
return cmd("./tools/run_tests --platform", platform, dut).with_color_flag()
check.__doc__ = f"Runs all crosvm tests for {platform}."
return check
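# e.g. check_crosvm_tests("aarch64") returns a check that runs
# `./tools/run_tests --platform aarch64 --dut=vm`.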
def check_crosvm_unit_tests(platform: Platform):
def check(_: CheckContext):
return cmd("./tools/run_tests --platform", platform).with_color_flag()
check.__doc__ = f"Runs crosvm unit tests for {platform}."
return check
def check_clippy(platform: Platform):
def check(context: CheckContext):
return cmd(
"./tools/clippy --platform",
platform,
"--fix" if context.fix else None,
).with_color_flag()
check.__doc__ = f"Runs clippy for {platform}."
return check
def check_infra_configs(context: CheckContext):
"Validate luci configs by sending them to luci-config."
# TODO: Validate config files. Requires authentication with luci inside docker.
return [lucicfg("fmt --dry-run", file) for file in context.modified_files]
def check_infra_tests(_: CheckContext):
"Run recipe.py tests, all of them, regardless of which files were modified."
recipes = cmd("infra/recipes.py").with_path_env("third_party/depot_tools")
return recipes("test run")
def custom_check(name: str, can_fix: bool = False):
"Custom checks are written in python in tools/custom_checks. This is a wrapper to call them."
def check(context: CheckContext):
return cmd(
TOOLS_ROOT / "custom_checks",
name,
*context.modified_files,
"--fix" if can_fix and context.fix else None,
)
check.__name__ = name.replace("-", "_")
check.__doc__ = f"Runs tools/custom_checks {name}"
return check
####################################################################################################
# Checks configuration
#
# Configures which checks are available and on which files they are run.
# Check names default to the function name minus the check_ prefix
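# A minimal sketch of adding a new check (hypothetical name and command):
#
#   def check_json_valid(context: CheckContext):
#       "Verifies that modified JSON files parse."
#       return [cmd("python3 -m json.tool", file) for file in context.modified_files]
#
# Registered by appending `Check(check_json_valid, files=["**.json"])` to the
# CHECKS list below.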
CHECKS: List[Check] = [
Check(
check_rust_format,
files=["**.rs"],
exclude=["system_api/src/bindings/*"],
can_fix=True,
),
Check(
check_cargo_doc,
files=["**.rs", "**Cargo.toml"],
priority=True,
),
Check(
check_python_tests,
files=["tools/**.py"],
python_tools=True,
priority=True,
),
Check(
check_python_types,
files=["tools/**.py"],
exclude=["tools/windows/*"],
python_tools=True,
),
Check(
check_python_format,
files=["**.py"],
python_tools=True,
exclude=["infra/recipes.py"],
can_fix=True,
),
Check(
check_markdown_format,
files=["**.md"],
exclude=[
"infra/README.recipes.md",
"docs/book/src/appendix/memory_layout.md",
],
can_fix=True,
),
*(
Check(
check_crosvm_tests(platform),
custom_name=f"crosvm_tests_{platform}",
files=["**.rs"],
priority=True,
)
for platform in PLATFORMS
),
*(
Check(
check_crosvm_unit_tests(platform),
custom_name=f"crosvm_unit_tests_{platform}",
files=["**.rs"],
priority=True,
)
for platform in PLATFORMS
),
*(
Check(
check_clippy(platform),
custom_name=f"clippy_{platform}",
files=["**.rs"],
can_fix=True,
priority=True,
)
for platform in PLATFORMS
),
Check(
custom_check("check-copyright-header"),
files=["**.rs", "**.py", "**.c", "**.h", "**.policy", "**.sh"],
exclude=[
"infra/recipes.py",
"hypervisor/src/whpx/whpx_sys/*.h",
"third_party/vmm_vhost/*",
"net_sys/src/lib.rs",
"system_api/src/bindings/*",
],
python_tools=True,
can_fix=True,
),
Check(
custom_check("check-rust-features"),
files=["**Cargo.toml"],
),
Check(
custom_check("check-rust-lockfiles"),
files=["**Cargo.toml"],
),
Check(
custom_check("check-line-endings"),
),
Check(
custom_check("check-file-ends-with-newline"),
exclude=[
"**.h264",
"**.vp8",
"**.vp9",
"**.ivf",
"**.bin",
"**.png",
"**.min.js",
"**.drawio",
"**.json",
],
),
]
# We disable LUCI infra related tests because Kokoro doesn't have the internet
# connectivity that the tests rely on.
if not is_kiwi_repo():
CHECKS.extend(
[
Check(
check_infra_configs,
files=["infra/config/**.star"],
can_fix=True,
),
Check(
check_infra_tests,
files=["infra/**.py"],
can_fix=True,
),
]
)
####################################################################################################
# Group configuration
#
# Configures pre-defined groups of checks. Some are configured for CI builders and others
# are configured for convenience during local development.
HEALTH_CHECKS = [
"rust_format",
"cargo_doc",
"infra_configs",
"infra_tests",
"python_format",
"python_tests",
"python_types",
"markdown_format",
"copyright_header",
"rust_features",
"rust_lockfiles",
"file_ends_with_newline",
"line_endings",
]
PLATFORM_CHECKS = dict(
(
platform,
set(
[
f"crosvm_tests_{platform}",
f"clippy_{platform}",
]
),
)
for platform in PLATFORMS
)
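# e.g. PLATFORM_CHECKS["x86_64"] == {"crosvm_tests_x86_64", "clippy_x86_64"}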
if [ "$ALL" == true ]; then
commands+=(
"./tools/run_tests --dut=host"
"./tools/run_tests --platform=mingw64"
"./tools/clippy --platform=mingw64"
"$(aarch64_wrapper) ./tools/run_tests --platform=aarch64 --dut=vm"
"$(aarch64_wrapper) ./tools/run_tests --platform=armhf"
"cargo build --verbose --no-default-features"
)
elif [ "$QUICK" != true ]; then
commands+=(
"./tools/run_tests --dut=host"
"$(aarch64_wrapper) ./tools/run_tests --platform=aarch64"
"./tools/run_tests --platform=mingw64"
)
else
commands+=(
"./tools/run_tests"
)
fi
GROUPS: List[Group] = [
# The default group is run if no check or group is explicitly set
Group(
name="default",
doc="Checks run by default",
checks=set(
[
*HEALTH_CHECKS,
# Run only one task per platform to prevent blocking on the build cache.
"crosvm_tests_x86_64",
"crosvm_unit_tests_aarch64",
"crosvm_unit_tests_mingw64",
"clippy_armhf",
]
),
),
Group(
name="quick",
doc="Runs a quick subset of presubmit checks.",
checks=set(
[
*HEALTH_CHECKS,
"crosvm_unit_tests_x86_64",
"clippy_aarch64",
]
),
),
Group(
name="unit_tests",
doc="Runs unit tests for all platforms",
checks=set(f"crosvm_unit_tests_{platform}" for platform in PLATFORMS),
),
Group(
name="health_checks",
doc="Checks run on the health_check builder",
checks=set(HEALTH_CHECKS),
),
Group(
name="format",
doc="Runs all formatting checks (or fixes)",
checks=set(
[
"rust_format",
"markdown_format",
"python_format",
]
),
),
*(
Group(
name=f"linux_{platform}",
doc=f"Checks run on the linux-{platform} builder",
checks=PLATFORM_CHECKS[platform],
)
for platform in PLATFORMS
),
Group(
name="all",
doc="Run checks of all builders.",
checks=set(
[
*(check for checks in PLATFORM_CHECKS.values() for check in checks),
*HEALTH_CHECKS,
]
),
),
]
if [ "$RUN_IN_TMUX" = true ]; then
run_commands_in_tmux "${commands[@]}"
else
run_commands "${commands[@]}"
fi
# Turn both lists into dicts for convenience
CHECKS_DICT = dict((c.name, c) for c in CHECKS)
GROUPS_DICT = dict((c.name, c) for c in GROUPS)
def validate_config():
"Validates the CHECKS and GROUPS configuration."
for group in GROUPS:
for check in group.checks:
if check not in CHECKS_DICT:
raise Exception(f"Group {group.name} includes non-existing check {check}.")
def find_in_group(check: Check):
for group in GROUPS:
if check.name in group.checks:
return True
return False
for check in CHECKS:
if not find_in_group(check):
raise Exception(f"Check {check.name} is not included in any group.")
all_names = [c.name for c in CHECKS] + [g.name for g in GROUPS]
for name in all_names:
if all_names.count(name) > 1:
raise Exception(f"Check or group {name} is defined multiple times.")
@argh.arg("--list-checks", default=False, help="List names of available checks and exit.")
@argh.arg("--fix", default=False, help="Asks checks to fix problems where possible.")
@argh.arg("--no-delta", default=False, help="Run on all files instead of just modified files.")
@argh.arg("--no-parallel", default=False, help="Do not run checks in parallel.")
@argh.arg("--nightly-fmt", default=False, help="Use nightly rust for rustfmt")
@argh.arg(
"checks_or_groups",
help="List of checks or groups to run. Defaults to run the `default` group.",
)
def main(
list_checks: bool = False,
fix: bool = False,
no_delta: bool = False,
no_parallel: bool = False,
nightly_fmt: bool = False,
*checks_or_groups: str,
):
chdir(CROSVM_ROOT)
validate_config()
if not checks_or_groups:
checks_or_groups = ("default",)
# Resolve and validate the groups and checks provided
check_names: Set[str] = set()
for check_or_group in checks_or_groups:
if check_or_group in CHECKS_DICT:
check_names.add(check_or_group)
elif check_or_group in GROUPS_DICT:
check_names = check_names.union(GROUPS_DICT[check_or_group].checks)
else:
raise Exception(f"No such check or group: {check_or_group}")
if list_checks:
for check in check_names:
print(check)
return
check_list = [CHECKS_DICT[name] for name in check_names]
run_checks(
check_list,
fix=fix,
run_on_all_files=no_delta,
nightly_fmt=nightly_fmt,
parallel=not no_parallel,
)
def usage():
groups = "\n".join(f" {group.name}: {group.doc}" for group in GROUPS)
checks = "\n".join(f" {check.name}: {check.doc}" for check in CHECKS)
return f"""\
Runs checks on the crosvm codebase.
Basic usage, to run a default selection of checks:
./tools/presubmit
Some checkers can fix issues they find (e.g. formatters, clippy, etc):
./tools/presubmit --fix
Various groups of presubmit checks can be run via:
./tools/presubmit group_name
Available groups are:
{groups}
You can also provide the names of specific checks to run:
./tools/presubmit check1 check2
Available checks are:
{checks}
"""
if __name__ == "__main__":
run_main(main, usage=usage())

@@ -111,4 +111,4 @@ Branch foo tracking origin/main
if __name__ == "__main__":
unittest.main()
unittest.main(warnings="ignore")