Adding upstream version 1.4.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-15 09:34:27 +02:00
parent dc7df702ea
commit 7996c81031
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
166 changed files with 13787 additions and 11959 deletions


@@ -37,6 +37,7 @@ def test_anta_dry_run(
    results = session_results[request.node.callspec.id]
    # TODO: Use AntaRunner directly in ANTA v2.0.0
    @benchmark
    def _() -> None:
        results.reset()
@@ -69,6 +70,7 @@ def test_anta(
    results = session_results[request.node.callspec.id]
    # TODO: Use AntaRunner directly in ANTA v2.0.0
    @benchmark
    def _() -> None:
        results.reset()
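
For readers unfamiliar with the pattern in these two hunks: the `benchmark` fixture (pytest-benchmark / pytest-codspeed) is callable, so applying it as a decorator runs the decorated function under the timer, and `results.reset()` clears the manager so repeated rounds do not accumulate results. A minimal standalone sketch, with the actual ANTA workload elided:

# Sketch only: the `benchmark` fixture is assumed to come from the installed
# benchmarking plugin; the workload below is a placeholder, not ANTA's runner.
from anta.result_manager import ResultManager

def test_reset_pattern(benchmark) -> None:
    results = ResultManager()

    @benchmark
    def _() -> None:
        results.reset()  # start each benchmark round from an empty manager
        # ... run and collect the workload being measured ...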
@@ -77,21 +79,6 @@ def test_anta(
    logging.disable(logging.NOTSET)
    if len(catalog.tests) * len(inventory) != len(results.results):
        # This could mean duplicates exist.
        # TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list
        seen = set()
        dupes = []
        for test in catalog.tests:
            if test in seen:
                dupes.append(test)
            else:
                seen.add(test)
        if dupes:
            for test in dupes:
                msg = f"Found duplicate in test catalog: {test}"
                logger.error(msg)
        pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False)
    bench_info = (
        "\n--- ANTA NRFU Benchmark Information ---\n"
        f"Test results: {len(results.results)}\n"


@@ -67,5 +67,6 @@ def test_csv(results: ResultManager, tmp_path: Path) -> None:
@pytest.mark.benchmark
@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
def test_markdown(results: ResultManager, tmp_path: Path) -> None:
    """Benchmark MDReportGenerator.generate()."""
    MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md")
    """Benchmark MDReportGenerator.generate_sections()."""
    sections = [(section, results) for section in MDReportGenerator.DEFAULT_SECTIONS]
    MDReportGenerator.generate_sections(sections=sections, md_filename=tmp_path / "report.md")
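
The benchmark now exercises `MDReportGenerator.generate_sections()`, which takes a list of (section, ResultManager) pairs so each section can be rendered from its own set of results. A standalone sketch of the same call, assuming the usual `anta.reporter.md_reporter` import path and using an empty manager purely for illustration:

from pathlib import Path

from anta.reporter.md_reporter import MDReportGenerator
from anta.result_manager import ResultManager

results = ResultManager()  # illustration only: real code would populate the manager first
sections = [(section, results) for section in MDReportGenerator.DEFAULT_SECTIONS]
MDReportGenerator.generate_sections(sections=sections, md_filename=Path("report.md"))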


@@ -7,6 +7,9 @@ from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from anta._runner import AntaRunContext, AntaRunFilters, AntaRunner
from anta.result_manager import ResultManager
from anta.runner import get_coroutines, prepare_tests
@@ -22,6 +25,8 @@ if TYPE_CHECKING:
    from anta.result_manager.models import TestResult

# TODO: Remove this in ANTA v2.0.0
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.prepare_tests`."""
@@ -36,6 +41,8 @@ def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    assert sum(len(tests) for tests in selected_tests.values()) == len(inventory) * len(catalog.tests)

# TODO: Remove this in ANTA v2.0.0
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.get_coroutines`."""
    selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)
@@ -52,3 +59,38 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    count = sum(len(tests) for tests in selected_tests.values())
    assert count == len(coroutines)
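
Toy illustration of the count invariant asserted above: `prepare_tests` returns a mapping of device to selected test definitions, so with no filters every device receives every catalog test and the totals multiply.

# Not ANTA objects, just the shape of the invariant: 2 devices x 3 catalog tests.
selected = {"leaf1": ["t1", "t2", "t3"], "leaf2": ["t1", "t2", "t3"]}
assert sum(len(tests) for tests in selected.values()) == 2 * 3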

def test__setup_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta._runner.AntaRunner._setup_tests`."""
    runner = AntaRunner()
    ctx = AntaRunContext(inventory=inventory, catalog=catalog, manager=ResultManager(), filters=AntaRunFilters(), selected_inventory=inventory)

    def bench() -> None:
        catalog.clear_indexes()
        runner._setup_tests(ctx)

    benchmark(bench)

    assert ctx.total_tests_scheduled != 0
    assert ctx.total_devices_selected_for_testing == len(inventory)
    assert ctx.total_tests_scheduled == len(inventory) * len(catalog.tests)


def test__get_test_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta._runner.AntaRunner._get_test_coroutines`."""
    runner = AntaRunner()
    ctx = AntaRunContext(inventory=inventory, catalog=catalog, manager=ResultManager(), filters=AntaRunFilters(), selected_inventory=inventory)
    runner._setup_tests(ctx)
    assert ctx.selected_tests is not None

    def bench() -> list[Coroutine[Any, Any, TestResult]]:
        coros = runner._get_test_coroutines(ctx)
        for c in coros:
            c.close()
        return coros

    coroutines = benchmark(bench)

    assert ctx.total_tests_scheduled == len(coroutines)
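
One detail worth calling out in `test__get_test_coroutines`: the coroutines are created but never awaited, so `bench()` closes each one to avoid Python's "coroutine ... was never awaited" RuntimeWarning. A generic, non-ANTA illustration of the same pattern:

import asyncio

async def fake_test() -> str:
    await asyncio.sleep(0)
    return "success"

coros = [fake_test() for _ in range(3)]  # coroutine objects created, not awaited
for coro in coros:
    coro.close()  # release each coroutine without ever running it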


@@ -50,13 +50,21 @@ class AntaMockEnvironment: # pylint: disable=too-few-public-methods
    Also provide the attribute `eos_data_catalog` with the output of all the commands used in the test catalog.

    Each module in `tests.units.anta_tests` has a `DATA` constant.
    The `DATA` structure is a list of dictionaries used to parametrize the test. The list elements have the following keys:
    - `name` (str): Test name as displayed by Pytest.
    - `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime.
    - `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test.
    - `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`.
    The keys of `eos_data_catalog` are the tuples (DATA['test'], DATA['name']). The values are `eos_data`.
    The `DATA` structure is a dictionary where:
    - Each key is a tuple of size 2 containing:
        - An AntaTest subclass imported in the test module as the first element - e.g. VerifyUptime.
        - A string used as the test name displayed by pytest as the second element.
    - Each value is an instance of AntaUnitTest, which is a Python TypedDict.
    An AntaUnitTest has the following keys:
    - `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test.
    - `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`.
    - `expected` (dict): Expected test result structure, a dictionary containing a `result` key with one of the allowed statuses
      (`Literal[AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE, AntaTestStatus.SKIPPED]`) and,
      optionally, a `messages` key: a list[str] where each message is expected to be a substring of one of the actual messages in the TestResult object.
    The keys of `eos_data_catalog` are tuples of (AntaTest subclass name, test name displayed by pytest). The values are `eos_data`.
    """

    def __init__(self) -> None:
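
Under the layout described in the updated docstring, a test module's `DATA` constant looks roughly like the sketch below. The `VerifyUptime` entries and their values are made up for illustration; only the structure follows the docstring.

# Illustrative only: keys are (AntaTest subclass, test name), values follow the
# AntaUnitTest TypedDict described above. The concrete data here is invented.
from anta.result_manager.models import AntaTestStatus
from anta.tests.system import VerifyUptime

DATA = {
    (VerifyUptime, "success"): {
        "eos_data": [{"upTime": 1186689.15, "loadAvg": [0.13, 0.12, 0.09], "users": 1}],
        "inputs": {"minimum": 86400},
        "expected": {"result": AntaTestStatus.SUCCESS},
    },
    (VerifyUptime, "failure"): {
        "eos_data": [{"upTime": 665.15, "loadAvg": [0.13, 0.12, 0.09], "users": 1}],
        "inputs": {"minimum": 86400},
        "expected": {"result": AntaTestStatus.FAILURE, "messages": ["Device uptime is 665.15 seconds"]},
    },
}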
@@ -87,10 +95,11 @@ class AntaMockEnvironment: # pylint: disable=too-few-public-methods
        test_definitions = []
        eos_data_catalog = {}
        for module in import_test_modules():
            for test_data in module.DATA:
                test = test_data["test"]
                result_overwrite = AntaTest.Input.ResultOverwrite(custom_field=test_data["name"])
                if test_data["inputs"] is None:
            for (test, name), test_data in module.DATA.items():
                # Extract the test class, name and test data from a nested tuple structure:
                # unit test: Tuple[Tuple[Type[AntaTest], str], AntaUnitTest]
                result_overwrite = AntaTest.Input.ResultOverwrite(custom_field=name)
                if test_data.get("inputs") is None:
                    inputs = test.Input(result_overwrite=result_overwrite)
                else:
                    inputs = test.Input(**test_data["inputs"], result_overwrite=result_overwrite)
@@ -98,7 +107,7 @@ class AntaMockEnvironment: # pylint: disable=too-few-public-methods
                    test=test,
                    inputs=inputs,
                )
                eos_data_catalog[(test.__name__, test_data["name"])] = test_data["eos_data"]
                eos_data_catalog[(test.__name__, name)] = test_data["eos_data"]
                test_definitions.append(test_definition)

        return (AntaCatalog(tests=test_definitions), eos_data_catalog)
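
With the rewritten loop, the second element of the returned tuple maps (test class name, pytest-visible test name) to the raw `eos_data` list for that case; a hypothetical lookup against it would look like:

# Hypothetical key, only valid if a matching entry exists in a module's DATA.
eos_data = eos_data_catalog[("VerifyUptime", "success")]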