Merging upstream version 1.2.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent ae7b7df396
commit afeccccd6a
154 changed files with 7346 additions and 5000 deletions
@@ -4,12 +4,14 @@
 """Fixtures for benchmarking ANTA."""
 
 import logging
+from collections import defaultdict
 
 import pytest
 import respx
 from _pytest.terminal import TerminalReporter
 
 from anta.catalog import AntaCatalog
+from anta.result_manager import ResultManager
 
 from .utils import AntaMockEnvironment
 
@@ -17,6 +19,12 @@ logger = logging.getLogger(__name__)
 
 TEST_CASE_COUNT = None
 
+# Used to globally configure the benchmarks by specifying parameters for inventories
+BENCHMARK_PARAMETERS = [
+    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
+    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
+]
+
 
 @pytest.fixture(name="anta_mock_env", scope="session")  # We want this fixture to have a scope set to session to avoid reparsing all the unit tests data.
 def anta_mock_env_fixture() -> AntaMockEnvironment:
@@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
     return anta_mock_env.catalog
 
 
+@pytest.fixture(name="session_results", scope="session")  # We want this fixture to be reused across test modules within tests/benchmark
+def session_results_fixture() -> defaultdict[str, ResultManager]:
+    """Return a dictionary of ResultManger objects for the benchmarks.
+
+    The key is the test id as defined in the pytest_generate_tests in this module.
+    Used to pass a populated ResultManager from one benchmark to another.
+    """
+    return defaultdict(lambda: ResultManager())
+
+
+@pytest.fixture
+def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager:
+    """Return the unique ResultManger object for the current benchmark parameter."""
+    return session_results[request.node.callspec.id]
+
+
 def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
     """Display the total number of ANTA unit test cases used to benchmark."""
     terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
@@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
             return
         metafunc.parametrize(
             "inventory",
-            [
-                pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
-                pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
-            ],
+            BENCHMARK_PARAMETERS,
             indirect=True,
         )
+    elif "results" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "results",
+            BENCHMARK_PARAMETERS,
+            indirect=True,
+        )

@@ -5,6 +5,7 @@
 
 import asyncio
 import logging
+from collections import defaultdict
 from unittest.mock import patch
 
 import pytest
@@ -22,45 +23,61 @@ from .utils import collect, collect_commands
 logger = logging.getLogger(__name__)
 
 
-def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
+def test_anta_dry_run(
+    benchmark: BenchmarkFixture,
+    event_loop: asyncio.AbstractEventLoop,
+    catalog: AntaCatalog,
+    inventory: AntaInventory,
+    request: pytest.FixtureRequest,
+    session_results: defaultdict[str, ResultManager],
+) -> None:
     """Benchmark ANTA in Dry-Run Mode."""
     # Disable logging during ANTA execution to avoid having these function time in benchmarks
     logging.disable()
 
-    def _() -> ResultManager:
-        manager = ResultManager()
-        catalog.clear_indexes()
-        event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
-        return manager
+    results = session_results[request.node.callspec.id]
 
-    manager = benchmark(_)
+    @benchmark
+    def _() -> None:
+        results.reset()
+        catalog.clear_indexes()
+        event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True))
 
     logging.disable(logging.NOTSET)
-    if len(manager.results) != len(inventory) * len(catalog.tests):
-        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False)
-    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------"
+
+    if len(results.results) != len(inventory) * len(catalog.tests):
+        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(results.results)}", pytrace=False)
+    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------"
     logger.info(bench_info)
 
 
 @patch("anta.models.AntaTest.collect", collect)
 @patch("anta.device.AntaDevice.collect_commands", collect_commands)
 @pytest.mark.dependency(name="anta_benchmark", scope="package")
 @respx.mock  # Mock eAPI responses
-def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
+def test_anta(
+    benchmark: BenchmarkFixture,
+    event_loop: asyncio.AbstractEventLoop,
+    catalog: AntaCatalog,
+    inventory: AntaInventory,
+    request: pytest.FixtureRequest,
+    session_results: defaultdict[str, ResultManager],
+) -> None:
     """Benchmark ANTA."""
     # Disable logging during ANTA execution to avoid having these function time in benchmarks
     logging.disable()
 
-    def _() -> ResultManager:
-        manager = ResultManager()
-        catalog.clear_indexes()
-        event_loop.run_until_complete(main(manager, inventory, catalog))
-        return manager
+    results = session_results[request.node.callspec.id]
 
-    manager = benchmark(_)
+    @benchmark
+    def _() -> None:
+        results.reset()
+        catalog.clear_indexes()
+        event_loop.run_until_complete(main(results, inventory, catalog))
 
     logging.disable(logging.NOTSET)
 
-    if len(catalog.tests) * len(inventory) != len(manager.results):
+    if len(catalog.tests) * len(inventory) != len(results.results):
         # This could mean duplicates exist.
         # TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list
         seen = set()
@@ -74,17 +91,17 @@ def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop
             for test in dupes:
                 msg = f"Found duplicate in test catalog: {test}"
                 logger.error(msg)
-        pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False)
+        pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False)
     bench_info = (
         "\n--- ANTA NRFU Benchmark Information ---\n"
-        f"Test results: {len(manager.results)}\n"
-        f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n"
-        f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n"
-        f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n"
-        f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n"
-        f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n"
+        f"Test results: {len(results.results)}\n"
+        f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n"
+        f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n"
+        f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n"
+        f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n"
+        f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n"
         "---------------------------------------"
     )
     logger.info(bench_info)
-    assert manager.get_total_results({AntaTestStatus.ERROR}) == 0
-    assert manager.get_total_results({AntaTestStatus.UNSET}) == 0
+    assert results.get_total_results({AntaTestStatus.ERROR}) == 0
+    assert results.get_total_results({AntaTestStatus.UNSET}) == 0

tests/benchmark/test_reporter.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+# Copyright (c) 2023-2024 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the LICENSE file.
+"""Benchmark tests for anta.reporter."""
+
+import json
+import logging
+from pathlib import Path
+
+import pytest
+
+from anta.reporter import ReportJinja, ReportTable
+from anta.reporter.csv_reporter import ReportCsv
+from anta.reporter.md_reporter import MDReportGenerator
+from anta.result_manager import ResultManager
+
+logger = logging.getLogger(__name__)
+
+DATA_DIR: Path = Path(__file__).parents[1].resolve() / "data"
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_all(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_all()."""
+    reporter = ReportTable()
+    reporter.report_all(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_devices(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_summary_devices()."""
+    reporter = ReportTable()
+    reporter.report_summary_devices(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_tests(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_summary_tests()."""
+    reporter = ReportTable()
+    reporter.report_summary_tests(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_json(results: ResultManager) -> None:
+    """Benchmark JSON report."""
+    assert isinstance(results.json, str)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_jinja(results: ResultManager) -> None:
+    """Benchmark ReportJinja."""
+    assert isinstance(ReportJinja(template_path=DATA_DIR / "template.j2").render(json.loads(results.json)), str)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_csv(results: ResultManager, tmp_path: Path) -> None:
+    """Benchmark ReportCsv.generate()."""
+    ReportCsv.generate(results=results, csv_filename=tmp_path / "report.csv")
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_markdown(results: ResultManager, tmp_path: Path) -> None:
+    """Benchmark MDReportGenerator.generate()."""
+    MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md")

@@ -5,19 +5,21 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 
 from anta.result_manager import ResultManager
 from anta.runner import get_coroutines, prepare_tests
 
 if TYPE_CHECKING:
     from collections import defaultdict
+    from collections.abc import Coroutine
 
     from pytest_codspeed import BenchmarkFixture
 
     from anta.catalog import AntaCatalog, AntaTestDefinition
     from anta.device import AntaDevice
     from anta.inventory import AntaInventory
+    from anta.result_manager.models import TestResult
 
 
 def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
@@ -40,9 +42,13 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inven
 
     assert selected_tests is not None
 
-    coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager()))
-    for coros in coroutines:
-        coros.close()
+    def bench() -> list[Coroutine[Any, Any, TestResult]]:
+        coros = get_coroutines(selected_tests=selected_tests, manager=ResultManager())
+        for c in coros:
+            c.close()
+        return coros
+
+    coroutines = benchmark(bench)
 
     count = sum(len(tests) for tests in selected_tests.values())
     assert count == len(coroutines)