Merging upstream version 1.2.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent ae7b7df396
commit afeccccd6a
154 changed files with 7346 additions and 5000 deletions
tests/benchmark/conftest.py
@@ -4,12 +4,14 @@
 """Fixtures for benchmarking ANTA."""
 
 import logging
+from collections import defaultdict
 
 import pytest
 import respx
 from _pytest.terminal import TerminalReporter
 
 from anta.catalog import AntaCatalog
+from anta.result_manager import ResultManager
 
 from .utils import AntaMockEnvironment
 
@@ -17,6 +19,12 @@ logger = logging.getLogger(__name__)
 
 TEST_CASE_COUNT = None
 
+# Used to globally configure the benchmarks by specifying parameters for inventories
+BENCHMARK_PARAMETERS = [
+    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
+    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
+]
+
 
 @pytest.fixture(name="anta_mock_env", scope="session")  # We want this fixture to have a scope set to session to avoid reparsing all the unit tests data.
 def anta_mock_env_fixture() -> AntaMockEnvironment:
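Note (editor's sketch, not part of the commit): because these parameters are passed with indirect=True (see the pytest_generate_tests hunk below), each dict in BENCHMARK_PARAMETERS reaches the target fixture as request.param rather than the test function. A minimal illustration, with a hypothetical fixture name and return type standing in for the real inventory fixture defined elsewhere in tests/benchmark:

import pytest

@pytest.fixture
def inventory(request: pytest.FixtureRequest) -> dict:
    # request.param is one entry of BENCHMARK_PARAMETERS,
    # e.g. {"count": 1, "disable_cache": True, "reachable": True}.
    # The real fixture would build an inventory of `count` devices;
    # returning the raw dict here is only a placeholder.
    return dict(request.param)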
@@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
     return anta_mock_env.catalog
 
 
+@pytest.fixture(name="session_results", scope="session")  # We want this fixture to be reused across test modules within tests/benchmark
+def session_results_fixture() -> defaultdict[str, ResultManager]:
+    """Return a dictionary of ResultManager objects for the benchmarks.
+
+    The key is the test id as defined in the pytest_generate_tests in this module.
+    Used to pass a populated ResultManager from one benchmark to another.
+    """
+    return defaultdict(lambda: ResultManager())
+
+
+@pytest.fixture
+def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager:
+    """Return the unique ResultManager object for the current benchmark parameter."""
+    return session_results[request.node.callspec.id]
+
+
 def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
     """Display the total number of ANTA unit test cases used to benchmark."""
     terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
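Note (editor's sketch, not part of the commit): session_results is keyed by the callspec id ("1-device" / "2-devices"), so two benchmarks parametrized with the same id receive the same ResultManager instance and can hand results from one benchmark to the next within the session. A rough usage sketch with hypothetical test names:

from anta.result_manager import ResultManager

def test_runner_benchmark(results: ResultManager) -> None:
    # Hypothetical benchmark that runs ANTA and fills the shared ResultManager
    # for the current parameter id (e.g. "1-device").
    ...

def test_reporting_benchmark(results: ResultManager) -> None:
    # Hypothetical follow-up benchmark: because it shares the same callspec id,
    # it receives the ResultManager already populated by the test above.
    ...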
@@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
             return
         metafunc.parametrize(
             "inventory",
-            [
-                pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
-                pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
-            ],
+            BENCHMARK_PARAMETERS,
             indirect=True,
         )
+    elif "results" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "results",
+            BENCHMARK_PARAMETERS,
+            indirect=True,
+        )
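Note (editor's sketch, not part of the commit): indirect=True routes each parameter to the named fixture instead of the test function, and the param id becomes part of request.node.callspec.id, which is what the results fixture above uses as its key. A self-contained example of the mechanism, with generic names unrelated to ANTA:

import pytest

def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    if "thing" in metafunc.fixturenames:
        # indirect=True sends each value to the `thing` fixture, not the test.
        metafunc.parametrize("thing", ["a", "b"], indirect=True)

@pytest.fixture
def thing(request: pytest.FixtureRequest) -> str:
    return f"built-from-{request.param}"

def test_thing(thing: str) -> None:
    # Runs twice, as test_thing[a] and test_thing[b].
    assert thing.startswith("built-from-")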