Merging upstream version 1.1.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent 50f8dbf7e8
commit 2044ea6182
196 changed files with 10121 additions and 3780 deletions
4 tests/benchmark/__init__.py Normal file

@@ -0,0 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for ANTA."""

57 tests/benchmark/conftest.py Normal file

@@ -0,0 +1,57 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Fixtures for benchmarking ANTA."""

import logging

import pytest
import respx
from _pytest.terminal import TerminalReporter

from anta.catalog import AntaCatalog

from .utils import AntaMockEnvironment

logger = logging.getLogger(__name__)

TEST_CASE_COUNT = None


@pytest.fixture(name="anta_mock_env", scope="session")  # Session scope to avoid reparsing all the unit test data for every test.
def anta_mock_env_fixture() -> AntaMockEnvironment:
    """Return an AntaMockEnvironment for this test session. Also configure respx to mock eAPI responses."""
    global TEST_CASE_COUNT  # noqa: PLW0603
    eapi_route = respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"})
    env = AntaMockEnvironment()
    TEST_CASE_COUNT = env.tests_count
    eapi_route.side_effect = env.eapi_response
    return env


@pytest.fixture  # Function scope, as the indexing result is stored in this object.
def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
    """Fixture that returns an ANTA catalog from the AntaMockEnvironment of this test session."""
    return anta_mock_env.catalog


def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Display the total number of ANTA unit test cases used to benchmark."""
    terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")


def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Parametrize inventory for benchmark tests."""
    if "inventory" in metafunc.fixturenames:
        for marker in metafunc.definition.iter_markers(name="parametrize"):
            if "inventory" in marker.args[0]:
                # Do not override the test function's own parametrize marker for the inventory argument.
                return
        metafunc.parametrize(
            "inventory",
            [
                pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
                pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
            ],
            indirect=True,
        )

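Note: with `indirect=True`, each `pytest.param` dict above is delivered to the `inventory` fixture through `request.param` instead of being passed to the test function directly. A minimal sketch of such a fixture, assuming a plain dict payload (this is not the actual ANTA fixture):

    import pytest

    @pytest.fixture
    def inventory(request: pytest.FixtureRequest) -> dict:
        # request.param holds the dict from pytest.param(...),
        # e.g. {"count": 1, "disable_cache": True, "reachable": True}.
        return request.param
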
90 tests/benchmark/test_anta.py Normal file

@@ -0,0 +1,90 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for ANTA."""

import asyncio
import logging
from unittest.mock import patch

import pytest
import respx
from pytest_codspeed import BenchmarkFixture

from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.result_manager.models import AntaTestStatus
from anta.runner import main

from .utils import collect, collect_commands

logger = logging.getLogger(__name__)


def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark ANTA in dry-run mode."""
    # Disable logging during ANTA execution so that logging time is not counted in the benchmark.
    logging.disable()

    def _() -> ResultManager:
        manager = ResultManager()
        catalog.clear_indexes()
        event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
        return manager

    manager = benchmark(_)

    logging.disable(logging.NOTSET)
    if len(manager.results) != len(inventory) * len(catalog.tests):
        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False)
    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------"
    logger.info(bench_info)


@patch("anta.models.AntaTest.collect", collect)
@patch("anta.device.AntaDevice.collect_commands", collect_commands)
@respx.mock  # Mock eAPI responses
def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark ANTA."""
    # Disable logging during ANTA execution so that logging time is not counted in the benchmark.
    logging.disable()

    def _() -> ResultManager:
        manager = ResultManager()
        catalog.clear_indexes()
        event_loop.run_until_complete(main(manager, inventory, catalog))
        return manager

    manager = benchmark(_)

    logging.disable(logging.NOTSET)

    if len(catalog.tests) * len(inventory) != len(manager.results):
        # This could mean that duplicate tests exist in the catalog.
        # TODO: consider removing this code and refactoring the unit test data as a dictionary with tuple keys instead of a list.
        seen = set()
        dupes = []
        for test in catalog.tests:
            if test in seen:
                dupes.append(test)
            else:
                seen.add(test)
        if dupes:
            for test in dupes:
                msg = f"Found duplicate in test catalog: {test}"
                logger.error(msg)
        pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False)
    bench_info = (
        "\n--- ANTA NRFU Benchmark Information ---\n"
        f"Test results: {len(manager.results)}\n"
        f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n"
        f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n"
        f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n"
        f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n"
        f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n"
        "---------------------------------------"
    )
    logger.info(bench_info)
    assert manager.get_total_results({AntaTestStatus.ERROR}) == 0
    assert manager.get_total_results({AntaTestStatus.UNSET}) == 0

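Note: the `logging.disable()` / `logging.disable(logging.NOTSET)` pair brackets the measured region so log handling does not pollute the timings. A more defensive variant would restore logging even if the benchmark raises; a minimal sketch, not what the diff itself does:

    import contextlib
    import logging

    @contextlib.contextmanager
    def logging_disabled():
        """Suppress all logging inside the block, then restore it."""
        logging.disable()  # With no argument, disables everything up to CRITICAL.
        try:
            yield
        finally:
            logging.disable(logging.NOTSET)  # Lift the suppression.
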
48 tests/benchmark/test_runner.py Normal file

@@ -0,0 +1,48 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for anta.runner."""

from __future__ import annotations

from typing import TYPE_CHECKING

from anta.result_manager import ResultManager
from anta.runner import get_coroutines, prepare_tests

if TYPE_CHECKING:
    from collections import defaultdict

    from pytest_codspeed import BenchmarkFixture

    from anta.catalog import AntaCatalog, AntaTestDefinition
    from anta.device import AntaDevice
    from anta.inventory import AntaInventory


def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.prepare_tests`."""

    def _() -> defaultdict[AntaDevice, set[AntaTestDefinition]] | None:
        catalog.clear_indexes()
        return prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)

    selected_tests = benchmark(_)

    assert selected_tests is not None
    assert len(selected_tests) == len(inventory)
    assert sum(len(tests) for tests in selected_tests.values()) == len(inventory) * len(catalog.tests)


def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
    """Benchmark `anta.runner.get_coroutines`."""
    selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)

    assert selected_tests is not None

    coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager()))
    for coros in coroutines:
        coros.close()

    count = sum(len(tests) for tests in selected_tests.values())
    assert count == len(coroutines)

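Note: closing each coroutine matters because `get_coroutines` only creates coroutine objects; this benchmark never awaits them, and CPython warns about un-awaited coroutines when they are garbage-collected. A standalone illustration (the `nap` coroutine is made up):

    import asyncio

    async def nap() -> None:
        await asyncio.sleep(1)

    coro = nap()  # Calling a coroutine function only creates the object; nothing runs yet.
    coro.close()  # Without this, CPython emits "RuntimeWarning: coroutine 'nap' was never awaited".
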
164 tests/benchmark/utils.py Normal file

@@ -0,0 +1,164 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Utils for the ANTA benchmark tests."""

from __future__ import annotations

import asyncio
import copy
import importlib
import json
import pkgutil
from typing import TYPE_CHECKING, Any

import httpx

from anta.catalog import AntaCatalog, AntaTestDefinition
from anta.models import AntaCommand, AntaTest

if TYPE_CHECKING:
    from collections.abc import Generator
    from types import ModuleType

    from anta.device import AntaDevice


async def collect(self: AntaTest) -> None:
    """Patched anta.models.AntaTest.collect() method.

    When generating the catalog, we inject a unit test case name into the custom_field input so that we can retrieve the eos_data for this specific test.
    We use this unit test case name in the eAPI request ID.
    """
    if self.inputs.result_overwrite is None or self.inputs.result_overwrite.custom_field is None:
        msg = f"The custom_field input is not present for test {self.name}"
        raise RuntimeError(msg)
    await self.device.collect_commands(self.instance_commands, collection_id=f"{self.name}:{self.inputs.result_overwrite.custom_field}")


async def collect_commands(self: AntaDevice, commands: list[AntaCommand], collection_id: str) -> None:
    """Patched anta.device.AntaDevice.collect_commands() method.

    For the same reason as above, we inject the index of each command into the eAPI request ID.
    """
    await asyncio.gather(*(self.collect(command=command, collection_id=f"{collection_id}:{idx}") for idx, command in enumerate(commands)))

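# Composed IDs, step by step (the test and case names below are illustrative):
#   collect()          -> collection_id = "VerifyUptime:success"
#   collect_commands() -> "VerifyUptime:success:0" for the first command
# The eAPI layer then wraps this as the request ID "ANTA-VerifyUptime:success:0-{command ID}",
# which eapi_response() parses back apart via parse_req_id().
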
class AntaMockEnvironment:  # pylint: disable=too-few-public-methods
    """Generate an ANTA test catalog from the unit test data. It can be accessed using the `catalog` attribute of this class instance.

    Also provides the `eos_data_catalog` attribute with the outputs of all the commands used in the test catalog.

    Each module in `tests.units.anta_tests` has a `DATA` constant.
    The `DATA` structure is a list of dictionaries used to parametrize the tests. The list elements have the following keys:
    - `name` (str): Test name as displayed by Pytest.
    - `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime.
    - `eos_data` (list[dict]): List of data mocking the data returned by EOS, to be passed to the test.
    - `inputs` (dict): Dictionary used to instantiate the inputs of the class given in `test`.

    The keys of `eos_data_catalog` are the tuples `(DATA['test'], DATA['name'])`. The values are `eos_data`.
    """
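    # One DATA entry has, for example, the following shape (the values are illustrative):
    #     {
    #         "name": "success",
    #         "test": VerifyUptime,
    #         "eos_data": [{"upTime": 1186689.15, "bootupTimestamp": 1700000000.0}],
    #         "inputs": {"minimum": 86400},
    #     }
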
    def __init__(self) -> None:
        self._catalog, self.eos_data_catalog = self._generate_catalog()
        self.tests_count = len(self._catalog.tests)

    @property
    def catalog(self) -> AntaCatalog:
        """AntaMockEnvironment will always return a new AntaCatalog object based on the initial parsing.

        This is because AntaCatalog objects store indexes when tests are run, and we want a fresh object each time a test is run.
        """
        return copy.deepcopy(self._catalog)

    def _generate_catalog(self) -> tuple[AntaCatalog, dict[tuple[str, str], list[dict[str, Any]]]]:
        """Generate the `catalog` and `eos_data_catalog` attributes."""

        def import_test_modules() -> Generator[ModuleType, None, None]:
            """Yield all test modules from the given package."""
            package = importlib.import_module("tests.units.anta_tests")
            prefix = package.__name__ + "."
            for _, module_name, is_pkg in pkgutil.walk_packages(package.__path__, prefix):
                if not is_pkg and module_name.split(".")[-1].startswith("test_"):
                    module = importlib.import_module(module_name)
                    if hasattr(module, "DATA"):
                        yield module

        test_definitions = []
        eos_data_catalog = {}
        for module in import_test_modules():
            for test_data in module.DATA:
                test = test_data["test"]
                result_overwrite = AntaTest.Input.ResultOverwrite(custom_field=test_data["name"])
                if test_data["inputs"] is None:
                    inputs = test.Input(result_overwrite=result_overwrite)
                else:
                    inputs = test.Input(**test_data["inputs"], result_overwrite=result_overwrite)
                test_definition = AntaTestDefinition(
                    test=test,
                    inputs=inputs,
                )
                eos_data_catalog[(test.__name__, test_data["name"])] = test_data["eos_data"]
                test_definitions.append(test_definition)

        return (AntaCatalog(tests=test_definitions), eos_data_catalog)

    def eapi_response(self, request: httpx.Request) -> httpx.Response:
        """Mock eAPI response.

        If the eAPI request ID has the format `ANTA-{test name}:{unit test name}:{command index}-{command ID}`,
        the function will return the eos_data from the unit test case.

        Otherwise, it will mock the 'show version' command or raise an Exception.
        """
        words_count = 3

        def parse_req_id(req_id: str) -> tuple[str, str, int] | None:
            """Parse the patched request ID from the eAPI request."""
            req_id = req_id.removeprefix("ANTA-").rpartition("-")[0]
            words = req_id.split(":", words_count)
            if len(words) == words_count:
                test_name, unit_test_name, command_index = words
                return test_name, unit_test_name, int(command_index)
            return None

        jsonrpc = json.loads(request.content)
        assert jsonrpc["method"] == "runCmds"
        commands = jsonrpc["params"]["cmds"]
        ofmt = jsonrpc["params"]["format"]
        req_id: str = jsonrpc["id"]
        result = None

        # Extract the test name, unit test name, and command index from the request ID
        if (words := parse_req_id(req_id)) is not None:
            test_name, unit_test_name, idx = words

            # This should never happen, but better safe than sorry
            if (test_name, unit_test_name) not in self.eos_data_catalog:
                msg = f"Error while generating a mock response for unit test {unit_test_name} of test {test_name}: eos_data not found"
                raise RuntimeError(msg)

            eos_data = self.eos_data_catalog[(test_name, unit_test_name)]

            # This could happen if the unit test data is not correctly defined
            if idx >= len(eos_data):
                msg = f"Error while generating a mock response for unit test {unit_test_name} of test {test_name}: missing test case in eos_data"
                raise RuntimeError(msg)
            result = {"output": eos_data[idx]} if ofmt == "text" else eos_data[idx]
        elif {"cmd": "show version"} in commands and ofmt == "json":
            # Mock the 'show version' request performed during inventory refresh.
            result = {
                "modelName": "pytest",
            }

        if result is not None:
            return httpx.Response(
                status_code=200,
                json={
                    "jsonrpc": "2.0",
                    "id": req_id,
                    "result": [result],
                },
            )
        msg = f"The following eAPI Request has not been mocked: {jsonrpc}"
        raise NotImplementedError(msg)

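Note: a worked example of the request-ID round trip in `eapi_response` (the IDs are illustrative):

    req_id = "ANTA-VerifyUptime:success:0-2024"
    req_id.removeprefix("ANTA-").rpartition("-")[0]  # -> "VerifyUptime:success:0"
    # parse_req_id() then splits on ":" and returns ("VerifyUptime", "success", 0).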