Adding upstream version 1.1.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-05 11:54:23 +01:00
parent f13b7abbd8
commit 77504588ab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
196 changed files with 10121 additions and 3780 deletions

View file

@ -1,4 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Library for ANTA unit tests."""
"""Benchmark tests for ANTA."""

View file

@ -0,0 +1,57 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Fixtures for benchmarking ANTA."""
import logging
import pytest
import respx
from _pytest.terminal import TerminalReporter
from anta.catalog import AntaCatalog
from .utils import AntaMockEnvironment
logger = logging.getLogger(__name__)
TEST_CASE_COUNT = None
@pytest.fixture(name="anta_mock_env", scope="session") # We want this fixture to have a session scope to avoid re-parsing all the unit test data.
def anta_mock_env_fixture() -> AntaMockEnvironment:
"""Return an AntaMockEnvironment for this test session. Also configure respx to mock eAPI responses."""
global TEST_CASE_COUNT # noqa: PLW0603
eapi_route = respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"})
env = AntaMockEnvironment()
TEST_CASE_COUNT = env.tests_count
eapi_route.side_effect = env.eapi_response
return env
@pytest.fixture # This fixture must have function scope because the indexing result is stored in this object
def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
"""Fixture that return an ANTA catalog from the AntaMockEnvironment of this test session."""
return anta_mock_env.catalog
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
"""Display the total number of ANTA unit test cases used to benchmark."""
terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Parametrize inventory for benchmark tests."""
if "inventory" in metafunc.fixturenames:
for marker in metafunc.definition.iter_markers(name="parametrize"):
if "inventory" in marker.args[0]:
# Do not override test function parametrize marker for inventory arg
return
metafunc.parametrize(
"inventory",
[
pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
],
indirect=True,
)
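A note on the `indirect=True` mechanics used here: each `pytest.param` dictionary is not passed to the test function itself but is routed to the fixture of the same name through `request.param` (see the `inventory` fixture in `tests/conftest.py` later in this diff). A minimal, self-contained sketch of that flow, with a simplified hypothetical fixture body:

```python
import pytest


@pytest.fixture
def inventory(request: pytest.FixtureRequest) -> dict:
    """Simplified stand-in for the real inventory fixture."""
    # With indirect=True, the pytest.param dict arrives here as request.param.
    params = request.param if hasattr(request, "param") else {}
    return {"count": params.get("count", 1), "reachable": params.get("reachable", True)}


@pytest.mark.parametrize("inventory", [{"count": 2}], indirect=True)
def test_inventory_size(inventory: dict) -> None:
    assert inventory["count"] == 2
```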

View file

@ -0,0 +1,90 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for ANTA."""
import asyncio
import logging
from unittest.mock import patch
import pytest
import respx
from pytest_codspeed import BenchmarkFixture
from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.result_manager.models import AntaTestStatus
from anta.runner import main
from .utils import collect, collect_commands
logger = logging.getLogger(__name__)
def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark ANTA in Dry-Run Mode."""
# Disable logging during ANTA execution so logging calls do not add to the benchmark timings
logging.disable()
def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
return manager
manager = benchmark(_)
logging.disable(logging.NOTSET)
if len(manager.results) != len(inventory) * len(catalog.tests):
pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False)
bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------"
logger.info(bench_info)
@patch("anta.models.AntaTest.collect", collect)
@patch("anta.device.AntaDevice.collect_commands", collect_commands)
@respx.mock # Mock eAPI responses
def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark ANTA."""
# Disable logging during ANTA execution so logging calls do not add to the benchmark timings
logging.disable()
def _() -> ResultManager:
manager = ResultManager()
catalog.clear_indexes()
event_loop.run_until_complete(main(manager, inventory, catalog))
return manager
manager = benchmark(_)
logging.disable(logging.NOTSET)
if len(catalog.tests) * len(inventory) != len(manager.results):
# This could mean duplicates exist.
# TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list
seen = set()
dupes = []
for test in catalog.tests:
if test in seen:
dupes.append(test)
else:
seen.add(test)
if dupes:
for test in dupes:
msg = f"Found duplicate in test catalog: {test}"
logger.error(msg)
pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False)
bench_info = (
"\n--- ANTA NRFU Benchmark Information ---\n"
f"Test results: {len(manager.results)}\n"
f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n"
f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n"
f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n"
f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n"
f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n"
"---------------------------------------"
)
logger.info(bench_info)
assert manager.get_total_results({AntaTestStatus.ERROR}) == 0
assert manager.get_total_results({AntaTestStatus.UNSET}) == 0
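What the closure above buys us: `anta.runner.main` is a coroutine, so it is wrapped in a synchronous callable and driven with `event_loop.run_until_complete`, letting the benchmark fixture time one full run per call. A stripped-down sketch of that wrapping, timed with `time.perf_counter` instead of pytest-codspeed purely for illustration:

```python
import asyncio
import time


async def fake_runner() -> str:
    """Stand-in for anta.runner.main(manager, inventory, catalog)."""
    await asyncio.sleep(0)
    return "done"


def run_once(loop: asyncio.AbstractEventLoop) -> str:
    # Synchronous wrapper: this is the callable a benchmark harness would time.
    return loop.run_until_complete(fake_runner())


loop = asyncio.new_event_loop()
start = time.perf_counter()
print(run_once(loop), f"in {time.perf_counter() - start:.6f}s")
loop.close()
```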

View file

@ -0,0 +1,48 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for anta.runner."""
from __future__ import annotations
from typing import TYPE_CHECKING
from anta.result_manager import ResultManager
from anta.runner import get_coroutines, prepare_tests
if TYPE_CHECKING:
from collections import defaultdict
from pytest_codspeed import BenchmarkFixture
from anta.catalog import AntaCatalog, AntaTestDefinition
from anta.device import AntaDevice
from anta.inventory import AntaInventory
def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark `anta.runner.prepare_tests`."""
def _() -> defaultdict[AntaDevice, set[AntaTestDefinition]] | None:
catalog.clear_indexes()
return prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)
selected_tests = benchmark(_)
assert selected_tests is not None
assert len(selected_tests) == len(inventory)
assert sum(len(tests) for tests in selected_tests.values()) == len(inventory) * len(catalog.tests)
def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
"""Benchmark `anta.runner.get_coroutines`."""
selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None)
assert selected_tests is not None
coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager()))
for coros in coroutines:
coros.close()
count = sum(len(tests) for tests in selected_tests.values())
assert count == len(coroutines)
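The `coros.close()` loop exists because `get_coroutines` returns coroutine objects that this benchmark never awaits; closing them avoids "coroutine ... was never awaited" warnings at interpreter shutdown. A tiny illustration of that housekeeping:

```python
async def fake_test() -> str:
    return "success"


coros = [fake_test() for _ in range(3)]  # coroutine objects created but never awaited
for coro in coros:
    coro.close()  # discard cleanly; otherwise Python warns "coroutine was never awaited"
```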

tests/benchmark/utils.py
View file

@ -0,0 +1,164 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Utils for the ANTA benchmark tests."""
from __future__ import annotations
import asyncio
import copy
import importlib
import json
import pkgutil
from typing import TYPE_CHECKING, Any
import httpx
from anta.catalog import AntaCatalog, AntaTestDefinition
from anta.models import AntaCommand, AntaTest
if TYPE_CHECKING:
from collections.abc import Generator
from types import ModuleType
from anta.device import AntaDevice
async def collect(self: AntaTest) -> None:
"""Patched anta.models.AntaTest.collect() method.
When generating the catalog, we inject a unit test case name in the custom_field input to be able to retrieve the eos_data for this specific test.
We use this unit test case name in the eAPI request ID.
"""
if self.inputs.result_overwrite is None or self.inputs.result_overwrite.custom_field is None:
msg = f"The custom_field input is not present for test {self.name}"
raise RuntimeError(msg)
await self.device.collect_commands(self.instance_commands, collection_id=f"{self.name}:{self.inputs.result_overwrite.custom_field}")
async def collect_commands(self: AntaDevice, commands: list[AntaCommand], collection_id: str) -> None:
"""Patched anta.device.AntaDevice.collect_commands() method.
For the same reason as above, we inject the command index of the test into the eAPI request ID.
"""
await asyncio.gather(*(self.collect(command=command, collection_id=f"{collection_id}:{idx}") for idx, command in enumerate(commands)))
class AntaMockEnvironment: # pylint: disable=too-few-public-methods
"""Generate an ANTA test catalog from the unit tests data. It can be accessed using the `catalog` attribute of this class instance.
Also provides the `eos_data_catalog` attribute with the output of all the commands used in the test catalog.
Each module in `tests.units.anta_tests` has a `DATA` constant.
The `DATA` structure is a list of dictionaries used to parametrize the test. The list elements have the following keys:
- `name` (str): Test name as displayed by Pytest.
- `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime.
- `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test.
- `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`.
The keys of `eos_data_catalog` are the tuples (DATA['test'], DATA['name']). The values are `eos_data`.
"""
def __init__(self) -> None:
self._catalog, self.eos_data_catalog = self._generate_catalog()
self.tests_count = len(self._catalog.tests)
@property
def catalog(self) -> AntaCatalog:
"""AntaMockEnvironment object will always return a new AntaCatalog object based on the initial parsing.
This is because AntaCatalog objects store indexes when tests are run and we want a new object each time a test is run.
"""
return copy.deepcopy(self._catalog)
def _generate_catalog(self) -> tuple[AntaCatalog, dict[tuple[str, str], list[dict[str, Any]]]]:
"""Generate the `catalog` and `eos_data_catalog` attributes."""
def import_test_modules() -> Generator[ModuleType, None, None]:
"""Yield all test modules from the given package."""
package = importlib.import_module("tests.units.anta_tests")
prefix = package.__name__ + "."
for _, module_name, is_pkg in pkgutil.walk_packages(package.__path__, prefix):
if not is_pkg and module_name.split(".")[-1].startswith("test_"):
module = importlib.import_module(module_name)
if hasattr(module, "DATA"):
yield module
test_definitions = []
eos_data_catalog = {}
for module in import_test_modules():
for test_data in module.DATA:
test = test_data["test"]
result_overwrite = AntaTest.Input.ResultOverwrite(custom_field=test_data["name"])
if test_data["inputs"] is None:
inputs = test.Input(result_overwrite=result_overwrite)
else:
inputs = test.Input(**test_data["inputs"], result_overwrite=result_overwrite)
test_definition = AntaTestDefinition(
test=test,
inputs=inputs,
)
eos_data_catalog[(test.__name__, test_data["name"])] = test_data["eos_data"]
test_definitions.append(test_definition)
return (AntaCatalog(tests=test_definitions), eos_data_catalog)
def eapi_response(self, request: httpx.Request) -> httpx.Response:
"""Mock eAPI response.
If the eAPI request ID has the format `ANTA-{test name}:{unit test name}:{command index}-{command ID}`,
the function will return the eos_data from the unit test case.
Otherwise, it will mock the 'show version' command or raise an exception.
"""
words_count = 3
def parse_req_id(req_id: str) -> tuple[str, str, int] | None:
"""Parse the patched request ID from the eAPI request."""
req_id = req_id.removeprefix("ANTA-").rpartition("-")[0]
words = req_id.split(":", words_count)
if len(words) == words_count:
test_name, unit_test_name, command_index = words
return test_name, unit_test_name, int(command_index)
return None
jsonrpc = json.loads(request.content)
assert jsonrpc["method"] == "runCmds"
commands = jsonrpc["params"]["cmds"]
ofmt = jsonrpc["params"]["format"]
req_id: str = jsonrpc["id"]
result = None
# Extract the test name, unit test name, and command index from the request ID
if (words := parse_req_id(req_id)) is not None:
test_name, unit_test_name, idx = words
# This should never happen, but better safe than sorry
if (test_name, unit_test_name) not in self.eos_data_catalog:
msg = f"Error while generating a mock response for unit test {unit_test_name} of test {test_name}: eos_data not found"
raise RuntimeError(msg)
eos_data = self.eos_data_catalog[(test_name, unit_test_name)]
# This could happen if the unit test data is not correctly defined
if idx >= len(eos_data):
msg = f"Error while generating a mock response for unit test {unit_test_name} of test {test_name}: missing test case in eos_data"
raise RuntimeError(msg)
result = {"output": eos_data[idx]} if ofmt == "text" else eos_data[idx]
elif {"cmd": "show version"} in commands and ofmt == "json":
# Mock 'show version' request performed during inventory refresh.
result = {
"modelName": "pytest",
}
if result is not None:
return httpx.Response(
status_code=200,
json={
"jsonrpc": "2.0",
"id": req_id,
"result": [result],
},
)
msg = f"The following eAPI Request has not been mocked: {jsonrpc}"
raise NotImplementedError(msg)
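To make the request-ID convention concrete: an ID such as `ANTA-VerifyUptime:success:0-2b3f` (the trailing command ID is hypothetical) decomposes into the test class name, the unit test case name, and the command index. A standalone copy of the `parse_req_id` logic:

```python
from __future__ import annotations


def parse_req_id(req_id: str) -> tuple[str, str, int] | None:
    """Parse 'ANTA-{test name}:{unit test name}:{command index}-{command ID}'."""
    req_id = req_id.removeprefix("ANTA-").rpartition("-")[0]
    words = req_id.split(":", 3)
    if len(words) == 3:
        test_name, unit_test_name, command_index = words
        return test_name, unit_test_name, int(command_index)
    return None


print(parse_req_id("ANTA-VerifyUptime:success:0-2b3f"))  # ('VerifyUptime', 'success', 0)
print(parse_req_id("not-an-anta-id"))  # None
```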

View file

@ -1,53 +1,58 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""conftest.py - used to store anta specific fixtures used for tests."""
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
from __future__ import annotations
import logging
from typing import Any
import asyncio
from collections.abc import Iterator
from pathlib import Path
from unittest.mock import AsyncMock, Mock, patch
import pytest
import respx
# Load fixtures from dedicated file tests/lib/fixture.py
# As well as pytest_asyncio plugin to test co-routines
pytest_plugins = [
"tests.lib.fixture",
"pytest_asyncio",
]
from anta.device import AsyncEOSDevice
from anta.inventory import AntaInventory
# Enable nice assert messages
# https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#assertion-rewriting
pytest.register_assert_rewrite("tests.lib.anta")
# Placeholder to disable logging of some external libs
for _ in ("asyncio", "httpx"):
logging.getLogger(_).setLevel(logging.CRITICAL)
DATA_DIR: Path = Path(__file__).parent.resolve() / "data"
def build_test_id(val: dict[str, Any]) -> str:
"""Build id for a unit test of an AntaTest subclass.
{
"name": "meaniful test name",
"test": <AntaTest instance>,
...
}
"""
return f"{val['test'].module}.{val['test'].__name__}-{val['name']}"
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Generate ANTA testts unit tests dynamically during test collection.
It will parametrize test cases based on the `DATA` data structure defined in `tests.units.anta_tests` modules.
See `tests/units/anta_tests/README.md` for more information on how to use it.
Test IDs are generated using the `build_test_id` function above.
Only the function named "test" is parametrized with data, which allows helper functions in each module
to have their own dedicated tests.
"""
if "tests.units.anta_tests" in metafunc.module.__package__ and metafunc.function.__name__ == "test":
# This is a unit test for an AntaTest subclass
metafunc.parametrize("data", metafunc.module.DATA, ids=build_test_id)
@pytest.fixture
def inventory(request: pytest.FixtureRequest) -> Iterator[AntaInventory]:
"""Generate an ANTA inventory."""
user = "admin"
password = "password" # noqa: S105
params = request.param if hasattr(request, "param") else {}
count = params.get("count", 1)
disable_cache = params.get("disable_cache", True)
reachable = params.get("reachable", True)
if "filename" in params:
inv = AntaInventory.parse(DATA_DIR / params["filename"], username=user, password=password, disable_cache=disable_cache)
else:
inv = AntaInventory()
for i in range(count):
inv.add_device(
AsyncEOSDevice(
host=f"device-{i}.anta.arista.com",
username=user,
password=password,
name=f"device-{i}",
disable_cache=disable_cache,
)
)
if reachable:
# This context manager makes all devices reachable
with patch("asyncio.open_connection", AsyncMock(spec=asyncio.open_connection, return_value=(Mock(), Mock()))), respx.mock:
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show version").respond(
json={
"result": [
{
"modelName": "pytest",
}
],
}
)
yield inv
else:
with patch("asyncio.open_connection", AsyncMock(spec=asyncio.open_connection, side_effect=TimeoutError)):
yield inv
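The reachable branch relies on respx's JSON body matching: any POST to `/command-api` whose first command is `show version` receives a canned eAPI-style reply, which is what lets the inventory refresh succeed against hosts that do not exist. A minimal sketch of that matching, with a hypothetical host name and a simplified payload:

```python
import httpx
import respx

with respx.mock:
    respx.post(path="/command-api", json__params__cmds__0__cmd="show version").respond(
        json={"result": [{"modelName": "pytest"}]}
    )
    reply = httpx.post(
        "https://device-0.anta.arista.com/command-api",
        json={"params": {"cmds": [{"cmd": "show version"}]}},
    )
    print(reply.json()["result"][0]["modelName"])  # -> pytest
```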

View file

@ -1,259 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
# pylint: skip-file
"""JSON Data for unit tests."""
INVENTORY_MODEL_HOST_VALID = [
{"name": "validIPv4", "input": "1.1.1.1", "expected_result": "valid"},
{
"name": "validIPv6",
"input": "fe80::cc62:a9ff:feef:932a",
},
]
INVENTORY_MODEL_HOST_INVALID = [
{
"name": "invalidIPv4_with_netmask",
"input": "1.1.1.1/32",
},
{
"name": "invalidIPv6_with_netmask",
"input": "fe80::cc62:a9ff:feef:932a/128",
},
{"name": "invalidHost_format", "input": "@", "expected_result": "invalid"},
{
"name": "invalidIPv6_format",
"input": "fe80::cc62:a9ff:feef:",
},
]
INVENTORY_MODEL_HOST_CACHE = [
{"name": "Host cache default", "input": {"host": "1.1.1.1"}, "expected_result": False},
{"name": "Host cache enabled", "input": {"host": "1.1.1.1", "disable_cache": False}, "expected_result": False},
{"name": "Host cache disabled", "input": {"host": "1.1.1.1", "disable_cache": True}, "expected_result": True},
]
INVENTORY_MODEL_NETWORK_VALID = [
{"name": "ValidIPv4_Subnet", "input": "1.1.1.0/24", "expected_result": "valid"},
{"name": "ValidIPv6_Subnet", "input": "2001:db8::/32", "expected_result": "valid"},
]
INVENTORY_MODEL_NETWORK_INVALID = [
{"name": "ValidIPv4_Subnet", "input": "1.1.1.0/17", "expected_result": "invalid"},
{
"name": "InvalidIPv6_Subnet",
"input": "2001:db8::/16",
"expected_result": "invalid",
},
]
INVENTORY_MODEL_NETWORK_CACHE = [
{"name": "Network cache default", "input": {"network": "1.1.1.0/24"}, "expected_result": False},
{"name": "Network cache enabled", "input": {"network": "1.1.1.0/24", "disable_cache": False}, "expected_result": False},
{"name": "Network cache disabled", "input": {"network": "1.1.1.0/24", "disable_cache": True}, "expected_result": True},
]
INVENTORY_MODEL_RANGE_VALID = [
{
"name": "ValidIPv4_Range",
"input": {"start": "10.1.0.1", "end": "10.1.0.10"},
"expected_result": "valid",
},
]
INVENTORY_MODEL_RANGE_INVALID = [
{
"name": "InvalidIPv4_Range_name",
"input": {"start": "toto", "end": "10.1.0.1"},
"expected_result": "invalid",
},
]
INVENTORY_MODEL_RANGE_CACHE = [
{"name": "Range cache default", "input": {"start": "1.1.1.1", "end": "1.1.1.10"}, "expected_result": False},
{"name": "Range cache enabled", "input": {"start": "1.1.1.1", "end": "1.1.1.10", "disable_cache": False}, "expected_result": False},
{"name": "Range cache disabled", "input": {"start": "1.1.1.1", "end": "1.1.1.10", "disable_cache": True}, "expected_result": True},
]
INVENTORY_MODEL_VALID = [
{
"name": "Valid_Host_Only",
"input": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2"}]},
"expected_result": "valid",
},
{
"name": "Valid_Networks_Only",
"input": {"networks": [{"network": "192.168.0.0/16"}, {"network": "192.168.1.0/24"}]},
"expected_result": "valid",
},
{
"name": "Valid_Ranges_Only",
"input": {
"ranges": [
{"start": "10.1.0.1", "end": "10.1.0.10"},
{"start": "10.2.0.1", "end": "10.2.1.10"},
],
},
"expected_result": "valid",
},
]
INVENTORY_MODEL_INVALID = [
{
"name": "Host_with_Invalid_entry",
"input": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2/32"}]},
"expected_result": "invalid",
},
]
INVENTORY_DEVICE_MODEL_VALID = [
{
"name": "Valid_Inventory",
"input": [{"host": "1.1.1.1", "username": "arista", "password": "arista123!"}, {"host": "1.1.1.2", "username": "arista", "password": "arista123!"}],
"expected_result": "valid",
},
]
INVENTORY_DEVICE_MODEL_INVALID = [
{
"name": "Invalid_Inventory",
"input": [{"host": "1.1.1.1", "password": "arista123!"}, {"host": "1.1.1.1", "username": "arista"}],
"expected_result": "invalid",
},
]
ANTA_INVENTORY_TESTS_VALID = [
{
"name": "ValidInventory_with_host_only",
"input": {"anta_inventory": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2"}, {"host": "my.awesome.host.com"}]}},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "192.168.0.17",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 2,
},
},
{
"name": "ValidInventory_with_networks_only",
"input": {"anta_inventory": {"networks": [{"network": "192.168.0.0/24"}]}},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "192.168.0.1",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 256,
},
},
{
"name": "ValidInventory_with_ranges_only",
"input": {
"anta_inventory": {
"ranges": [
{"start": "10.0.0.1", "end": "10.0.0.11"},
{"start": "10.0.0.101", "end": "10.0.0.111"},
],
},
},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "10.0.0.10",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 22,
},
},
{
"name": "ValidInventory_with_host_port",
"input": {"anta_inventory": {"hosts": [{"host": "192.168.0.17", "port": 443}, {"host": "192.168.0.2", "port": 80}]}},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "192.168.0.17",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 2,
},
},
{
"name": "ValidInventory_with_host_tags",
"input": {"anta_inventory": {"hosts": [{"host": "192.168.0.17", "tags": ["leaf"]}, {"host": "192.168.0.2", "tags": ["spine"]}]}},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "192.168.0.17",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 2,
},
},
{
"name": "ValidInventory_with_networks_tags",
"input": {"anta_inventory": {"networks": [{"network": "192.168.0.0/24", "tags": ["leaf"]}]}},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "192.168.0.1",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 256,
},
},
{
"name": "ValidInventory_with_ranges_tags",
"input": {
"anta_inventory": {
"ranges": [
{"start": "10.0.0.1", "end": "10.0.0.11", "tags": ["leaf"]},
{"start": "10.0.0.101", "end": "10.0.0.111", "tags": ["spine"]},
],
},
},
"expected_result": "valid",
"parameters": {
"ipaddress_in_scope": "10.0.0.10",
"ipaddress_out_of_scope": "192.168.1.1",
"nb_hosts": 22,
},
},
]
ANTA_INVENTORY_TESTS_INVALID = [
{
"name": "InvalidInventory_with_host_only",
"input": {"anta_inventory": {"hosts": [{"host": "192.168.0.17/32"}, {"host": "192.168.0.2"}]}},
"expected_result": "invalid",
},
{
"name": "InvalidInventory_wrong_network_bits",
"input": {"anta_inventory": {"networks": [{"network": "192.168.42.0/8"}]}},
"expected_result": "invalid",
},
{
"name": "InvalidInventory_wrong_network",
"input": {"anta_inventory": {"networks": [{"network": "toto"}]}},
"expected_result": "invalid",
},
{
"name": "InvalidInventory_wrong_range",
"input": {"anta_inventory": {"ranges": [{"start": "toto", "end": "192.168.42.42"}]}},
"expected_result": "invalid",
},
{
"name": "InvalidInventory_wrong_range_type_mismatch",
"input": {"anta_inventory": {"ranges": [{"start": "fe80::cafe", "end": "192.168.42.42"}]}},
"expected_result": "invalid",
},
{
"name": "Invalid_Root_Key",
"input": {
"inventory": {
"ranges": [
{"start": "10.0.0.1", "end": "10.0.0.11"},
{"start": "10.0.0.100", "end": "10.0.0.111"},
],
},
},
"expected_result": "invalid",
},
]
TEST_RESULT_SET_STATUS = [
{"name": "set_success", "target": "success", "message": "success"},
{"name": "set_error", "target": "error", "message": "error"},
{"name": "set_failure", "target": "failure", "message": "failure"},
{"name": "set_skipped", "target": "skipped", "message": "skipped"},
{"name": "set_unset", "target": "unset", "message": "unset"},
]

View file

@ -0,0 +1,11 @@
{
"anta.tests.software": [
{
"VerifyEOSVersion": {
"versions": [
"4.31.1F"
]
}
}
]
}

View file

@ -0,0 +1 @@
{aasas"anta.tests.software":[{"VerifyEOSVersion":{"versions":["4.31.1F"]}}]}

View file

@ -3,30 +3,28 @@ anta.tests.system:
- VerifyUptime:
minimum: 10
filters:
tags: ['fabric']
tags: ['spine']
- VerifyUptime:
minimum: 9
filters:
tags: ['leaf']
- VerifyReloadCause:
filters:
tags: ['leaf', 'spine']
tags: ['spine', 'leaf']
- VerifyCoredump:
- VerifyAgentLogs:
- VerifyCPUUtilization:
filters:
tags: ['leaf']
- VerifyMemoryUtilization:
filters:
tags: ['testdevice']
- VerifyFileSystemUtilization:
- VerifyNTP:
anta.tests.mlag:
- VerifyMlagStatus:
filters:
tags: ['leaf']
anta.tests.interfaces:
- VerifyL3MTU:
mtu: 1500
filters:
tags: ['demo']
tags: ['spine']

View file

@ -1,12 +0,0 @@
---
anta_inventory:
hosts:
- name: dummy
host: dummy.anta.ninja
tags: ["leaf"]
- name: dummy2
host: dummy2.anta.ninja
tags: ["leaf"]
- name: dummy3
host: dummy3.anta.ninja
tags: ["spine"]

View file

@ -0,0 +1,12 @@
---
anta_inventory:
hosts:
- name: leaf1
host: leaf1.anta.arista.com
tags: ["leaf"]
- name: leaf2
host: leaf2.anta.arista.com
tags: ["leaf"]
- name: spine1
host: spine1.anta.arista.com
tags: ["spine"]

View file

@ -0,0 +1,79 @@
# ANTA Report
**Table of Contents:**
- [ANTA Report](#anta-report)
- [Test Results Summary](#test-results-summary)
- [Summary Totals](#summary-totals)
- [Summary Totals Device Under Test](#summary-totals-device-under-test)
- [Summary Totals Per Category](#summary-totals-per-category)
- [Test Results](#test-results)
## Test Results Summary
### Summary Totals
| Total Tests | Total Tests Success | Total Tests Skipped | Total Tests Failure | Total Tests Error |
| ----------- | ------------------- | ------------------- | ------------------- | ------------------|
| 30 | 7 | 2 | 19 | 2 |
### Summary Totals Device Under Test
| Device Under Test | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | Categories Skipped | Categories Failed |
| ------------------| ----------- | ------------- | ------------- | ------------- | ----------- | -------------------| ------------------|
| DC1-SPINE1 | 15 | 2 | 2 | 10 | 1 | MLAG, VXLAN | AAA, BFD, BGP, Connectivity, Routing, SNMP, STP, Services, Software, System |
| DC1-LEAF1A | 15 | 5 | 0 | 9 | 1 | - | AAA, BFD, BGP, Connectivity, SNMP, STP, Services, Software, System |
### Summary Totals Per Category
| Test Category | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error |
| ------------- | ----------- | ------------- | ------------- | ------------- | ----------- |
| AAA | 2 | 0 | 0 | 2 | 0 |
| BFD | 2 | 0 | 0 | 2 | 0 |
| BGP | 2 | 0 | 0 | 2 | 0 |
| Connectivity | 4 | 0 | 0 | 2 | 2 |
| Interfaces | 2 | 2 | 0 | 0 | 0 |
| MLAG | 2 | 1 | 1 | 0 | 0 |
| Routing | 2 | 1 | 0 | 1 | 0 |
| SNMP | 2 | 0 | 0 | 2 | 0 |
| STP | 2 | 0 | 0 | 2 | 0 |
| Security | 2 | 2 | 0 | 0 | 0 |
| Services | 2 | 0 | 0 | 2 | 0 |
| Software | 2 | 0 | 0 | 2 | 0 |
| System | 2 | 0 | 0 | 2 | 0 |
| VXLAN | 2 | 1 | 1 | 0 | 0 |
## Test Results
| Device Under Test | Categories | Test | Description | Custom Field | Result | Messages |
| ----------------- | ---------- | ---- | ----------- | ------------ | ------ | -------- |
| DC1-LEAF1A | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. | - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} |
| DC1-LEAF1A | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 2, Actual: 1'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Expected: 3, Actual: 0'}}] |
| DC1-LEAF1A | Software | VerifyEOSVersion | Verifies the EOS version of the device. | - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] |
| DC1-LEAF1A | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-LEAF1A' instead. |
| DC1-LEAF1A | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - |
| DC1-LEAF1A | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-SPINE1_Ethernet1 Ethernet2 DC1-SPINE2_Ethernet1 Port(s) not configured: Ethernet7 |
| DC1-LEAF1A | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | success | - |
| DC1-LEAF1A | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' |
| DC1-LEAF1A | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 |
| DC1-LEAF1A | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | success | - |
| DC1-LEAF1A | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | Wrong STP mode configured for the following VLAN(s): [10, 20] |
| DC1-LEAF1A | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default |
| DC1-LEAF1A | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default |
| DC1-LEAF1A | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - |
| DC1-LEAF1A | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | success | - |
| DC1-SPINE1 | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. | - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} |
| DC1-SPINE1 | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Not Configured', 'default': 'Expected: 3, Actual: 4'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, {'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 4'}}] |
| DC1-SPINE1 | Software | VerifyEOSVersion | Verifies the EOS version of the device. | - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] |
| DC1-SPINE1 | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-SPINE1' instead. |
| DC1-SPINE1 | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - |
| DC1-SPINE1 | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-LEAF1A_Ethernet1 Ethernet2 DC1-LEAF1B_Ethernet1 Port(s) not configured: Ethernet7 |
| DC1-SPINE1 | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | skipped | MLAG is disabled |
| DC1-SPINE1 | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' |
| DC1-SPINE1 | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 |
| DC1-SPINE1 | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | failure | The following route(s) are missing from the routing table of VRF default: ['10.1.0.2'] |
| DC1-SPINE1 | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | STP mode 'rapidPvst' not configured for the following VLAN(s): [10, 20] |
| DC1-SPINE1 | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default |
| DC1-SPINE1 | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default |
| DC1-SPINE1 | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - |
| DC1-SPINE1 | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | skipped | Vxlan1 interface is not configured |

View file

@ -1,16 +0,0 @@
anta_inventory:
hosts:
- host: 10.73.1.238
name: cv_atd1
- host: 192.168.0.10
name: spine1
- host: 192.168.0.11
name: spine2
- host: 192.168.0.12
name: leaf1
- host: 192.168.0.13
name: leaf2
- host: 192.168.0.14
name: leaf3
- host: 192.168.0.15
name: leaf4

View file

@ -1,34 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""generic test function used to generate unit tests for each AntaTest."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from anta.device import AntaDevice
def test(device: AntaDevice, data: dict[str, Any]) -> None:
"""Generic test function for AntaTest subclass.
See `tests/units/anta_tests/README.md` for more information on how to use it.
"""
# Instantiate the AntaTest subclass
test_instance = data["test"](device, inputs=data["inputs"], eos_data=data["eos_data"])
# Run the test() method
asyncio.run(test_instance.test())
# Assert expected result
assert test_instance.result.result == data["expected"]["result"], test_instance.result.messages
if "messages" in data["expected"]:
# We expect messages in test result
assert len(test_instance.result.messages) == len(data["expected"]["messages"])
# Test will pass if the expected message is included in the test result message
for message, expected in zip(test_instance.result.messages, data["expected"]["messages"]): # NOTE: zip(strict=True) has been added in Python 3.10
assert expected in message
else:
# Test result should not have messages
assert test_instance.result.messages == []

View file

@ -1,244 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Fixture for Anta Testing."""
from __future__ import annotations
import logging
import shutil
from typing import TYPE_CHECKING, Any, Callable
from unittest.mock import patch
import pytest
from click.testing import CliRunner, Result
import asynceapi
from anta.cli.console import console
from anta.device import AntaDevice, AsyncEOSDevice
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult
from tests.lib.utils import default_anta_env
if TYPE_CHECKING:
from collections.abc import Iterator
from pathlib import Path
from anta.models import AntaCommand
logger = logging.getLogger(__name__)
DEVICE_HW_MODEL = "pytest"
DEVICE_NAME = "pytest"
COMMAND_OUTPUT = "retrieved"
MOCK_CLI_JSON: dict[str, asynceapi.EapiCommandError | dict[str, Any]] = {
"show version": {
"modelName": "DCS-7280CR3-32P4-F",
"version": "4.31.1F",
},
"enable": {},
"clear counters": {},
"clear hardware counter drop": {},
"undefined": asynceapi.EapiCommandError(
passed=[],
failed="show version",
errors=["Authorization denied for command 'show version'"],
errmsg="Invalid command",
not_exec=[],
),
}
MOCK_CLI_TEXT: dict[str, asynceapi.EapiCommandError | str] = {
"show version": "Arista cEOSLab",
"bash timeout 10 ls -1t /mnt/flash/schedule/tech-support": "dummy_tech-support_2023-12-01.1115.log.gz\ndummy_tech-support_2023-12-01.1015.log.gz",
"bash timeout 10 ls -1t /mnt/flash/schedule/tech-support | head -1": "dummy_tech-support_2023-12-01.1115.log.gz",
"show running-config | include aaa authorization exec default": "aaa authorization exec default local",
}
@pytest.fixture()
def device(request: pytest.FixtureRequest) -> Iterator[AntaDevice]:
"""Return an AntaDevice instance with mocked abstract method."""
def _collect(command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401 #pylint: disable=unused-argument
command.output = COMMAND_OUTPUT
kwargs = {"name": DEVICE_NAME, "hw_model": DEVICE_HW_MODEL}
if hasattr(request, "param"):
# Fixture is parametrized indirectly
kwargs.update(request.param)
with patch.object(AntaDevice, "__abstractmethods__", set()), patch("anta.device.AntaDevice._collect", side_effect=_collect):
# AntaDevice constructor does not have hw_model argument
hw_model = kwargs.pop("hw_model")
dev = AntaDevice(**kwargs) # type: ignore[abstract, arg-type] # pylint: disable=abstract-class-instantiated, unexpected-keyword-arg
dev.hw_model = hw_model
yield dev
@pytest.fixture()
def test_inventory() -> AntaInventory:
"""Return the test_inventory."""
env = default_anta_env()
assert env["ANTA_INVENTORY"]
assert env["ANTA_USERNAME"]
assert env["ANTA_PASSWORD"] is not None
return AntaInventory.parse(
filename=env["ANTA_INVENTORY"],
username=env["ANTA_USERNAME"],
password=env["ANTA_PASSWORD"],
)
# tests.unit.test_device.py fixture
@pytest.fixture()
def async_device(request: pytest.FixtureRequest) -> AsyncEOSDevice:
"""Return an AsyncEOSDevice instance."""
kwargs = {
"name": DEVICE_NAME,
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
}
if hasattr(request, "param"):
# Fixture is parametrized indirectly
kwargs.update(request.param)
return AsyncEOSDevice(**kwargs) # type: ignore[arg-type]
# tests.units.result_manager fixtures
@pytest.fixture()
def test_result_factory(device: AntaDevice) -> Callable[[int], TestResult]:
"""Return a anta.result_manager.models.TestResult object."""
# pylint: disable=redefined-outer-name
def _create(index: int = 0) -> TestResult:
"""Actual Factory."""
return TestResult(
name=device.name,
test=f"VerifyTest{index}",
categories=["test"],
description=f"Verifies Test {index}",
custom_field=None,
)
return _create
@pytest.fixture()
def list_result_factory(test_result_factory: Callable[[int], TestResult]) -> Callable[[int], list[TestResult]]:
"""Return a list[TestResult] with 'size' TestResult instantiated using the test_result_factory fixture."""
# pylint: disable=redefined-outer-name
def _factory(size: int = 0) -> list[TestResult]:
"""Create a factory for list[TestResult] entry of size entries."""
return [test_result_factory(i) for i in range(size)]
return _factory
@pytest.fixture()
def result_manager_factory(list_result_factory: Callable[[int], list[TestResult]]) -> Callable[[int], ResultManager]:
"""Return a ResultManager factory that takes as input a number of tests."""
# pylint: disable=redefined-outer-name
def _factory(number: int = 0) -> ResultManager:
"""Create a factory for list[TestResult] entry of size entries."""
result_manager = ResultManager()
result_manager.results = list_result_factory(number)
return result_manager
return _factory
# tests.units.cli fixtures
@pytest.fixture()
def temp_env(tmp_path: Path) -> dict[str, str | None]:
"""Fixture that create a temporary ANTA inventory.
The inventory can be overridden and returns the corresponding environment variables.
"""
env = default_anta_env()
anta_inventory = str(env["ANTA_INVENTORY"])
temp_inventory = tmp_path / "test_inventory.yml"
shutil.copy(anta_inventory, temp_inventory)
env["ANTA_INVENTORY"] = str(temp_inventory)
return env
@pytest.fixture()
# Disabling C901 - too complex as we like our runner like this
def click_runner(capsys: pytest.CaptureFixture[str]) -> Iterator[CliRunner]: # noqa: C901
"""Return a click.CliRunner for cli testing."""
class AntaCliRunner(CliRunner):
"""Override CliRunner to inject specific variables for ANTA."""
def invoke(
self,
*args: Any, # noqa: ANN401
**kwargs: Any, # noqa: ANN401
) -> Result:
# Inject default env if not provided
kwargs["env"] = kwargs["env"] if "env" in kwargs else default_anta_env()
# Deterministic terminal width
kwargs["env"]["COLUMNS"] = "165"
kwargs["auto_envvar_prefix"] = "ANTA"
# Way to fix https://github.com/pallets/click/issues/824
with capsys.disabled():
result = super().invoke(*args, **kwargs)
# disabling T201 as we want to print here
print("--- CLI Output ---") # noqa: T201
print(result.output) # noqa: T201
return result
def cli(
command: str | None = None,
commands: list[dict[str, Any]] | None = None,
ofmt: str = "json",
_version: int | str | None = "latest",
**_kwargs: Any, # noqa: ANN401
) -> dict[str, Any] | list[dict[str, Any]]:
def get_output(command: str | dict[str, Any]) -> dict[str, Any]:
if isinstance(command, dict):
command = command["cmd"]
mock_cli: dict[str, Any]
if ofmt == "json":
mock_cli = MOCK_CLI_JSON
elif ofmt == "text":
mock_cli = MOCK_CLI_TEXT
for mock_cmd, output in mock_cli.items():
if command == mock_cmd:
logger.info("Mocking command %s", mock_cmd)
if isinstance(output, asynceapi.EapiCommandError):
raise output
return output
message = f"Command '{command}' is not mocked"
logger.critical(message)
raise NotImplementedError(message)
res: dict[str, Any] | list[dict[str, Any]]
if command is not None:
logger.debug("Mock input %s", command)
res = get_output(command)
if commands is not None:
logger.debug("Mock input %s", commands)
res = list(map(get_output, commands))
logger.debug("Mock output %s", res)
return res
# Patch asynceapi methods used by AsyncEOSDevice. See tests/units/test_device.py
with (
patch("asynceapi.device.Device.check_connection", return_value=True),
patch("asynceapi.device.Device.cli", side_effect=cli),
patch("asyncssh.connect"),
patch(
"asyncssh.scp",
),
):
console._color_system = None # pylint: disable=protected-access
yield AntaCliRunner()

View file

@ -1,41 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""tests.lib.utils."""
from __future__ import annotations
from pathlib import Path
from typing import Any
def generate_test_ids_dict(val: dict[str, Any], key: str = "name") -> str:
"""generate_test_ids Helper to generate test ID for parametrize."""
return val.get(key, "unamed_test")
def generate_test_ids_list(val: list[dict[str, Any]], key: str = "name") -> list[str]:
"""generate_test_ids Helper to generate test ID for parametrize."""
return [entry.get(key, "unamed_test") for entry in val]
def generate_test_ids(data: list[dict[str, Any]]) -> list[str]:
"""Build id for a unit test of an AntaTest subclass.
{
"name": "meaniful test name",
"test": <AntaTest instance>,
...
}
"""
return [f"{val['test'].module}.{val['test'].__name__}-{val['name']}" for val in data]
def default_anta_env() -> dict[str, str | None]:
"""Return a default_anta_environement which can be passed to a cliRunner.invoke method."""
return {
"ANTA_USERNAME": "anta",
"ANTA_PASSWORD": "formica",
"ANTA_INVENTORY": str(Path(__file__).parent.parent / "data" / "test_inventory.yml"),
"ANTA_CATALOG": str(Path(__file__).parent.parent / "data" / "test_catalog.yml"),
}

View file

@ -1 +0,0 @@
[{'output': 'synchronised to NTP server (51.254.83.231) at stratum 3\n time correct to within 82 ms\n polling server every 1024 s\n\n'}]

View file

@ -1 +0,0 @@
[{'upTime': 1000000.68, 'loadAvg': [0.17, 0.21, 0.18], 'users': 1, 'currentTime': 1643761588.030645}]

View file

@ -1 +0,0 @@
[{'imageFormatVersion': '2.0', 'uptime': 2697.76, 'modelName': 'DCS-7280TRA-48C6-F', 'internalVersion': '4.27.1.1F-25536724.42711F', 'memTotal': 8098984, 'mfgName': 'Arista', 'serialNumber': 'SSJ16376415', 'systemMacAddress': '44:4c:a8:c7:1f:6b', 'bootupTimestamp': 1643715179.0, 'memFree': 6131068, 'version': '4.27.1.1F', 'configMacAddress': '00:00:00:00:00:00', 'isIntlVersion': False, 'internalBuildId': '38c43eab-c660-477a-915b-5a7b28da781d', 'hardwareRevision': '21.02', 'hwMacAddress': '44:4c:a8:c7:1f:6b', 'architecture': 'i686'}]

View file

@ -1,4 +1,10 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Unit tests for anta."""
"""Unit tests for ANTA."""
import pytest
# Enable nice assert messages for tests.units.anta_tests unit tests
# https://docs.pytest.org/en/stable/how-to/writing_plugins.html#assertion-rewriting
pytest.register_assert_rewrite("tests.units.anta_tests")

View file

@ -1,4 +1,33 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test for anta.tests submodule."""
"""Tests for anta.tests module."""
import asyncio
from typing import Any
from anta.device import AntaDevice
def test(device: AntaDevice, data: dict[str, Any]) -> None:
"""Generic test function for AntaTest subclass.
Generate unit tests for each AntaTest subclass.
See `tests/units/anta_tests/README.md` for more information on how to use it.
"""
# Instantiate the AntaTest subclass
test_instance = data["test"](device, inputs=data["inputs"], eos_data=data["eos_data"])
# Run the test() method
asyncio.run(test_instance.test())
# Assert expected result
assert test_instance.result.result == data["expected"]["result"], f"Expected '{data['expected']['result']}' result, got '{test_instance.result.result}'"
if "messages" in data["expected"]:
# We expect messages in test result
assert len(test_instance.result.messages) == len(data["expected"]["messages"])
# Test will pass if the expected message is included in the test result message
for message, expected in zip(test_instance.result.messages, data["expected"]["messages"]): # NOTE: zip(strict=True) has been added in Python 3.10
assert expected in message
else:
# Test result should not have messages
assert test_instance.result.messages == []
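For context, a minimal DATA entry of the shape this generic `test()` function consumes could look like the following; the values are illustrative only, and real entries live in the `tests/units/anta_tests/test_*.py` modules further down this diff:

```python
from typing import Any

from anta.tests.software import VerifyEOSVersion

DATA: list[dict[str, Any]] = [
    {
        "name": "success",
        "test": VerifyEOSVersion,
        "eos_data": [{"modelName": "cEOSLab", "version": "4.31.1F"}],
        "inputs": {"versions": ["4.31.1F"]},
        "expected": {"result": "success"},
    },
]
```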

View file

@ -0,0 +1,35 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
from typing import Any
import pytest
def build_test_id(val: dict[str, Any]) -> str:
"""Build id for a unit test of an AntaTest subclass.
{
"name": "meaniful test name",
"test": <AntaTest instance>,
...
}
"""
return f"{val['test'].__module__}.{val['test'].__name__}-{val['name']}"
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Generate ANTA testts unit tests dynamically during test collection.
It will parametrize test cases based on the `DATA` data structure defined in `tests.units.anta_tests` modules.
See `tests/units/anta_tests/README.md` for more information on how to use it.
Test IDs are generated using the `build_test_id` function above.
Only the function named "test" is parametrized with data, which allows helper functions in each module
to have their own dedicated tests.
"""
if "tests.units.anta_tests" in metafunc.module.__package__ and metafunc.function.__name__ == "test":
# This is a unit test for an AntaTest subclass
metafunc.parametrize("data", metafunc.module.DATA, ids=build_test_id)
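With `build_test_id`, a DATA entry like the one sketched earlier is collected under an ID of the form `{module}.{class name}-{entry name}`, for example:

```python
from anta.tests.software import VerifyEOSVersion

val = {"name": "success", "test": VerifyEOSVersion}
print(f"{val['test'].__module__}.{val['test'].__name__}-{val['name']}")
# -> anta.tests.software.VerifyEOSVersion-success
```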

File diff suppressed because it is too large.

View file

@ -5,10 +5,14 @@
from __future__ import annotations
import sys
from typing import Any
import pytest
from pydantic import ValidationError
from anta.tests.routing.generic import VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -66,16 +70,6 @@ DATA: list[dict[str, Any]] = [
"inputs": {"minimum": 42, "maximum": 666},
"expected": {"result": "failure", "messages": ["routing-table has 1000 routes and not between min (42) and maximum (666)"]},
},
{
"name": "error-max-smaller-than-min",
"test": VerifyRoutingTableSize,
"eos_data": [{}],
"inputs": {"minimum": 666, "maximum": 42},
"expected": {
"result": "error",
"messages": ["Minimum 666 is greater than maximum 42"],
},
},
{
"name": "success",
"test": VerifyRoutingTableEntry,
@ -130,6 +124,48 @@ DATA: list[dict[str, Any]] = [
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"]},
"expected": {"result": "success"},
},
{
"name": "success-collect-all",
"test": VerifyRoutingTableEntry,
"eos_data": [
{
"vrfs": {
"default": {
"routingDisabled": False,
"allRoutesProgrammedHardware": True,
"allRoutesProgrammedKernel": True,
"defaultRouteState": "notSet",
"routes": {
"10.1.0.1/32": {
"hardwareProgrammed": True,
"routeType": "eBGP",
"routeLeaked": False,
"kernelProgrammed": True,
"routeAction": "forward",
"directlyConnected": False,
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.4", "interface": "Ethernet1"}],
},
"10.1.0.2/32": {
"hardwareProgrammed": True,
"routeType": "eBGP",
"routeLeaked": False,
"kernelProgrammed": True,
"routeAction": "forward",
"directlyConnected": False,
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.6", "interface": "Ethernet2"}],
},
},
},
},
},
],
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"], "collect": "all"},
"expected": {"result": "success"},
},
{
"name": "failure-missing-route",
"test": VerifyRoutingTableEntry,
@ -226,4 +262,75 @@ DATA: list[dict[str, Any]] = [
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"]},
"expected": {"result": "failure", "messages": ["The following route(s) are missing from the routing table of VRF default: ['10.1.0.2']"]},
},
{
"name": "failure-wrong-route-collect-all",
"test": VerifyRoutingTableEntry,
"eos_data": [
{
"vrfs": {
"default": {
"routingDisabled": False,
"allRoutesProgrammedHardware": True,
"allRoutesProgrammedKernel": True,
"defaultRouteState": "notSet",
"routes": {
"10.1.0.1/32": {
"hardwareProgrammed": True,
"routeType": "eBGP",
"routeLeaked": False,
"kernelProgrammed": True,
"routeAction": "forward",
"directlyConnected": False,
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.4", "interface": "Ethernet1"}],
},
"10.1.0.55/32": {
"hardwareProgrammed": True,
"routeType": "eBGP",
"routeLeaked": False,
"kernelProgrammed": True,
"routeAction": "forward",
"directlyConnected": False,
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.6", "interface": "Ethernet2"}],
},
},
},
},
},
],
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"], "collect": "all"},
"expected": {"result": "failure", "messages": ["The following route(s) are missing from the routing table of VRF default: ['10.1.0.2']"]},
},
]
class TestVerifyRoutingTableSizeInputs:
"""Test anta.tests.routing.generic.VerifyRoutingTableSize.Input."""
@pytest.mark.parametrize(
("minimum", "maximum"),
[
pytest.param(0, 0, id="zero"),
pytest.param(1, 2, id="1<2"),
pytest.param(0, sys.maxsize, id="max"),
],
)
def test_valid(self, minimum: int, maximum: int) -> None:
"""Test VerifyRoutingTableSize valid inputs."""
VerifyRoutingTableSize.Input(minimum=minimum, maximum=maximum)
@pytest.mark.parametrize(
("minimum", "maximum"),
[
pytest.param(-2, -1, id="negative"),
pytest.param(2, 1, id="2<1"),
pytest.param(sys.maxsize, 0, id="max"),
],
)
def test_invalid(self, minimum: int, maximum: int) -> None:
"""Test VerifyRoutingTableSize invalid inputs."""
with pytest.raises(ValidationError):
VerifyRoutingTableSize.Input(minimum=minimum, maximum=maximum)

View file

@ -20,7 +20,7 @@ from anta.tests.routing.isis import (
VerifyISISSegmentRoutingTunnels,
_get_interface_data,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.routing.ospf import VerifyOSPFMaxLSA, VerifyOSPFNeighborCount, VerifyOSPFNeighborState
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -16,7 +16,7 @@ from anta.tests.aaa import (
VerifyTacacsServers,
VerifyTacacsSourceIntf,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=unused-import
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.avt import VerifyAVTPathHealth, VerifyAVTRole, VerifyAVTSpecificPath
from tests.lib.anta import test # noqa: F401; pylint: disable=unused-import
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,10 +8,8 @@ from __future__ import annotations
from typing import Any
# pylint: disable=C0413
# because of the patch above
from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDSpecificPeers
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDPeersRegProtocols, VerifyBFDSpecificPeers
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -163,8 +161,8 @@ DATA: list[dict[str, Any]] = [
"result": "failure",
"messages": [
"Following BFD peers are not configured or timers are not correct:\n"
"{'192.0.255.7': {'default': {'tx_interval': 1300000, 'rx_interval': 1200000, 'multiplier': 4}}, "
"'192.0.255.70': {'MGMT': {'tx_interval': 120000, 'rx_interval': 120000, 'multiplier': 5}}}"
"{'192.0.255.7': {'default': {'tx_interval': 1300, 'rx_interval': 1200, 'multiplier': 4}}, "
"'192.0.255.70': {'MGMT': {'tx_interval': 120, 'rx_interval': 120, 'multiplier': 5}}}"
],
},
},
@ -519,4 +517,133 @@ DATA: list[dict[str, Any]] = [
],
},
},
{
"name": "success",
"test": VerifyBFDPeersRegProtocols,
"eos_data": [
{
"vrfs": {
"default": {
"ipv4Neighbors": {
"192.0.255.7": {
"peerStats": {
"": {
"status": "up",
"remoteDisc": 108328132,
"peerStatsDetail": {
"role": "active",
"apps": ["ospf"],
},
}
}
}
}
},
"MGMT": {
"ipv4Neighbors": {
"192.0.255.70": {
"peerStats": {
"": {
"status": "up",
"remoteDisc": 108328132,
"peerStatsDetail": {
"role": "active",
"apps": ["bgp"],
},
}
}
}
}
},
}
}
],
"inputs": {
"bfd_peers": [
{"peer_address": "192.0.255.7", "vrf": "default", "protocols": ["ospf"]},
{"peer_address": "192.0.255.70", "vrf": "MGMT", "protocols": ["bgp"]},
]
},
"expected": {"result": "success"},
},
{
"name": "failure",
"test": VerifyBFDPeersRegProtocols,
"eos_data": [
{
"vrfs": {
"default": {
"ipv4Neighbors": {
"192.0.255.7": {
"peerStats": {
"": {
"status": "up",
"peerStatsDetail": {
"role": "active",
"apps": ["ospf"],
},
}
}
}
}
},
"MGMT": {
"ipv4Neighbors": {
"192.0.255.70": {
"peerStats": {
"": {
"status": "up",
"remoteDisc": 0,
"peerStatsDetail": {
"role": "active",
"apps": ["bgp"],
},
}
}
}
}
},
}
}
],
"inputs": {
"bfd_peers": [
{"peer_address": "192.0.255.7", "vrf": "default", "protocols": ["isis"]},
{"peer_address": "192.0.255.70", "vrf": "MGMT", "protocols": ["isis"]},
]
},
"expected": {
"result": "failure",
"messages": [
"The following BFD peers are not configured or have non-registered protocol(s):\n"
"{'192.0.255.7': {'default': ['isis']}, "
"'192.0.255.70': {'MGMT': ['isis']}}"
],
},
},
{
"name": "failure-not-found",
"test": VerifyBFDPeersRegProtocols,
"eos_data": [
{
"vrfs": {
"default": {},
"MGMT": {},
}
}
],
"inputs": {
"bfd_peers": [
{"peer_address": "192.0.255.7", "vrf": "default", "protocols": ["isis"]},
{"peer_address": "192.0.255.70", "vrf": "MGMT", "protocols": ["isis"]},
]
},
"expected": {
"result": "failure",
"messages": [
"The following BFD peers are not configured or have non-registered protocol(s):\n"
"{'192.0.255.7': {'default': 'Not Configured'}, '192.0.255.70': {'MGMT': 'Not Configured'}}"
],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.configuration import VerifyRunningConfigDiffs, VerifyRunningConfigLines, VerifyZeroTouch
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -60,14 +60,4 @@ DATA: list[dict[str, Any]] = [
"inputs": {"regex_patterns": ["bla", "bleh"]},
"expected": {"result": "failure", "messages": ["Following patterns were not found: 'bla','bleh'"]},
},
{
"name": "failure-invalid-regex",
"test": VerifyRunningConfigLines,
"eos_data": ["enable password something\nsome other line"],
"inputs": {"regex_patterns": ["["]},
"expected": {
"result": "error",
"messages": ["1 validation error for Input\nregex_patterns.0\n Value error, Invalid regex: unterminated character set at position 0"],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.connectivity import VerifyLLDPNeighbors, VerifyReachability
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -99,6 +99,28 @@ DATA: list[dict[str, Any]] = [
],
"expected": {"result": "success"},
},
{
"name": "success-df-bit-size",
"test": VerifyReachability,
"inputs": {"hosts": [{"destination": "10.0.0.1", "source": "Management0", "repeat": 5, "size": 1500, "df_bit": True}]},
"eos_data": [
{
"messages": [
"""PING 10.0.0.1 (10.0.0.1) from 172.20.20.6 : 1472(1500) bytes of data.
1480 bytes from 10.0.0.1: icmp_seq=1 ttl=64 time=0.085 ms
1480 bytes from 10.0.0.1: icmp_seq=2 ttl=64 time=0.020 ms
1480 bytes from 10.0.0.1: icmp_seq=3 ttl=64 time=0.019 ms
1480 bytes from 10.0.0.1: icmp_seq=4 ttl=64 time=0.018 ms
1480 bytes from 10.0.0.1: icmp_seq=5 ttl=64 time=0.017 ms
--- 10.0.0.1 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.017/0.031/0.085/0.026 ms, ipg/ewma 0.061/0.057 ms""",
],
},
],
"expected": {"result": "success"},
},
{
"name": "failure-ip",
"test": VerifyReachability,
@ -167,6 +189,28 @@ DATA: list[dict[str, Any]] = [
],
"expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('Management0', '10.0.0.11')]"]},
},
{
"name": "failure-size",
"test": VerifyReachability,
"inputs": {"hosts": [{"destination": "10.0.0.1", "source": "Management0", "repeat": 5, "size": 1501, "df_bit": True}]},
"eos_data": [
{
"messages": [
"""PING 10.0.0.1 (10.0.0.1) from 172.20.20.6 : 1473(1501) bytes of data.
ping: local error: message too long, mtu=1500
ping: local error: message too long, mtu=1500
ping: local error: message too long, mtu=1500
ping: local error: message too long, mtu=1500
ping: local error: message too long, mtu=1500
--- 10.0.0.1 ping statistics ---
5 packets transmitted, 0 received, +5 errors, 100% packet loss, time 40ms
""",
],
},
],
"expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('Management0', '10.0.0.1')]"]},
},
{
"name": "success",
"test": VerifyLLDPNeighbors,

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.field_notices import VerifyFieldNotice44Resolution, VerifyFieldNotice72Resolution
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -358,8 +358,8 @@ DATA: list[dict[str, Any]] = [
],
"inputs": None,
"expected": {
"result": "error",
"messages": ["Error in running test - FixedSystemvrm1 not found"],
"result": "failure",
"messages": ["Error in running test - Component FixedSystemvrm1 not found in 'show version'"],
},
},
]

View file

@ -0,0 +1,391 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test inputs for anta.tests.flow_tracking."""
from __future__ import annotations
from typing import Any
from anta.tests.flow_tracking import VerifyHardwareFlowTrackerStatus
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
"name": "success",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"FLOW-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
],
"inputs": {"trackers": [{"name": "FLOW-TRACKER"}, {"name": "HARDWARE-TRACKER"}]},
"expected": {"result": "success"},
},
{
"name": "success-with-optional-field",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"FLOW-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
},
],
"inputs": {
"trackers": [
{
"name": "FLOW-TRACKER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
"exporters": [{"name": "CV-TELEMETRY", "local_interface": "Loopback0", "template_interval": 3600000}],
},
{
"name": "HARDWARE-TRACKER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
"exporters": [{"name": "CVP-TELEMETRY", "local_interface": "Loopback10", "template_interval": 3600000}],
},
]
},
"expected": {"result": "success"},
},
{
"name": "failure-flow-tracking-not-running",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [{"trackers": {}, "running": False}],
"inputs": {"trackers": [{"name": "FLOW-TRACKER"}]},
"expected": {
"result": "failure",
"messages": ["Hardware flow tracking is not running."],
},
},
{
"name": "failure-tracker-not-configured",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
}
],
"inputs": {"trackers": [{"name": "FLOW-Sample"}]},
"expected": {
"result": "failure",
"messages": ["Hardware flow tracker `FLOW-Sample` is not configured."],
},
},
{
"name": "failure-tracker-not-active",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"FLOW-TRACKER": {
"active": False,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER": {
"active": False,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
},
],
"inputs": {
"trackers": [
{
"name": "FLOW-TRACKER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
"exporters": [{"name": "CV-TELEMETRY", "local_interface": "Loopback0", "template_interval": 3600000}],
},
{
"name": "HARDWARE-TRACKER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
"exporters": [{"name": "CVP-TELEMETRY", "local_interface": "Loopback10", "template_interval": 3600000}],
},
]
},
"expected": {
"result": "failure",
"messages": ["Hardware flow tracker `FLOW-TRACKER` is not active.", "Hardware flow tracker `HARDWARE-TRACKER` is not active."],
},
},
{
"name": "failure-incorrect-record-export",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"FLOW-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 6000,
"activeInterval": 30000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
},
],
"inputs": {
"trackers": [
{
"name": "FLOW-TRACKER",
"record_export": {"on_inactive_timeout": 6000, "on_interval": 30000},
},
{
"name": "HARDWARE-TRACKER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
},
]
},
"expected": {
"result": "failure",
"messages": [
"FLOW-TRACKER: \n"
"Expected `6000` as the inactive timeout, but found `60000` instead.\nExpected `30000` as the interval, but found `300000` instead.\n",
"HARDWARE-TRACKER: \n"
"Expected `60000` as the inactive timeout, but found `6000` instead.\nExpected `300000` as the interval, but found `30000` instead.\n",
],
},
},
{
"name": "failure-incorrect-exporters",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"FLOW-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {
"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000},
"CVP-FLOW": {"localIntf": "Loopback0", "templateInterval": 3600000},
},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 6000,
"activeInterval": 30000,
"exporters": {
"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000},
"Hardware-flow": {"localIntf": "Loopback10", "templateInterval": 3600000},
},
}
},
"running": True,
},
],
"inputs": {
"trackers": [
{
"name": "FLOW-TRACKER",
"exporters": [
{"name": "CV-TELEMETRY", "local_interface": "Loopback0", "template_interval": 3600000},
{"name": "CVP-FLOW", "local_interface": "Loopback10", "template_interval": 3500000},
],
},
{
"name": "HARDWARE-TRACKER",
"exporters": [
{"name": "Hardware-flow", "local_interface": "Loopback99", "template_interval": 3000000},
{"name": "Reverse-flow", "local_interface": "Loopback101", "template_interval": 3000000},
],
},
]
},
"expected": {
"result": "failure",
"messages": [
"FLOW-TRACKER: \n"
"Exporter `CVP-FLOW`: \n"
"Expected `Loopback10` as the local interface, but found `Loopback0` instead.\n"
"Expected `3500000` as the template interval, but found `3600000` instead.\n",
"HARDWARE-TRACKER: \n"
"Exporter `Hardware-flow`: \n"
"Expected `Loopback99` as the local interface, but found `Loopback10` instead.\n"
"Expected `3000000` as the template interval, but found `3600000` instead.\n"
"Exporter `Reverse-flow` is not configured.\n",
],
},
},
{
"name": "failure-all-type",
"test": VerifyHardwareFlowTrackerStatus,
"eos_data": [
{
"trackers": {
"HARDWARE-TRACKER": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"FLOW-TRIGGER": {
"active": False,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-FLOW": {
"active": True,
"inactiveTimeout": 6000,
"activeInterval": 30000,
"exporters": {"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000}},
}
},
"running": True,
},
{
"trackers": {
"FLOW-TRACKER2": {
"active": True,
"inactiveTimeout": 60000,
"activeInterval": 300000,
"exporters": {
"CV-TELEMETRY": {"localIntf": "Loopback0", "templateInterval": 3600000},
"CVP-FLOW": {"localIntf": "Loopback0", "templateInterval": 3600000},
},
}
},
"running": True,
},
{
"trackers": {
"HARDWARE-TRACKER2": {
"active": True,
"inactiveTimeout": 6000,
"activeInterval": 30000,
"exporters": {
"CVP-TELEMETRY": {"localIntf": "Loopback10", "templateInterval": 3600000},
"Hardware-flow": {"localIntf": "Loopback10", "templateInterval": 3600000},
},
}
},
"running": True,
},
],
"inputs": {
"trackers": [
{"name": "FLOW-Sample"},
{
"name": "FLOW-TRIGGER",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
"exporters": [{"name": "CV-TELEMETRY", "local_interface": "Loopback0", "template_interval": 3600000}],
},
{
"name": "HARDWARE-FLOW",
"record_export": {"on_inactive_timeout": 60000, "on_interval": 300000},
},
{
"name": "FLOW-TRACKER2",
"exporters": [
{"name": "CV-TELEMETRY", "local_interface": "Loopback0", "template_interval": 3600000},
{"name": "CVP-FLOW", "local_interface": "Loopback10", "template_interval": 3500000},
],
},
{
"name": "HARDWARE-TRACKER2",
"exporters": [
{"name": "Hardware-flow", "local_interface": "Loopback99", "template_interval": 3000000},
{"name": "Reverse-flow", "local_interface": "Loopback101", "template_interval": 3000000},
],
},
]
},
"expected": {
"result": "failure",
"messages": [
"Hardware flow tracker `FLOW-Sample` is not configured.",
"Hardware flow tracker `FLOW-TRIGGER` is not active.",
"HARDWARE-FLOW: \n"
"Expected `60000` as the inactive timeout, but found `6000` instead.\nExpected `300000` as the interval, but found `30000` instead.\n",
"FLOW-TRACKER2: \nExporter `CVP-FLOW`: \n"
"Expected `Loopback10` as the local interface, but found `Loopback0` instead.\n"
"Expected `3500000` as the template interval, but found `3600000` instead.\n",
"HARDWARE-TRACKER2: \nExporter `Hardware-flow`: \n"
"Expected `Loopback99` as the local interface, but found `Loopback10` instead.\n"
"Expected `3000000` as the template interval, but found `3600000` instead.\n"
"Exporter `Reverse-flow` is not configured.\n",
],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.greent import VerifyGreenT, VerifyGreenTCounters
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -16,7 +16,7 @@ from anta.tests.hardware import (
VerifyTransceiversManufacturers,
VerifyTransceiversTemperature,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -21,12 +21,13 @@ from anta.tests.interfaces import (
VerifyIpVirtualRouterMac,
VerifyL2MTU,
VerifyL3MTU,
VerifyLACPInterfacesStatus,
VerifyLoopbackCount,
VerifyPortChannels,
VerifyStormControlDrops,
VerifySVI,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -651,7 +652,7 @@ DATA: list[dict[str, Any]] = [
],
"inputs": {"threshold": 70.0},
"expected": {
"result": "error",
"result": "failure",
"messages": ["Interface Ethernet1/1 or one of its member interfaces is not Full-Duplex. VerifyInterfaceUtilization has not been implemented."],
},
},
@ -796,7 +797,7 @@ DATA: list[dict[str, Any]] = [
],
"inputs": {"threshold": 70.0},
"expected": {
"result": "error",
"result": "failure",
"messages": ["Interface Port-Channel31 or one of its member interfaces is not Full-Duplex. VerifyInterfaceUtilization has not been implemented."],
},
},
@ -2441,4 +2442,127 @@ DATA: list[dict[str, Any]] = [
],
},
},
{
"name": "success",
"test": VerifyLACPInterfacesStatus,
"eos_data": [
{
"portChannels": {
"Port-Channel5": {
"interfaces": {
"Ethernet5": {
"actorPortStatus": "bundled",
"partnerPortState": {
"activity": True,
"timeout": False,
"aggregation": True,
"synchronization": True,
"collecting": True,
"distributing": True,
},
"actorPortState": {
"activity": True,
"timeout": False,
"aggregation": True,
"synchronization": True,
"collecting": True,
"distributing": True,
},
}
}
}
},
"interface": "Ethernet5",
"orphanPorts": {},
}
],
"inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Port-Channel5"}]},
"expected": {"result": "success"},
},
{
"name": "failure-not-bundled",
"test": VerifyLACPInterfacesStatus,
"eos_data": [
{
"portChannels": {
"Port-Channel5": {
"interfaces": {
"Ethernet5": {
"actorPortStatus": "No Aggregate",
}
}
}
},
"interface": "Ethernet5",
"orphanPorts": {},
}
],
"inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po5"}]},
"expected": {
"result": "failure",
"messages": ["For Interface Ethernet5:\nExpected `bundled` as the local port status, but found `No Aggregate` instead.\n"],
},
},
{
"name": "failure-no-details-found",
"test": VerifyLACPInterfacesStatus,
"eos_data": [
{
"portChannels": {"Port-Channel5": {"interfaces": {}}},
}
],
"inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po 5"}]},
"expected": {
"result": "failure",
"messages": ["Interface 'Ethernet5' is not configured to be a member of LACP 'Port-Channel5'."],
},
},
{
"name": "failure-lacp-params",
"test": VerifyLACPInterfacesStatus,
"eos_data": [
{
"portChannels": {
"Port-Channel5": {
"interfaces": {
"Ethernet5": {
"actorPortStatus": "bundled",
"partnerPortState": {
"activity": False,
"timeout": False,
"aggregation": False,
"synchronization": False,
"collecting": True,
"distributing": True,
},
"actorPortState": {
"activity": False,
"timeout": False,
"aggregation": False,
"synchronization": False,
"collecting": True,
"distributing": True,
},
}
}
}
},
"interface": "Ethernet5",
"orphanPorts": {},
}
],
"inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "port-channel 5"}]},
"expected": {
"result": "failure",
"messages": [
"For Interface Ethernet5:\n"
"Actor port details:\nExpected `True` as the activity, but found `False` instead."
"\nExpected `True` as the aggregation, but found `False` instead."
"\nExpected `True` as the synchronization, but found `False` instead."
"\nPartner port details:\nExpected `True` as the activity, but found `False` instead.\n"
"Expected `True` as the aggregation, but found `False` instead.\n"
"Expected `True` as the synchronization, but found `False` instead.\n"
],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.lanz import VerifyLANZ
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -17,7 +17,7 @@ from anta.tests.logging import (
VerifyLoggingSourceIntf,
VerifyLoggingTimestamp,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -201,7 +201,7 @@ DATA: list[dict[str, Any]] = [
"expected": {"result": "failure", "messages": ["Logs are not generated with the device FQDN"]},
},
{
"name": "success",
"name": "success-negative-offset",
"test": VerifyLoggingTimestamp,
"eos_data": [
"",
@ -213,6 +213,19 @@ DATA: list[dict[str, Any]] = [
"inputs": None,
"expected": {"result": "success"},
},
{
"name": "success-positive-offset",
"test": VerifyLoggingTimestamp,
"eos_data": [
"",
"2023-05-10T15:41:44.680813+05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: "
"Message from arista on command-api (10.22.1.107): ANTA VerifyLoggingTimestamp validation\n"
"2023-05-10T15:42:44.680813+05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: "
"Other log\n",
],
"inputs": None,
"expected": {"result": "success"},
},
{
"name": "failure",
"test": VerifyLoggingTimestamp,

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.mlag import VerifyMlagConfigSanity, VerifyMlagDualPrimary, VerifyMlagInterfaces, VerifyMlagPrimaryPriority, VerifyMlagReloadDelay, VerifyMlagStatus
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -110,17 +110,6 @@ DATA: list[dict[str, Any]] = [
"inputs": None,
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
},
{
"name": "error",
"test": VerifyMlagConfigSanity,
"eos_data": [
{
"dummy": False,
},
],
"inputs": None,
"expected": {"result": "error", "messages": ["Incorrect JSON response - 'mlagActive' state was not found"]},
},
{
"name": "failure-global",
"test": VerifyMlagConfigSanity,

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.multicast import VerifyIGMPSnoopingGlobal, VerifyIGMPSnoopingVlans
from tests.lib.anta import test # noqa: F401; pylint: disable=unused-import
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.path_selection import VerifyPathsHealth, VerifySpecificPath
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.profiles import VerifyTcamProfile, VerifyUnifiedForwardingTableMode
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.ptp import VerifyPtpGMStatus, VerifyPtpLockStatus, VerifyPtpModeStatus, VerifyPtpOffset, VerifyPtpPortModeStatus
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -295,14 +295,14 @@ DATA: list[dict[str, Any]] = [
"expected": {"result": "success"},
},
{
"name": "failure",
"name": "failure-no-interfaces",
"test": VerifyPtpPortModeStatus,
"eos_data": [{"ptpIntfSummaries": {}}],
"inputs": None,
"expected": {"result": "failure", "messages": ["No interfaces are PTP enabled"]},
},
{
"name": "failure",
"name": "failure-invalid-state",
"test": VerifyPtpPortModeStatus,
"eos_data": [
{

View file

@ -7,6 +7,9 @@ from __future__ import annotations
from typing import Any
import pytest
from pydantic import ValidationError
from anta.tests.security import (
VerifyAPIHttpsSSL,
VerifyAPIHttpStatus,
@ -15,6 +18,7 @@ from anta.tests.security import (
VerifyAPISSLCertificate,
VerifyBannerLogin,
VerifyBannerMotd,
VerifyHardwareEntropy,
VerifyIPSecConnHealth,
VerifyIPv4ACL,
VerifySpecificIPSecConn,
@ -23,7 +27,7 @@ from anta.tests.security import (
VerifySSHStatus,
VerifyTelnetStatus,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -38,15 +42,35 @@ DATA: list[dict[str, Any]] = [
"test": VerifySSHStatus,
"eos_data": ["SSH per host connection limit is 20\nFIPS status: disabled\n\n"],
"inputs": None,
"expected": {"result": "error", "messages": ["Could not find SSH status in returned output."]},
"expected": {"result": "failure", "messages": ["Could not find SSH status in returned output."]},
},
{
"name": "failure-ssh-disabled",
"name": "failure-ssh-enabled",
"test": VerifySSHStatus,
"eos_data": ["SSHD status for Default VRF is enabled\nSSH connection limit is 50\nSSH per host connection limit is 20\nFIPS status: disabled\n\n"],
"inputs": None,
"expected": {"result": "failure", "messages": ["SSHD status for Default VRF is enabled"]},
},
{
"name": "success-4.32",
"test": VerifySSHStatus,
"eos_data": [
"User certificate authentication methods: none (neither trusted CA nor SSL profile configured)\n"
"SSHD status for Default VRF: disabled\nSSH connection limit: 50\nSSH per host connection limit: 20\nFIPS status: disabled\n\n"
],
"inputs": None,
"expected": {"result": "success"},
},
{
"name": "failure-ssh-enabled-4.32",
"test": VerifySSHStatus,
"eos_data": [
"User certificate authentication methods: none (neither trusted CA nor SSL profile configured)\n"
"SSHD status for Default VRF: enabled\nSSH connection limit: 50\nSSH per host connection limit: 20\nFIPS status: disabled\n\n"
],
"inputs": None,
"expected": {"result": "failure", "messages": ["SSHD status for Default VRF: enabled"]},
},
{
"name": "success",
"test": VerifySSHIPv4Acl,
@ -580,40 +604,6 @@ DATA: list[dict[str, Any]] = [
],
},
},
{
"name": "error-wrong-input-rsa",
"test": VerifyAPISSLCertificate,
"eos_data": [],
"inputs": {
"certificates": [
{
"certificate_name": "ARISTA_ROOT_CA.crt",
"expiry_threshold": 30,
"common_name": "Arista Networks Internal IT Root Cert Authority",
"encryption_algorithm": "RSA",
"key_size": 256,
},
]
},
"expected": {"result": "error", "messages": ["Allowed sizes are (2048, 3072, 4096)."]},
},
{
"name": "error-wrong-input-ecdsa",
"test": VerifyAPISSLCertificate,
"eos_data": [],
"inputs": {
"certificates": [
{
"certificate_name": "ARISTA_SIGNING_CA.crt",
"expiry_threshold": 30,
"common_name": "AristaIT-ICA ECDSA Issuing Cert Authority",
"encryption_algorithm": "ECDSA",
"key_size": 2048,
},
]
},
"expected": {"result": "error", "messages": ["Allowed sizes are (256, 384, 512)."]},
},
{
"name": "success",
"test": VerifyBannerLogin,
@ -1213,4 +1203,84 @@ DATA: list[dict[str, Any]] = [
],
},
},
{
"name": "success",
"test": VerifyHardwareEntropy,
"eos_data": [{"cpuModel": "2.20GHz", "cryptoModule": "Crypto Module v3.0", "hardwareEntropyEnabled": True, "blockedNetworkProtocols": []}],
"inputs": {},
"expected": {"result": "success"},
},
{
"name": "failure",
"test": VerifyHardwareEntropy,
"eos_data": [{"cpuModel": "2.20GHz", "cryptoModule": "Crypto Module v3.0", "hardwareEntropyEnabled": False, "blockedNetworkProtocols": []}],
"inputs": {},
"expected": {"result": "failure", "messages": ["Hardware entropy generation is disabled."]},
},
]
class TestAPISSLCertificate:
"""Test anta.tests.security.VerifyAPISSLCertificate.Input.APISSLCertificate."""
@pytest.mark.parametrize(
("model_params", "error"),
[
pytest.param(
{
"certificate_name": "ARISTA_ROOT_CA.crt",
"expiry_threshold": 30,
"common_name": "Arista Networks Internal IT Root Cert Authority",
"encryption_algorithm": "RSA",
"key_size": 256,
},
"Value error, `ARISTA_ROOT_CA.crt` key size 256 is invalid for RSA encryption. Allowed sizes are (2048, 3072, 4096).",
id="RSA_wrong_size",
),
pytest.param(
{
"certificate_name": "ARISTA_SIGNING_CA.crt",
"expiry_threshold": 30,
"common_name": "AristaIT-ICA ECDSA Issuing Cert Authority",
"encryption_algorithm": "ECDSA",
"key_size": 2048,
},
"Value error, `ARISTA_SIGNING_CA.crt` key size 2048 is invalid for ECDSA encryption. Allowed sizes are (256, 384, 512).",
id="ECDSA_wrong_size",
),
],
)
def test_invalid(self, model_params: dict[str, Any], error: str) -> None:
"""Test invalid inputs for anta.tests.security.VerifyAPISSLCertificate.Input.APISSLCertificate."""
with pytest.raises(ValidationError) as exec_info:
VerifyAPISSLCertificate.Input.APISSLCertificate.model_validate(model_params)
assert error == exec_info.value.errors()[0]["msg"]
@pytest.mark.parametrize(
"model_params",
[
pytest.param(
{
"certificate_name": "ARISTA_SIGNING_CA.crt",
"expiry_threshold": 30,
"common_name": "AristaIT-ICA ECDSA Issuing Cert Authority",
"encryption_algorithm": "ECDSA",
"key_size": 256,
},
id="ECDSA",
),
pytest.param(
{
"certificate_name": "ARISTA_ROOT_CA.crt",
"expiry_threshold": 30,
"common_name": "Arista Networks Internal IT Root Cert Authority",
"encryption_algorithm": "RSA",
"key_size": 4096,
},
id="RSA",
),
],
)
def test_valid(self, model_params: dict[str, Any]) -> None:
"""Test valid inputs for anta.tests.security.VerifyAPISSLCertificate.Input.APISSLCertificate."""
VerifyAPISSLCertificate.Input.APISSLCertificate.model_validate(model_params)

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.services import VerifyDNSLookup, VerifyDNSServers, VerifyErrdisableRecovery, VerifyHostname
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -7,8 +7,16 @@ from __future__ import annotations
from typing import Any
from anta.tests.snmp import VerifySnmpContact, VerifySnmpIPv4Acl, VerifySnmpIPv6Acl, VerifySnmpLocation, VerifySnmpStatus
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from anta.tests.snmp import (
VerifySnmpContact,
VerifySnmpErrorCounters,
VerifySnmpIPv4Acl,
VerifySnmpIPv6Acl,
VerifySnmpLocation,
VerifySnmpPDUCounters,
VerifySnmpStatus,
)
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -99,6 +107,20 @@ DATA: list[dict[str, Any]] = [
"messages": ["Expected `New York` as the location, but found `Europe` instead."],
},
},
{
"name": "failure-details-not-configured",
"test": VerifySnmpLocation,
"eos_data": [
{
"location": {"location": ""},
}
],
"inputs": {"location": "New York"},
"expected": {
"result": "failure",
"messages": ["SNMP location is not configured."],
},
},
{
"name": "success",
"test": VerifySnmpContact,
@ -124,4 +146,177 @@ DATA: list[dict[str, Any]] = [
"messages": ["Expected `Bob@example.com` as the contact, but found `Jon@example.com` instead."],
},
},
{
"name": "failure-details-not-configured",
"test": VerifySnmpContact,
"eos_data": [
{
"contact": {"contact": ""},
}
],
"inputs": {"contact": "Bob@example.com"},
"expected": {
"result": "failure",
"messages": ["SNMP contact is not configured."],
},
},
{
"name": "success",
"test": VerifySnmpPDUCounters,
"eos_data": [
{
"counters": {
"inGetPdus": 3,
"inGetNextPdus": 2,
"inSetPdus": 3,
"outGetResponsePdus": 3,
"outTrapPdus": 9,
},
}
],
"inputs": {},
"expected": {"result": "success"},
},
{
"name": "success-specific-pdus",
"test": VerifySnmpPDUCounters,
"eos_data": [
{
"counters": {
"inGetPdus": 3,
"inGetNextPdus": 0,
"inSetPdus": 0,
"outGetResponsePdus": 0,
"outTrapPdus": 9,
},
}
],
"inputs": {"pdus": ["inGetPdus", "outTrapPdus"]},
"expected": {"result": "success"},
},
{
"name": "failure-counters-not-found",
"test": VerifySnmpPDUCounters,
"eos_data": [
{
"counters": {},
}
],
"inputs": {},
"expected": {"result": "failure", "messages": ["SNMP counters not found."]},
},
{
"name": "failure-incorrect-counters",
"test": VerifySnmpPDUCounters,
"eos_data": [
{
"counters": {
"inGetPdus": 0,
"inGetNextPdus": 2,
"inSetPdus": 0,
"outGetResponsePdus": 3,
"outTrapPdus": 9,
},
}
],
"inputs": {},
"expected": {
"result": "failure",
"messages": ["The following SNMP PDU counters are not found or have zero PDU counters:\n{'inGetPdus': 0, 'inSetPdus': 0}"],
},
},
{
"name": "failure-pdu-not-found",
"test": VerifySnmpPDUCounters,
"eos_data": [
{
"counters": {
"inGetNextPdus": 0,
"inSetPdus": 0,
"outGetResponsePdus": 0,
},
}
],
"inputs": {"pdus": ["inGetPdus", "outTrapPdus"]},
"expected": {
"result": "failure",
"messages": ["The following SNMP PDU counters are not found or have zero PDU counters:\n{'inGetPdus': 'Not Found', 'outTrapPdus': 'Not Found'}"],
},
},
{
"name": "success",
"test": VerifySnmpErrorCounters,
"eos_data": [
{
"counters": {
"inVersionErrs": 0,
"inBadCommunityNames": 0,
"inBadCommunityUses": 0,
"inParseErrs": 0,
"outTooBigErrs": 0,
"outNoSuchNameErrs": 0,
"outBadValueErrs": 0,
"outGeneralErrs": 0,
},
}
],
"inputs": {},
"expected": {"result": "success"},
},
{
"name": "success-specific-counters",
"test": VerifySnmpErrorCounters,
"eos_data": [
{
"counters": {
"inVersionErrs": 0,
"inBadCommunityNames": 0,
"inBadCommunityUses": 0,
"inParseErrs": 0,
"outTooBigErrs": 5,
"outNoSuchNameErrs": 0,
"outBadValueErrs": 10,
"outGeneralErrs": 1,
},
}
],
"inputs": {"error_counters": ["inVersionErrs", "inParseErrs"]},
"expected": {"result": "success"},
},
{
"name": "failure-counters-not-found",
"test": VerifySnmpErrorCounters,
"eos_data": [
{
"counters": {},
}
],
"inputs": {},
"expected": {"result": "failure", "messages": ["SNMP counters not found."]},
},
{
"name": "failure-incorrect-counters",
"test": VerifySnmpErrorCounters,
"eos_data": [
{
"counters": {
"inVersionErrs": 1,
"inBadCommunityNames": 0,
"inBadCommunityUses": 0,
"inParseErrs": 2,
"outTooBigErrs": 0,
"outNoSuchNameErrs": 0,
"outBadValueErrs": 2,
"outGeneralErrs": 0,
},
}
],
"inputs": {},
"expected": {
"result": "failure",
"messages": [
"The following SNMP error counters are not found or have non-zero error counters:\n{'inVersionErrs': 1, 'inParseErrs': 2, 'outBadValueErrs': 2}"
],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.software import VerifyEOSExtensions, VerifyEOSVersion, VerifyTerminAttrVersion
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -7,8 +7,8 @@ from __future__ import annotations
from typing import Any
from anta.tests.stp import VerifySTPBlockedPorts, VerifySTPCounters, VerifySTPForwardingPorts, VerifySTPMode, VerifySTPRootPriority
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from anta.tests.stp import VerifySTPBlockedPorts, VerifySTPCounters, VerifySTPForwardingPorts, VerifySTPMode, VerifySTPRootPriority, VerifyStpTopologyChanges
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -324,4 +324,166 @@ DATA: list[dict[str, Any]] = [
"inputs": {"priority": 32768, "instances": [10, 20, 30]},
"expected": {"result": "failure", "messages": ["The following instance(s) have the wrong STP root priority configured: ['VL20', 'VL30']"]},
},
{
"name": "success-mstp",
"test": VerifyStpTopologyChanges,
"eos_data": [
{
"unmappedVlans": [],
"topologies": {
"Cist": {
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.735365},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.7353542},
}
},
"NoStp": {
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.735365},
"Ethernet1": {"state": "forwarding", "numChanges": 15, "lastChange": 1723990624.7353542},
}
},
},
},
],
"inputs": {"threshold": 10},
"expected": {"result": "success"},
},
{
"name": "success-rstp",
"test": VerifyStpTopologyChanges,
"eos_data": [
{
"unmappedVlans": [],
"topologies": {
"Cist": {
"interfaces": {
"Vxlan1": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.735365},
"PeerEthernet3": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.7353542},
}
},
"NoStp": {
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.735365},
"Ethernet1": {"state": "forwarding", "numChanges": 15, "lastChange": 1723990624.7353542},
}
},
},
},
],
"inputs": {"threshold": 10},
"expected": {"result": "success"},
},
{
"name": "success-rapid-pvst",
"test": VerifyStpTopologyChanges,
"eos_data": [
{
"unmappedVlans": [],
"topologies": {
"NoStp": {
"vlans": [4094, 4093, 1006],
"interfaces": {
"PeerEthernet2": {"state": "forwarding", "numChanges": 1, "lastChange": 1727151356.1330667},
},
},
"Vl1": {"vlans": [1], "interfaces": {"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0615358}}},
"Vl10": {
"vlans": [10],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0673406},
"Vxlan1": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0677001},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0728855},
"Ethernet3": {"state": "forwarding", "numChanges": 3, "lastChange": 1727326730.255137},
},
},
"Vl1198": {
"vlans": [1198],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.074386},
"Vxlan1": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0743902},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0743942},
},
},
"Vl1199": {
"vlans": [1199],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0744},
"Vxlan1": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.07453},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.074535},
},
},
"Vl20": {
"vlans": [20],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.073489},
"Vxlan1": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0743747},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0743794},
"Ethernet3": {"state": "forwarding", "numChanges": 3, "lastChange": 1727326730.2551405},
},
},
"Vl3009": {
"vlans": [3009],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.074541},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0745454},
},
},
"Vl3019": {
"vlans": [3019],
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0745502},
"Port-Channel5": {"state": "forwarding", "numChanges": 1, "lastChange": 1727326710.0745537},
},
},
},
},
],
"inputs": {"threshold": 10},
"expected": {"result": "success"},
},
{
"name": "failure-unstable-topology",
"test": VerifyStpTopologyChanges,
"eos_data": [
{
"unmappedVlans": [],
"topologies": {
"Cist": {
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 15, "lastChange": 1723990624.735365},
"Port-Channel5": {"state": "forwarding", "numChanges": 15, "lastChange": 1723990624.7353542},
}
},
},
},
],
"inputs": {"threshold": 10},
"expected": {
"result": "failure",
"messages": [
"The following STP topologies are not configured or number of changes not within the threshold:\n"
"{'topologies': {'Cist': {'Cpu': {'Number of changes': 15}, 'Port-Channel5': {'Number of changes': 15}}}}"
],
},
},
{
"name": "failure-topologies-not-configured",
"test": VerifyStpTopologyChanges,
"eos_data": [
{
"unmappedVlans": [],
"topologies": {
"NoStp": {
"interfaces": {
"Cpu": {"state": "forwarding", "numChanges": 1, "lastChange": 1723990624.735365},
"Ethernet1": {"state": "forwarding", "numChanges": 15, "lastChange": 1723990624.7353542},
}
}
},
},
],
"inputs": {"threshold": 10},
"expected": {"result": "failure", "messages": ["STP is not configured."]},
},
]

View file

@ -7,8 +7,8 @@ from __future__ import annotations
from typing import Any
from anta.tests.stun import VerifyStunClient
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from anta.tests.stun import VerifyStunClient, VerifyStunServer
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -173,4 +173,61 @@ DATA: list[dict[str, Any]] = [
],
},
},
{
"name": "success",
"test": VerifyStunServer,
"eos_data": [
{
"enabled": True,
"pid": 1895,
}
],
"inputs": {},
"expected": {"result": "success"},
},
{
"name": "failure-disabled",
"test": VerifyStunServer,
"eos_data": [
{
"enabled": False,
"pid": 1895,
}
],
"inputs": {},
"expected": {
"result": "failure",
"messages": ["STUN server status is disabled."],
},
},
{
"name": "failure-not-running",
"test": VerifyStunServer,
"eos_data": [
{
"enabled": True,
"pid": 0,
}
],
"inputs": {},
"expected": {
"result": "failure",
"messages": ["STUN server is not running."],
},
},
{
"name": "failure-not-running-disabled",
"test": VerifyStunServer,
"eos_data": [
{
"enabled": False,
"pid": 0,
}
],
"inputs": {},
"expected": {
"result": "failure",
"messages": ["STUN server status is disabled and not running."],
},
},
]

View file

@ -14,10 +14,11 @@ from anta.tests.system import (
VerifyFileSystemUtilization,
VerifyMemoryUtilization,
VerifyNTP,
VerifyNTPAssociations,
VerifyReloadCause,
VerifyUptime,
)
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -75,13 +76,6 @@ DATA: list[dict[str, Any]] = [
"inputs": None,
"expected": {"result": "failure", "messages": ["Reload cause is: 'Reload after crash.'"]},
},
{
"name": "error",
"test": VerifyReloadCause,
"eos_data": [{}],
"inputs": None,
"expected": {"result": "error", "messages": ["No reload causes available"]},
},
{
"name": "success-without-minidump",
"test": VerifyCoredump,
@ -286,4 +280,186 @@ poll interval unknown
"inputs": None,
"expected": {"result": "failure", "messages": ["The device is not synchronized with the configured NTP server(s): 'unsynchronised'"]},
},
{
"name": "success",
"test": VerifyNTPAssociations,
"eos_data": [
{
"peers": {
"1.1.1.1": {
"condition": "sys.peer",
"peerIpAddr": "1.1.1.1",
"stratumLevel": 1,
},
"2.2.2.2": {
"condition": "candidate",
"peerIpAddr": "2.2.2.2",
"stratumLevel": 2,
},
"3.3.3.3": {
"condition": "candidate",
"peerIpAddr": "3.3.3.3",
"stratumLevel": 2,
},
}
}
],
"inputs": {
"ntp_servers": [
{"server_address": "1.1.1.1", "preferred": True, "stratum": 1},
{"server_address": "2.2.2.2", "stratum": 2},
{"server_address": "3.3.3.3", "stratum": 2},
]
},
"expected": {"result": "success"},
},
{
"name": "success-pool-name",
"test": VerifyNTPAssociations,
"eos_data": [
{
"peers": {
"1.ntp.networks.com": {
"condition": "sys.peer",
"peerIpAddr": "1.1.1.1",
"stratumLevel": 1,
},
"2.ntp.networks.com": {
"condition": "candidate",
"peerIpAddr": "2.2.2.2",
"stratumLevel": 2,
},
"3.ntp.networks.com": {
"condition": "candidate",
"peerIpAddr": "3.3.3.3",
"stratumLevel": 2,
},
}
}
],
"inputs": {
"ntp_servers": [
{"server_address": "1.ntp.networks.com", "preferred": True, "stratum": 1},
{"server_address": "2.ntp.networks.com", "stratum": 2},
{"server_address": "3.ntp.networks.com", "stratum": 2},
]
},
"expected": {"result": "success"},
},
{
"name": "failure",
"test": VerifyNTPAssociations,
"eos_data": [
{
"peers": {
"1.1.1.1": {
"condition": "candidate",
"peerIpAddr": "1.1.1.1",
"stratumLevel": 2,
},
"2.2.2.2": {
"condition": "sys.peer",
"peerIpAddr": "2.2.2.2",
"stratumLevel": 2,
},
"3.3.3.3": {
"condition": "sys.peer",
"peerIpAddr": "3.3.3.3",
"stratumLevel": 3,
},
}
}
],
"inputs": {
"ntp_servers": [
{"server_address": "1.1.1.1", "preferred": True, "stratum": 1},
{"server_address": "2.2.2.2", "stratum": 2},
{"server_address": "3.3.3.3", "stratum": 2},
]
},
"expected": {
"result": "failure",
"messages": [
"For NTP peer 1.1.1.1:\nExpected `sys.peer` as the condition, but found `candidate` instead.\nExpected `1` as the stratum, but found `2` instead.\n"
"For NTP peer 2.2.2.2:\nExpected `candidate` as the condition, but found `sys.peer` instead.\n"
"For NTP peer 3.3.3.3:\nExpected `candidate` as the condition, but found `sys.peer` instead.\nExpected `2` as the stratum, but found `3` instead."
],
},
},
{
"name": "failure-no-peers",
"test": VerifyNTPAssociations,
"eos_data": [{"peers": {}}],
"inputs": {
"ntp_servers": [
{"server_address": "1.1.1.1", "preferred": True, "stratum": 1},
{"server_address": "2.2.2.2", "stratum": 1},
{"server_address": "3.3.3.3", "stratum": 1},
]
},
"expected": {
"result": "failure",
"messages": ["None of NTP peers are not configured."],
},
},
{
"name": "failure-one-peer-not-found",
"test": VerifyNTPAssociations,
"eos_data": [
{
"peers": {
"1.1.1.1": {
"condition": "sys.peer",
"peerIpAddr": "1.1.1.1",
"stratumLevel": 1,
},
"2.2.2.2": {
"condition": "candidate",
"peerIpAddr": "2.2.2.2",
"stratumLevel": 1,
},
}
}
],
"inputs": {
"ntp_servers": [
{"server_address": "1.1.1.1", "preferred": True, "stratum": 1},
{"server_address": "2.2.2.2", "stratum": 1},
{"server_address": "3.3.3.3", "stratum": 1},
]
},
"expected": {
"result": "failure",
"messages": ["NTP peer 3.3.3.3 is not configured."],
},
},
{
"name": "failure-with-two-peers-not-found",
"test": VerifyNTPAssociations,
"eos_data": [
{
"peers": {
"1.1.1.1": {
"condition": "candidate",
"peerIpAddr": "1.1.1.1",
"stratumLevel": 1,
}
}
}
],
"inputs": {
"ntp_servers": [
{"server_address": "1.1.1.1", "preferred": True, "stratum": 1},
{"server_address": "2.2.2.2", "stratum": 1},
{"server_address": "3.3.3.3", "stratum": 1},
]
},
"expected": {
"result": "failure",
"messages": [
"For NTP peer 1.1.1.1:\nExpected `sys.peer` as the condition, but found `candidate` instead.\n"
"NTP peer 2.2.2.2 is not configured.\nNTP peer 3.3.3.3 is not configured."
],
},
},
]

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.vlan import VerifyVlanInternalPolicy
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{

View file

@ -8,7 +8,7 @@ from __future__ import annotations
from typing import Any
from anta.tests.vxlan import VerifyVxlan1ConnSettings, VerifyVxlan1Interface, VerifyVxlanConfigSanity, VerifyVxlanVniBinding, VerifyVxlanVtep
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
from tests.units.anta_tests import test
DATA: list[dict[str, Any]] = [
{
@ -26,21 +26,21 @@ DATA: list[dict[str, Any]] = [
"expected": {"result": "skipped", "messages": ["Vxlan1 interface is not configured"]},
},
{
"name": "failure",
"name": "failure-down-up",
"test": VerifyVxlan1Interface,
"eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "down", "interfaceStatus": "up"}}}],
"inputs": None,
"expected": {"result": "failure", "messages": ["Vxlan1 interface is down/up"]},
},
{
"name": "failure",
"name": "failure-up-down",
"test": VerifyVxlan1Interface,
"eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "up", "interfaceStatus": "down"}}}],
"inputs": None,
"expected": {"result": "failure", "messages": ["Vxlan1 interface is up/down"]},
},
{
"name": "failure",
"name": "failure-down-down",
"test": VerifyVxlan1Interface,
"eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "down", "interfaceStatus": "down"}}}],
"inputs": None,

View file

@ -0,0 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Unit tests for the asynceapi client package used by ANTA."""

View file

@ -0,0 +1,20 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Fixtures for the asynceapi client package."""
import pytest
from asynceapi import Device
@pytest.fixture
def asynceapi_device() -> Device:
"""Return an asynceapi Device instance."""
return Device(
host="localhost",
username="admin",
password="admin",
proto="https",
port=443,
)

View file

@ -0,0 +1,88 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Unit tests data for the asynceapi client package."""
SUCCESS_EAPI_RESPONSE = {
"jsonrpc": "2.0",
"id": "EapiExplorer-1",
"result": [
{
"mfgName": "Arista",
"modelName": "cEOSLab",
"hardwareRevision": "",
"serialNumber": "5E9D49D20F09DA471333DD835835FD1A",
"systemMacAddress": "00:1c:73:2e:7b:a3",
"hwMacAddress": "00:00:00:00:00:00",
"configMacAddress": "00:00:00:00:00:00",
"version": "4.31.1F-34554157.4311F (engineering build)",
"architecture": "i686",
"internalVersion": "4.31.1F-34554157.4311F",
"internalBuildId": "47114ca4-ae9f-4f32-8c1f-2864db93b7e8",
"imageFormatVersion": "1.0",
"imageOptimization": "None",
"cEosToolsVersion": "(unknown)",
"kernelVersion": "6.5.0-44-generic",
"bootupTimestamp": 1723429239.9352903,
"uptime": 1300202.749528885,
"memTotal": 65832112,
"memFree": 41610316,
"isIntlVersion": False,
},
{
"utcTime": 1724729442.6863558,
"timezone": "EST",
"localTime": {
"year": 2024,
"month": 8,
"dayOfMonth": 26,
"hour": 22,
"min": 30,
"sec": 42,
"dayOfWeek": 0,
"dayOfYear": 239,
"daylightSavingsAdjust": 0,
},
"clockSource": {"local": True},
},
],
}
"""Successful eAPI JSON response."""
ERROR_EAPI_RESPONSE = {
"jsonrpc": "2.0",
"id": "EapiExplorer-1",
"error": {
"code": 1002,
"message": "CLI command 2 of 3 'bad command' failed: invalid command",
"data": [
{
"mfgName": "Arista",
"modelName": "cEOSLab",
"hardwareRevision": "",
"serialNumber": "5E9D49D20F09DA471333DD835835FD1A",
"systemMacAddress": "00:1c:73:2e:7b:a3",
"hwMacAddress": "00:00:00:00:00:00",
"configMacAddress": "00:00:00:00:00:00",
"version": "4.31.1F-34554157.4311F (engineering build)",
"architecture": "i686",
"internalVersion": "4.31.1F-34554157.4311F",
"internalBuildId": "47114ca4-ae9f-4f32-8c1f-2864db93b7e8",
"imageFormatVersion": "1.0",
"imageOptimization": "None",
"cEosToolsVersion": "(unknown)",
"kernelVersion": "6.5.0-44-generic",
"bootupTimestamp": 1723429239.9352903,
"uptime": 1300027.2297976017,
"memTotal": 65832112,
"memFree": 41595080,
"isIntlVersion": False,
},
{"errors": ["Invalid input (at token 1: 'bad')"]},
],
},
}
"""Error eAPI JSON response."""
JSONRPC_REQUEST_TEMPLATE = {"jsonrpc": "2.0", "method": "runCmds", "params": {"version": 1, "cmds": [], "format": "json"}, "id": "EapiExplorer-1"}
"""Template for JSON-RPC eAPI request. `cmds` must be filled by the parametrize decorator."""

View file

@ -0,0 +1,85 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Unit tests the asynceapi.device module."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from httpx import HTTPStatusError
from asynceapi import Device, EapiCommandError
from .test_data import ERROR_EAPI_RESPONSE, JSONRPC_REQUEST_TEMPLATE, SUCCESS_EAPI_RESPONSE
if TYPE_CHECKING:
from pytest_httpx import HTTPXMock
@pytest.mark.parametrize(
"cmds",
[
(["show version", "show clock"]),
([{"cmd": "show version"}, {"cmd": "show clock"}]),
([{"cmd": "show version"}, "show clock"]),
],
ids=["simple_commands", "complex_commands", "mixed_commands"],
)
async def test_jsonrpc_exec_success(
asynceapi_device: Device,
httpx_mock: HTTPXMock,
cmds: list[str | dict[str, Any]],
) -> None:
"""Test the Device.jsonrpc_exec method with a successful response. Simple and complex commands are tested."""
jsonrpc_request: dict[str, Any] = JSONRPC_REQUEST_TEMPLATE.copy()
jsonrpc_request["params"]["cmds"] = cmds
httpx_mock.add_response(json=SUCCESS_EAPI_RESPONSE)
result = await asynceapi_device.jsonrpc_exec(jsonrpc=jsonrpc_request)
assert result == SUCCESS_EAPI_RESPONSE["result"]
@pytest.mark.parametrize(
"cmds",
[
(["show version", "bad command", "show clock"]),
([{"cmd": "show version"}, {"cmd": "bad command"}, {"cmd": "show clock"}]),
([{"cmd": "show version"}, {"cmd": "bad command"}, "show clock"]),
],
ids=["simple_commands", "complex_commands", "mixed_commands"],
)
async def test_jsonrpc_exec_eapi_command_error(
asynceapi_device: Device,
httpx_mock: HTTPXMock,
cmds: list[str | dict[str, Any]],
) -> None:
"""Test the Device.jsonrpc_exec method with an error response. Simple and complex commands are tested."""
jsonrpc_request: dict[str, Any] = JSONRPC_REQUEST_TEMPLATE.copy()
jsonrpc_request["params"]["cmds"] = cmds
error_eapi_response: dict[str, Any] = ERROR_EAPI_RESPONSE.copy()
httpx_mock.add_response(json=error_eapi_response)
with pytest.raises(EapiCommandError) as exc_info:
await asynceapi_device.jsonrpc_exec(jsonrpc=jsonrpc_request)
assert exc_info.value.passed == [error_eapi_response["error"]["data"][0]]
assert exc_info.value.failed == "bad command"
assert exc_info.value.errors == ["Invalid input (at token 1: 'bad')"]
assert exc_info.value.errmsg == "CLI command 2 of 3 'bad command' failed: invalid command"
assert exc_info.value.not_exec == [jsonrpc_request["params"]["cmds"][2]]
async def test_jsonrpc_exec_http_status_error(asynceapi_device: Device, httpx_mock: HTTPXMock) -> None:
"""Test the Device.jsonrpc_exec method with an HTTPStatusError."""
jsonrpc_request: dict[str, Any] = JSONRPC_REQUEST_TEMPLATE.copy()
jsonrpc_request["params"]["cmds"] = ["show version"]
httpx_mock.add_response(status_code=500, text="Internal Server Error")
with pytest.raises(HTTPStatusError):
await asynceapi_device.jsonrpc_exec(jsonrpc=jsonrpc_request)

133
tests/units/cli/conftest.py Normal file
View file

@ -0,0 +1,133 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
from __future__ import annotations
import logging
import shutil
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
import pytest
from click.testing import CliRunner, Result
import asynceapi
from anta.cli.console import console
if TYPE_CHECKING:
from collections.abc import Iterator
from pathlib import Path
logger = logging.getLogger(__name__)
MOCK_CLI_JSON: dict[str, asynceapi.EapiCommandError | dict[str, Any]] = {
"show version": {
"modelName": "DCS-7280CR3-32P4-F",
"version": "4.31.1F",
},
"enable": {},
"clear counters": {},
"clear hardware counter drop": {},
"undefined": asynceapi.EapiCommandError(
passed=[],
failed="show version",
errors=["Authorization denied for command 'show version'"],
errmsg="Invalid command",
not_exec=[],
),
}
MOCK_CLI_TEXT: dict[str, asynceapi.EapiCommandError | str] = {
"show version": "Arista cEOSLab",
"bash timeout 10 ls -1t /mnt/flash/schedule/tech-support": "dummy_tech-support_2023-12-01.1115.log.gz\ndummy_tech-support_2023-12-01.1015.log.gz",
"bash timeout 10 ls -1t /mnt/flash/schedule/tech-support | head -1": "dummy_tech-support_2023-12-01.1115.log.gz",
"show running-config | include aaa authorization exec default": "aaa authorization exec default local",
}
@pytest.fixture
def temp_env(anta_env: dict[str, str], tmp_path: Path) -> dict[str, str]:
"""Fixture that create a temporary ANTA inventory.
The inventory can be overridden and returns the corresponding environment variables.
"""
anta_inventory = str(anta_env["ANTA_INVENTORY"])
temp_inventory = tmp_path / "test_inventory.yml"
shutil.copy(anta_inventory, temp_inventory)
anta_env["ANTA_INVENTORY"] = str(temp_inventory)
return anta_env
@pytest.fixture
# Disabling C901 - too complex as we like our runner like this
def click_runner(capsys: pytest.CaptureFixture[str], anta_env: dict[str, str]) -> Iterator[CliRunner]: # noqa: C901
"""Return a click.CliRunner for cli testing."""
class AntaCliRunner(CliRunner):
"""Override CliRunner to inject specific variables for ANTA."""
def invoke(self, *args: Any, **kwargs: Any) -> Result: # noqa: ANN401
# Inject default env vars if not provided
kwargs["env"] = anta_env | kwargs.get("env", {})
# Deterministic terminal width
kwargs["env"]["COLUMNS"] = "165"
kwargs["auto_envvar_prefix"] = "ANTA"
# Way to fix https://github.com/pallets/click/issues/824
with capsys.disabled():
result = super().invoke(*args, **kwargs)
# disabling T201 as we want to print here
print("--- CLI Output ---") # noqa: T201
print(result.output) # noqa: T201
return result
def cli(
command: str | None = None,
commands: list[dict[str, Any]] | None = None,
ofmt: str = "json",
_version: int | str | None = "latest",
**_kwargs: Any, # noqa: ANN401
) -> dict[str, Any] | list[dict[str, Any]]:
def get_output(command: str | dict[str, Any]) -> dict[str, Any]:
if isinstance(command, dict):
command = command["cmd"]
mock_cli: dict[str, Any]
if ofmt == "json":
mock_cli = MOCK_CLI_JSON
elif ofmt == "text":
mock_cli = MOCK_CLI_TEXT
for mock_cmd, output in mock_cli.items():
if command == mock_cmd:
logger.info("Mocking command %s", mock_cmd)
if isinstance(output, asynceapi.EapiCommandError):
raise output
return output
message = f"Command '{command}' is not mocked"
logger.critical(message)
raise NotImplementedError(message)
res: dict[str, Any] | list[dict[str, Any]]
if command is not None:
logger.debug("Mock input %s", command)
res = get_output(command)
if commands is not None:
logger.debug("Mock input %s", commands)
res = list(map(get_output, commands))
logger.debug("Mock output %s", res)
return res
# Patch asynceapi methods used by AsyncEOSDevice. See tests/units/test_device.py
with (
patch("asynceapi.device.Device.check_connection", return_value=True),
patch("asynceapi.device.Device.cli", side_effect=cli),
patch("asyncssh.connect"),
patch(
"asyncssh.scp",
),
):
console._color_system = None
yield AntaCliRunner()
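A test using this fixture would typically invoke the CLI through the patched eAPI and SSH layers, along the lines of the sketch below (illustrative only; the `anta` click group import is an assumption, not shown in this diff).
# Illustrative usage of the click_runner fixture; the entry point name is assumed.
from click.testing import CliRunner
from anta.cli import anta
def test_nrfu_help(click_runner: CliRunner) -> None:
    """The patched asynceapi/asyncssh calls let CLI commands run without real devices."""
    result = click_runner.invoke(anta, ["nrfu", "--help"])
    assert result.exit_code == 0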

View file

@ -19,12 +19,12 @@ if TYPE_CHECKING:
@pytest.mark.parametrize(
("command", "ofmt", "version", "revision", "device", "failed"),
[
pytest.param("show version", "json", None, None, "dummy", False, id="json command"),
pytest.param("show version", "text", None, None, "dummy", False, id="text command"),
pytest.param("show version", None, "latest", None, "dummy", False, id="version-latest"),
pytest.param("show version", None, "1", None, "dummy", False, id="version"),
pytest.param("show version", None, None, 3, "dummy", False, id="revision"),
pytest.param("undefined", None, None, None, "dummy", True, id="command fails"),
pytest.param("show version", "json", None, None, "leaf1", False, id="json command"),
pytest.param("show version", "text", None, None, "leaf1", False, id="text command"),
pytest.param("show version", None, "latest", None, "leaf1", False, id="version-latest"),
pytest.param("show version", None, "1", None, "leaf1", False, id="version"),
pytest.param("show version", None, None, 3, "leaf1", False, id="revision"),
pytest.param("undefined", None, None, None, "leaf1", True, id="command fails"),
pytest.param("undefined", None, None, None, "doesnotexist", True, id="Device does not exist"),
],
)
@ -38,7 +38,6 @@ def test_run_cmd(
failed: bool,
) -> None:
"""Test `anta debug run-cmd`."""
# pylint: disable=too-many-arguments
cli_args = ["-l", "debug", "debug", "run-cmd", "--command", command, "--device", device]
# ofmt

View file

@ -5,17 +5,19 @@
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any
from unittest.mock import call, patch
import pytest
import respx
from anta.cli.exec.utils import (
clear_counters,
)
from anta.cli.exec.utils import clear_counters, collect_commands
from anta.models import AntaCommand
from anta.tools import safe_command
# , collect_commands, collect_scheduled_show_tech
# collect_scheduled_show_tech
if TYPE_CHECKING:
from anta.device import AntaDevice
@@ -23,55 +25,59 @@ if TYPE_CHECKING:
# TODO: complete test cases
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("inventory_state", "per_device_command_output", "tags"),
("inventory", "inventory_state", "per_device_command_output", "tags"),
[
pytest.param(
{"count": 3},
{
"dummy": {"is_online": False},
"dummy2": {"is_online": False},
"dummy3": {"is_online": False},
"device-0": {"is_online": False},
"device-1": {"is_online": False},
"device-2": {"is_online": False},
},
{},
None,
id="no_connected_device",
),
pytest.param(
{"count": 3},
{
"dummy": {"is_online": True, "hw_model": "cEOSLab"},
"dummy2": {"is_online": True, "hw_model": "vEOS-lab"},
"dummy3": {"is_online": False},
"device-0": {"is_online": True, "hw_model": "cEOSLab"},
"device-1": {"is_online": True, "hw_model": "vEOS-lab"},
"device-2": {"is_online": False},
},
{},
None,
id="cEOSLab and vEOS-lab devices",
),
pytest.param(
{"count": 3},
{
"dummy": {"is_online": True},
"dummy2": {"is_online": True},
"dummy3": {"is_online": False},
"device-0": {"is_online": True},
"device-1": {"is_online": True},
"device-2": {"is_online": False},
},
{"dummy": None}, # None means the command failed to collect
{"device-0": None}, # None means the command failed to collect
None,
id="device with error",
),
pytest.param(
{"count": 3},
{
"dummy": {"is_online": True},
"dummy2": {"is_online": True},
"dummy3": {"is_online": True},
"device-0": {"is_online": True},
"device-1": {"is_online": True},
"device-2": {"is_online": True},
},
{},
["spine"],
id="tags",
),
],
indirect=["inventory"],
)
async def test_clear_counters(
caplog: pytest.LogCaptureFixture,
test_inventory: AntaInventory,
inventory: AntaInventory,
inventory_state: dict[str, Any],
per_device_command_output: dict[str, Any],
tags: set[str] | None,
@@ -80,12 +86,12 @@ async def test_clear_counters(
async def mock_connect_inventory() -> None:
"""Mock connect_inventory coroutine."""
for name, device in test_inventory.items():
for name, device in inventory.items():
device.is_online = inventory_state[name].get("is_online", True)
device.established = inventory_state[name].get("established", device.is_online)
device.hw_model = inventory_state[name].get("hw_model", "dummy")
async def collect(self: AntaDevice, command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401 #pylint: disable=unused-argument
async def collect(self: AntaDevice, command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401
"""Mock collect coroutine."""
command.output = per_device_command_output.get(self.name, "")
@@ -97,10 +103,10 @@ async def test_clear_counters(
side_effect=mock_connect_inventory,
) as mocked_connect_inventory,
):
await clear_counters(test_inventory, tags=tags)
await clear_counters(inventory, tags=tags)
mocked_connect_inventory.assert_awaited_once()
devices_established = test_inventory.get_inventory(established_only=True, tags=tags).devices
devices_established = inventory.get_inventory(established_only=True, tags=tags).devices
if devices_established:
# Building the list of calls
calls = []
@@ -142,3 +148,172 @@ async def test_clear_counters(
assert f"Could not clear counters on device {key}: []" in caplog.text
else:
mocked_collect.assert_not_awaited()
# TODO: test with changing root_dir, test with failing to write (OSError)
@pytest.mark.parametrize(
("inventory", "inventory_state", "commands", "tags"),
[
pytest.param(
{"count": 1},
{
"device-0": {"is_online": False},
},
{"json_format": ["show version"]},
None,
id="no_connected_device",
),
pytest.param(
{"count": 3},
{
"device-0": {"is_online": True},
"device-1": {"is_online": True},
"device-2": {"is_online": False},
},
{"json_format": ["show version", "show ip interface brief"]},
None,
id="JSON commands",
),
pytest.param(
{"count": 3},
{
"device-0": {"is_online": True},
"device-1": {"is_online": True},
"device-2": {"is_online": False},
},
{"json_format": ["show version"], "text_format": ["show running-config", "show ip interface"]},
None,
id="Text commands",
),
pytest.param(
{"count": 2},
{
"device-0": {"is_online": True, "tags": {"spine"}},
"device-1": {"is_online": True},
},
{"json_format": ["show version"]},
{"spine"},
id="tags",
),
pytest.param( # TODO: This test should not be here; we should catch the wrong user input with pydantic.
{"count": 1},
{
"device-0": {"is_online": True},
},
{"blah_format": ["42"]},
None,
id="bad-input",
),
pytest.param(
{"count": 1},
{
"device-0": {"is_online": True},
},
{"json_format": ["undefined command", "show version"]},
None,
id="command-failed-to-be-collected",
),
pytest.param(
{"count": 1},
{
"device-0": {"is_online": True},
},
{"json_format": ["uncaught exception"]},
None,
id="uncaught-exception",
),
],
indirect=["inventory"],
)
async def test_collect_commands(
caplog: pytest.LogCaptureFixture,
tmp_path: Path,
inventory: AntaInventory,
inventory_state: dict[str, Any],
commands: dict[str, list[str]],
tags: set[str] | None,
) -> None:
"""Test anta.cli.exec.utils.collect_commands."""
caplog.set_level(logging.INFO)
root_dir = tmp_path
async def mock_connect_inventory() -> None:
"""Mock connect_inventory coroutine."""
for name, device in inventory.items():
device.is_online = inventory_state[name].get("is_online", True)
device.established = inventory_state[name].get("established", device.is_online)
device.hw_model = inventory_state[name].get("hw_model", "dummy")
device.tags = inventory_state[name].get("tags", set())
# Need to patch the child device class
# ruff: noqa: C901
with (
respx.mock,
patch(
"anta.inventory.AntaInventory.connect_inventory",
side_effect=mock_connect_inventory,
) as mocked_connect_inventory,
):
# Mocking responses from devices
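# Note: respx matches on the nested JSON-RPC body; the json__params__cmds__0__cmd lookup
# selects requests whose payload has params["cmds"][0]["cmd"] equal to the given command.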
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show version").respond(
json={"result": [{"toto": 42}]}
)
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show ip interface brief").respond(
json={"result": [{"toto": 42}]}
)
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show running-config").respond(
json={"result": [{"output": "blah"}]}
)
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show ip interface").respond(
json={"result": [{"output": "blah"}]}
)
respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="undefined command").respond(
json={
"error": {
"code": 1002,
"message": "CLI command 1 of 1 'undefined command' failed: invalid command",
"data": [{"errors": ["Invalid input (at token 0: 'undefined')"]}],
}
}
)
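# The error payload above mimics the eAPI JSON-RPC error shape (code/message/data.errors),
# so collection of 'undefined command' fails instead of producing an output file.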
await collect_commands(inventory, commands, root_dir, tags=tags)
mocked_connect_inventory.assert_awaited_once()
devices_established = inventory.get_inventory(established_only=True, tags=tags or None).devices
if not devices_established:
assert "INFO" in caplog.text
assert "No online device found. Exiting" in caplog.text
return
for device in devices_established:
# Verify tags selection: when tags are given, only devices whose tags intersect them are collected
assert device.tags.intersection(tags) != set() if tags else True
json_path = root_dir / device.name / "json"
text_path = root_dir / device.name / "text"
if "json_format" in commands:
# Handle undefined command
if "undefined command" in commands["json_format"]:
assert "ERROR" in caplog.text
assert "Command 'undefined command' failed on device-0: Invalid input (at token 0: 'undefined')" in caplog.text
# Verify we don't claim it was collected
assert f"Collected command 'undefined command' from device {device.name}" not in caplog.text
commands["json_format"].remove("undefined command")
# Handle uncaught exception
elif "uncaught exception" in commands["json_format"]:
assert "ERROR" in caplog.text
assert "Error when collecting commands: " in caplog.text
# Verify we don't claim it was collected
assert f"Collected command 'uncaught exception' from device {device.name}" not in caplog.text
commands["json_format"].remove("uncaught exception")
assert json_path.is_dir()
assert len(list(Path.iterdir(json_path))) == len(commands["json_format"])
for command in commands["json_format"]:
assert Path.is_file(json_path / f"{safe_command(command)}.json")
assert f"Collected command '{command}' from device {device.name}" in caplog.text
if "text_format" in commands:
assert text_path.is_dir()
assert len(list(text_path.iterdir())) == len(commands["text_format"])
for command in commands["text_format"]:
assert Path.is_file(text_path / f"{safe_command(command)}.log")
assert f"Collected command '{command}' from device {device.name}" in caplog.text


@@ -42,7 +42,6 @@ def test_from_cvp(
cv_token_failure: bool,
cvp_connect_failure: bool,
) -> None:
# pylint: disable=too-many-arguments
# ruff: noqa: C901
"""Test `anta get from-cvp`.
@@ -144,7 +143,6 @@ def test_from_ansible(
expected_exit: int,
expected_log: str | None,
) -> None:
# pylint: disable=too-many-arguments
"""Test `anta get from-ansible`.
This test verifies:
@@ -230,7 +228,6 @@ def test_from_ansible_overwrite(
expected_exit: int,
expected_log: str | None,
) -> None:
# pylint: disable=too-many-arguments
"""Test `anta get from-ansible` overwrite mechanism.
The test uses a static ansible-inventory and output as these are tested in other functions


@@ -144,7 +144,6 @@ def test_create_inventory_from_ansible(
expected_inv_length: int,
) -> None:
"""Test anta.get.utils.create_inventory_from_ansible."""
# pylint: disable=R0913
target_file = tmp_path / "inventory.yml"
inventory_file_path = DATA_DIR / inventory_filename


@@ -9,7 +9,6 @@ from typing import TYPE_CHECKING
from anta.cli import anta
from anta.cli.utils import ExitCode
from tests.lib.utils import default_anta_env
if TYPE_CHECKING:
from click.testing import CliRunner
@@ -49,10 +48,16 @@ def test_anta_nrfu_dry_run(click_runner: CliRunner) -> None:
assert "Dry-run" in result.output
def test_anta_nrfu_wrong_catalog_format(click_runner: CliRunner) -> None:
"""Test anta nrfu --dry-run, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "--dry-run", "--catalog-format", "toto"])
assert result.exit_code == ExitCode.USAGE_ERROR
assert "Invalid value for '--catalog-format': 'toto' is not one of 'yaml', 'json'." in result.output
def test_anta_password_required(click_runner: CliRunner) -> None:
"""Test that password is provided."""
env = default_anta_env()
env["ANTA_PASSWORD"] = None
env = {"ANTA_PASSWORD": None}
result = click_runner.invoke(anta, ["nrfu"], env=env)
assert result.exit_code == ExitCode.USAGE_ERROR
@@ -61,8 +66,7 @@ def test_anta_password_required(click_runner: CliRunner) -> None:
def test_anta_password(click_runner: CliRunner) -> None:
"""Test that password can be provided either via --password or --prompt."""
env = default_anta_env()
env["ANTA_PASSWORD"] = None
env = {"ANTA_PASSWORD": None}
result = click_runner.invoke(anta, ["nrfu", "--password", "secret"], env=env)
assert result.exit_code == ExitCode.OK
result = click_runner.invoke(anta, ["nrfu", "--prompt"], input="password\npassword\n", env=env)
@@ -113,3 +117,9 @@ def test_disable_cache(click_runner: CliRunner) -> None:
if "disable_cache" in line:
assert "True" in line
assert result.exit_code == ExitCode.OK
def test_hide(click_runner: CliRunner) -> None:
"""Test the `--hide` option of the `anta nrfu` command."""
result = click_runner.invoke(anta, ["nrfu", "--hide", "success", "text"])
assert "SUCCESS" not in result.output


@@ -8,7 +8,8 @@ from __future__ import annotations
import json
import re
from pathlib import Path
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
from anta.cli import anta
from anta.cli.utils import ExitCode
@@ -51,7 +52,7 @@ def test_anta_nrfu_table(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "table"])
assert result.exit_code == ExitCode.OK
assert "dummy │ VerifyEOSVersion │ success" in result.output
assert "leaf1 │ VerifyEOSVersion │ success" in result.output
def test_anta_nrfu_table_group_by_device(click_runner: CliRunner) -> None:
@@ -72,7 +73,7 @@ def test_anta_nrfu_text(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "text"])
assert result.exit_code == ExitCode.OK
assert "dummy :: VerifyEOSVersion :: SUCCESS" in result.output
assert "leaf1 :: VerifyEOSVersion :: SUCCESS" in result.output
def test_anta_nrfu_json(click_runner: CliRunner) -> None:
@@ -84,13 +85,113 @@ def test_anta_nrfu_json(click_runner: CliRunner) -> None:
assert match is not None
result_list = json.loads(match.group())
for res in result_list:
if res["name"] == "dummy":
if res["name"] == "leaf1":
assert res["test"] == "VerifyEOSVersion"
assert res["result"] == "success"
def test_anta_nrfu_json_output(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu json with output file."""
json_output = tmp_path / "test.json"
result = click_runner.invoke(anta, ["nrfu", "json", "--output", str(json_output)])
# Making sure the output is not printed to stdout
match = re.search(r"\[\n {2}{[\s\S]+ {2}}\n\]", result.output)
assert match is None
assert result.exit_code == ExitCode.OK
assert "JSON results saved to" in result.output
assert json_output.exists()
def test_anta_nrfu_json_output_failure(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu json with output file."""
json_output = tmp_path / "test.json"
original_open = Path.open
def mock_path_open(*args: Any, **kwargs: Any) -> Path: # noqa: ANN401
"""Mock Path.open only for the json_output file of this test."""
if args[0] == json_output:
msg = "Simulated OSError"
raise OSError(msg)
# If not the json_output file, call the original Path.open
return original_open(*args, **kwargs)
with patch("pathlib.Path.open", mock_path_open):
result = click_runner.invoke(anta, ["nrfu", "json", "--output", str(json_output)])
assert result.exit_code == ExitCode.USAGE_ERROR
assert "Failed to save JSON results to" in result.output
assert not json_output.exists()
def test_anta_nrfu_template(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "tpl-report", "--template", str(DATA_DIR / "template.j2")])
assert result.exit_code == ExitCode.OK
assert "* VerifyEOSVersion is SUCCESS for dummy" in result.output
assert "* VerifyEOSVersion is SUCCESS for leaf1" in result.output
def test_anta_nrfu_csv(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu csv."""
csv_output = tmp_path / "test.csv"
result = click_runner.invoke(anta, ["nrfu", "csv", "--csv-output", str(csv_output)])
assert result.exit_code == ExitCode.OK
assert "CSV report saved to" in result.output
assert csv_output.exists()
def test_anta_nrfu_csv_failure(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu csv."""
csv_output = tmp_path / "test.csv"
with patch("anta.reporter.csv_reporter.ReportCsv.generate", side_effect=OSError()):
result = click_runner.invoke(anta, ["nrfu", "csv", "--csv-output", str(csv_output)])
assert result.exit_code == ExitCode.USAGE_ERROR
assert "Failed to save CSV report to" in result.output
assert not csv_output.exists()
def test_anta_nrfu_md_report(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu md-report."""
md_output = tmp_path / "test.md"
result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)])
assert result.exit_code == ExitCode.OK
assert "Markdown report saved to" in result.output
assert md_output.exists()
def test_anta_nrfu_md_report_failure(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu md-report failure."""
md_output = tmp_path / "test.md"
with patch("anta.reporter.md_reporter.MDReportGenerator.generate", side_effect=OSError()):
result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)])
assert result.exit_code == ExitCode.USAGE_ERROR
assert "Failed to save Markdown report to" in result.output
assert not md_output.exists()
def test_anta_nrfu_md_report_with_hide(click_runner: CliRunner, tmp_path: Path) -> None:
"""Test anta nrfu md-report with the `--hide` option."""
md_output = tmp_path / "test.md"
result = click_runner.invoke(anta, ["nrfu", "--hide", "success", "md-report", "--md-output", str(md_output)])
assert result.exit_code == ExitCode.OK
assert "Markdown report saved to" in result.output
assert md_output.exists()
with md_output.open("r", encoding="utf-8") as f:
content = f.read()
# Use regex to find the "Total Tests Success" value
match = re.search(r"\| (\d+) \| (\d+) \| \d+ \| \d+ \| \d+ \|", content)
assert match is not None
total_tests = int(match.group(1))
total_tests_success = int(match.group(2))
assert total_tests == 0
assert total_tests_success == 0

tests/units/conftest.py

@@ -0,0 +1,85 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
import pytest
import yaml
from anta.device import AntaDevice, AsyncEOSDevice
if TYPE_CHECKING:
from collections.abc import Iterator
from anta.models import AntaCommand
DEVICE_HW_MODEL = "pytest"
DEVICE_NAME = "pytest"
COMMAND_OUTPUT = "retrieved"
@pytest.fixture(name="anta_env")
def anta_env_fixture() -> dict[str, str]:
"""Return an ANTA environment for testing."""
return {
"ANTA_USERNAME": "anta",
"ANTA_PASSWORD": "formica",
"ANTA_INVENTORY": str(Path(__file__).parent.parent / "data" / "test_inventory_with_tags.yml"),
"ANTA_CATALOG": str(Path(__file__).parent.parent / "data" / "test_catalog.yml"),
}
@pytest.fixture
def device(request: pytest.FixtureRequest) -> Iterator[AntaDevice]:
"""Return an AntaDevice instance with mocked abstract method."""
def _collect(command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401
command.output = COMMAND_OUTPUT
kwargs = {"name": DEVICE_NAME, "hw_model": DEVICE_HW_MODEL}
if hasattr(request, "param"):
# Fixture is parametrized indirectly
kwargs.update(request.param)
with patch.object(AntaDevice, "__abstractmethods__", set()), patch("anta.device.AntaDevice._collect", side_effect=_collect):
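# Emptying __abstractmethods__ lets the abstract AntaDevice class be instantiated directly,
# while the patched _collect fills command.output with the canned COMMAND_OUTPUT.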
# AntaDevice constructor does not have hw_model argument
hw_model = kwargs.pop("hw_model")
dev = AntaDevice(**kwargs) # type: ignore[abstract, arg-type]
dev.hw_model = hw_model
yield dev
@pytest.fixture
def async_device(request: pytest.FixtureRequest) -> AsyncEOSDevice:
"""Return an AsyncEOSDevice instance."""
kwargs = {
"name": DEVICE_NAME,
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
}
if hasattr(request, "param"):
# Fixture is parametrized indirectly
kwargs.update(request.param)
return AsyncEOSDevice(**kwargs) # type: ignore[arg-type]
@pytest.fixture
def yaml_file(request: pytest.FixtureRequest, tmp_path: Path) -> Path:
"""Fixture to create a temporary YAML file and return the path.
Fixture is indirectly parametrized with the YAML file content.
"""
assert hasattr(request, "param")
file = tmp_path / "test_file.yaml"
assert isinstance(request.param, dict)
content: dict[str, Any] = request.param
file.write_text(yaml.dump(content, allow_unicode=True))
return file
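# Typical usage (see TestAntaInventory below):
#   @pytest.mark.parametrize("yaml_file", [{"anta_inventory": {...}}], indirect=["yaml_file"])
# The dict parameter is dumped to a temporary YAML file whose path is injected into the test.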


@@ -0,0 +1,78 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""ANTA Inventory unit tests."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from pydantic import ValidationError
from anta.inventory import AntaInventory
from anta.inventory.exceptions import InventoryIncorrectSchemaError, InventoryRootKeyError
if TYPE_CHECKING:
from _pytest.mark.structures import ParameterSet
FILE_DIR: Path = Path(__file__).parent.parent.resolve() / "data" / "inventory"
INIT_VALID_PARAMS: list[ParameterSet] = [
pytest.param(
{"anta_inventory": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2"}, {"host": "my.awesome.host.com"}]}},
id="Inventory_with_host_only",
),
pytest.param({"anta_inventory": {"networks": [{"network": "192.168.0.0/24"}]}}, id="ValidInventory_with_networks_only"),
pytest.param(
{"anta_inventory": {"ranges": [{"start": "10.0.0.1", "end": "10.0.0.11"}, {"start": "10.0.0.101", "end": "10.0.0.111"}]}},
id="Inventory_with_ranges_only",
),
pytest.param(
{"anta_inventory": {"hosts": [{"host": "192.168.0.17", "port": 443}, {"host": "192.168.0.2", "port": 80}]}},
id="Inventory_with_host_port",
),
pytest.param(
{"anta_inventory": {"hosts": [{"host": "192.168.0.17", "tags": ["leaf"]}, {"host": "192.168.0.2", "tags": ["spine"]}]}},
id="Inventory_with_host_tags",
),
pytest.param({"anta_inventory": {"networks": [{"network": "192.168.0.0/24", "tags": ["leaf"]}]}}, id="ValidInventory_with_networks_tags"),
pytest.param(
{
"anta_inventory": {
"ranges": [{"start": "10.0.0.1", "end": "10.0.0.11", "tags": ["leaf"]}, {"start": "10.0.0.101", "end": "10.0.0.111", "tags": ["spine"]}]
}
},
id="Inventory_with_ranges_tags",
),
]
INIT_INVALID_PARAMS = [
pytest.param({"anta_inventory": {"hosts": [{"host": "192.168.0.17/32"}, {"host": "192.168.0.2"}]}}, id="Inventory_with_host_only"),
pytest.param({"anta_inventory": {"networks": [{"network": "192.168.42.0/8"}]}}, id="Inventory_wrong_network_bits"),
pytest.param({"anta_inventory": {"networks": [{"network": "toto"}]}}, id="Inventory_wrong_network"),
pytest.param({"anta_inventory": {"ranges": [{"start": "toto", "end": "192.168.42.42"}]}}, id="Inventory_wrong_range"),
pytest.param({"anta_inventory": {"ranges": [{"start": "fe80::cafe", "end": "192.168.42.42"}]}}, id="Inventory_wrong_range_type_mismatch"),
pytest.param(
{"inventory": {"ranges": [{"start": "10.0.0.1", "end": "10.0.0.11"}, {"start": "10.0.0.100", "end": "10.0.0.111"}]}},
id="Invalid_Root_Key",
),
]
class TestAntaInventory:
"""Tests for anta.inventory.AntaInventory."""
@pytest.mark.parametrize("yaml_file", INIT_VALID_PARAMS, indirect=["yaml_file"])
def test_parse_valid(self, yaml_file: Path) -> None:
"""Parse valid YAML file to create ANTA inventory."""
AntaInventory.parse(filename=yaml_file, username="arista", password="arista123")
@pytest.mark.parametrize("yaml_file", INIT_INVALID_PARAMS, indirect=["yaml_file"])
def test_parse_invalid(self, yaml_file: Path) -> None:
"""Parse invalid YAML file to create ANTA inventory."""
with pytest.raises((InventoryIncorrectSchemaError, InventoryRootKeyError, ValidationError)):
AntaInventory.parse(filename=yaml_file, username="arista", password="arista123")


@@ -1,82 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""ANTA Inventory unit tests."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
import yaml
from pydantic import ValidationError
from anta.inventory import AntaInventory
from anta.inventory.exceptions import InventoryIncorrectSchemaError, InventoryRootKeyError
from tests.data.json_data import ANTA_INVENTORY_TESTS_INVALID, ANTA_INVENTORY_TESTS_VALID
from tests.lib.utils import generate_test_ids_dict
if TYPE_CHECKING:
from pathlib import Path
class TestAntaInventory:
"""Test AntaInventory class."""
def create_inventory(self, content: str, tmp_path: Path) -> str:
"""Create fakefs inventory file."""
tmp_inventory = tmp_path / "mydir/myfile"
tmp_inventory.parent.mkdir()
tmp_inventory.touch()
tmp_inventory.write_text(yaml.dump(content, allow_unicode=True))
return str(tmp_inventory)
def check_parameter(self, parameter: str, test_definition: dict[Any, Any]) -> bool:
"""Check if parameter is configured in testbed."""
return "parameters" in test_definition and parameter in test_definition["parameters"]
@pytest.mark.parametrize("test_definition", ANTA_INVENTORY_TESTS_VALID, ids=generate_test_ids_dict)
def test_init_valid(self, test_definition: dict[str, Any], tmp_path: Path) -> None:
"""Test class constructor with valid data.
Test structure:
---------------
{
'name': 'ValidInventory_with_host_only',
'input': {"anta_inventory":{"hosts":[{"host":"192.168.0.17"},{"host":"192.168.0.2"}]}},
'expected_result': 'valid',
'parameters': {
'ipaddress_in_scope': '192.168.0.17',
'ipaddress_out_of_scope': '192.168.1.1',
}
}
"""
inventory_file = self.create_inventory(content=test_definition["input"], tmp_path=tmp_path)
try:
AntaInventory.parse(filename=inventory_file, username="arista", password="arista123")
except ValidationError as exc:
raise AssertionError from exc
@pytest.mark.parametrize("test_definition", ANTA_INVENTORY_TESTS_INVALID, ids=generate_test_ids_dict)
def test_init_invalid(self, test_definition: dict[str, Any], tmp_path: Path) -> None:
"""Test class constructor with invalid data.
Test structure:
---------------
{
'name': 'ValidInventory_with_host_only',
'input': {"anta_inventory":{"hosts":[{"host":"192.168.0.17"},{"host":"192.168.0.2"}]}},
'expected_result': 'invalid',
'parameters': {
'ipaddress_in_scope': '192.168.0.17',
'ipaddress_out_of_scope': '192.168.1.1',
}
}
"""
inventory_file = self.create_inventory(content=test_definition["input"], tmp_path=tmp_path)
with pytest.raises((InventoryIncorrectSchemaError, InventoryRootKeyError, ValidationError)):
AntaInventory.parse(filename=inventory_file, username="arista", password="arista123")


@@ -5,387 +5,162 @@
from __future__ import annotations
import logging
from typing import Any
from typing import TYPE_CHECKING, Any
import pytest
from pydantic import ValidationError
from anta.device import AsyncEOSDevice
from anta.inventory.models import AntaInventoryHost, AntaInventoryInput, AntaInventoryNetwork, AntaInventoryRange
from tests.data.json_data import (
INVENTORY_DEVICE_MODEL_INVALID,
INVENTORY_DEVICE_MODEL_VALID,
INVENTORY_MODEL_HOST_CACHE,
INVENTORY_MODEL_HOST_INVALID,
INVENTORY_MODEL_HOST_VALID,
INVENTORY_MODEL_INVALID,
INVENTORY_MODEL_NETWORK_CACHE,
INVENTORY_MODEL_NETWORK_INVALID,
INVENTORY_MODEL_NETWORK_VALID,
INVENTORY_MODEL_RANGE_CACHE,
INVENTORY_MODEL_RANGE_INVALID,
INVENTORY_MODEL_RANGE_VALID,
INVENTORY_MODEL_VALID,
)
from tests.lib.utils import generate_test_ids_dict
from anta.inventory.models import AntaInventoryHost, AntaInventoryNetwork, AntaInventoryRange
if TYPE_CHECKING:
from _pytest.mark.structures import ParameterSet
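# The ParameterSet entries below provide (name, host, port, tags, disable_cache) for AntaInventoryHost,
# (network, tags, disable_cache) for AntaInventoryNetwork and (start, end, tags, disable_cache) for
# AntaInventoryRange, matching the signatures of the parametrized tests further down.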
INVENTORY_HOST_VALID_PARAMS: list[ParameterSet] = [
pytest.param(None, "1.1.1.1", None, None, None, id="IPv4"),
pytest.param(None, "fe80::cc62:a9ff:feef:932a", None, None, None, id="IPv6"),
pytest.param(None, "1.1.1.1", 666, None, None, id="IPv4_with_port"),
pytest.param(None, "1.1.1.1", None, None, True, id="cache_enabled"),
pytest.param(None, "1.1.1.1", None, None, False, id="cache_disabled"),
]
INVENTORY_HOST_INVALID_PARAMS: list[ParameterSet] = [
pytest.param(None, "1.1.1.1/32", None, None, False, id="IPv4_with_netmask"),
pytest.param(None, "1.1.1.1", 66666, None, False, id="IPv4_with_wrong_port"),
pytest.param(None, "fe80::cc62:a9ff:feef:932a/128", None, None, False, id="IPv6_with_netmask"),
pytest.param(None, "fe80::cc62:a9ff:feef:", None, None, False, id="invalid_IPv6"),
pytest.param(None, "@", None, None, False, id="special_char"),
pytest.param(None, "1.1.1.1", None, None, None, id="cache_is_None"),
]
INVENTORY_NETWORK_VALID_PARAMS: list[ParameterSet] = [
pytest.param("1.1.1.0/24", None, None, id="IPv4_subnet"),
pytest.param("2001:db8::/32", None, None, id="IPv6_subnet"),
pytest.param("1.1.1.0/24", None, False, id="cache_enabled"),
pytest.param("1.1.1.0/24", None, True, id="cache_disabled"),
]
INVENTORY_NETWORK_INVALID_PARAMS: list[ParameterSet] = [
pytest.param("1.1.1.0/17", None, False, id="IPv4_subnet"),
pytest.param("2001:db8::/16", None, False, id="IPv6_subnet"),
pytest.param("1.1.1.0/24", None, None, id="cache_is_None"),
]
INVENTORY_RANGE_VALID_PARAMS: list[ParameterSet] = [
pytest.param("10.1.0.1", "10.1.0.10", None, None, id="IPv4_range"),
pytest.param("10.1.0.1", "10.1.0.10", None, True, id="cache_enabled"),
pytest.param("10.1.0.1", "10.1.0.10", None, False, id="cache_disabled"),
]
INVENTORY_RANGE_INVALID_PARAMS: list[ParameterSet] = [
pytest.param("toto", "10.1.0.10", None, False, id="IPv4_range"),
pytest.param("10.1.0.1", "10.1.0.10", None, None, id="cache_is_None"),
]
INVENTORY_MODEL_VALID = [
{
"name": "Valid_Host_Only",
"input": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2"}]},
"expected_result": "valid",
},
{
"name": "Valid_Networks_Only",
"input": {"networks": [{"network": "192.168.0.0/16"}, {"network": "192.168.1.0/24"}]},
"expected_result": "valid",
},
{
"name": "Valid_Ranges_Only",
"input": {
"ranges": [
{"start": "10.1.0.1", "end": "10.1.0.10"},
{"start": "10.2.0.1", "end": "10.2.1.10"},
],
},
"expected_result": "valid",
},
]
INVENTORY_MODEL_INVALID = [
{
"name": "Host_with_Invalid_entry",
"input": {"hosts": [{"host": "192.168.0.17"}, {"host": "192.168.0.2/32"}]},
"expected_result": "invalid",
},
]
class TestInventoryUnitModels:
"""Test components of AntaInventoryInput model."""
class TestAntaInventoryHost:
"""Test anta.inventory.models.AntaInventoryHost."""
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_HOST_VALID, ids=generate_test_ids_dict)
def test_anta_inventory_host_valid(self, test_definition: dict[str, Any]) -> None:
"""Test host input model.
@pytest.mark.parametrize(("name", "host", "port", "tags", "disable_cache"), INVENTORY_HOST_VALID_PARAMS)
def test_valid(self, name: str, host: str, port: int, tags: set[str], disable_cache: bool | None) -> None:
"""Valid model parameters."""
params: dict[str, Any] = {"name": name, "host": host, "port": port, "tags": tags}
if disable_cache is not None:
params = params | {"disable_cache": disable_cache}
inventory_host = AntaInventoryHost.model_validate(params)
assert host == str(inventory_host.host)
assert port == inventory_host.port
assert name == inventory_host.name
assert tags == inventory_host.tags
if disable_cache is None:
# Check cache default value
assert inventory_host.disable_cache is False
else:
assert inventory_host.disable_cache == disable_cache
Test structure:
---------------
{
'name': 'ValidIPv4_Host',
'input': '1.1.1.1',
'expected_result': 'valid'
}
"""
try:
host_inventory = AntaInventoryHost(host=test_definition["input"])
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
raise AssertionError from exc
assert test_definition["input"] == str(host_inventory.host)
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_HOST_INVALID, ids=generate_test_ids_dict)
def test_anta_inventory_host_invalid(self, test_definition: dict[str, Any]) -> None:
"""Test host input model.
Test structure:
---------------
{
'name': 'ValidIPv4_Host',
'input': '1.1.1.1/32',
'expected_result': 'invalid'
}
"""
@pytest.mark.parametrize(("name", "host", "port", "tags", "disable_cache"), INVENTORY_HOST_INVALID_PARAMS)
def test_invalid(self, name: str, host: str, port: int, tags: set[str], disable_cache: bool | None) -> None:
"""Invalid model parameters."""
with pytest.raises(ValidationError):
AntaInventoryHost(host=test_definition["input"])
AntaInventoryHost.model_validate({"name": name, "host": host, "port": port, "tags": tags, "disable_cache": disable_cache})
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_HOST_CACHE, ids=generate_test_ids_dict)
def test_anta_inventory_host_cache(self, test_definition: dict[str, Any]) -> None:
"""Test host disable_cache.
Test structure:
---------------
class TestAntaInventoryNetwork:
"""Test anta.inventory.models.AntaInventoryNetwork."""
{
'name': 'Cache',
'input': {"host": '1.1.1.1', "disable_cache": True},
'expected_result': True
}
"""
if "disable_cache" in test_definition["input"]:
host_inventory = AntaInventoryHost(host=test_definition["input"]["host"], disable_cache=test_definition["input"]["disable_cache"])
@pytest.mark.parametrize(("network", "tags", "disable_cache"), INVENTORY_NETWORK_VALID_PARAMS)
def test_valid(self, network: str, tags: set[str], disable_cache: bool | None) -> None:
"""Valid model parameters."""
params: dict[str, Any] = {"network": network, "tags": tags}
if disable_cache is not None:
params = params | {"disable_cache": disable_cache}
inventory_network = AntaInventoryNetwork.model_validate(params)
assert network == str(inventory_network.network)
assert tags == inventory_network.tags
if disable_cache is None:
# Check cache default value
assert inventory_network.disable_cache is False
else:
host_inventory = AntaInventoryHost(host=test_definition["input"]["host"])
assert test_definition["expected_result"] == host_inventory.disable_cache
assert inventory_network.disable_cache == disable_cache
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_NETWORK_VALID, ids=generate_test_ids_dict)
def test_anta_inventory_network_valid(self, test_definition: dict[str, Any]) -> None:
"""Test Network input model with valid data.
@pytest.mark.parametrize(("network", "tags", "disable_cache"), INVENTORY_NETWORK_INVALID_PARAMS)
def test_invalid(self, network: str, tags: set[str], disable_cache: bool | None) -> None:
"""Invalid model parameters."""
with pytest.raises(ValidationError):
AntaInventoryNetwork.model_validate({"network": network, "tags": tags, "disable_cache": disable_cache})
Test structure:
---------------
{
'name': 'ValidIPv4_Subnet',
'input': '1.1.1.0/24',
'expected_result': 'valid'
}
class TestAntaInventoryRange:
"""Test anta.inventory.models.AntaInventoryRange."""
"""
try:
network_inventory = AntaInventoryNetwork(network=test_definition["input"])
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
raise AssertionError from exc
assert test_definition["input"] == str(network_inventory.network)
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_NETWORK_INVALID, ids=generate_test_ids_dict)
def test_anta_inventory_network_invalid(self, test_definition: dict[str, Any]) -> None:
"""Test Network input model with invalid data.
Test structure:
---------------
{
'name': 'ValidIPv4_Subnet',
'input': '1.1.1.0/16',
'expected_result': 'invalid'
}
"""
try:
AntaInventoryNetwork(network=test_definition["input"])
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
@pytest.mark.parametrize(("start", "end", "tags", "disable_cache"), INVENTORY_RANGE_VALID_PARAMS)
def test_valid(self, start: str, end: str, tags: set[str], disable_cache: bool | None) -> None:
"""Valid model parameters."""
params: dict[str, Any] = {"start": start, "end": end, "tags": tags}
if disable_cache is not None:
params = params | {"disable_cache": disable_cache}
inventory_range = AntaInventoryRange.model_validate(params)
assert start == str(inventory_range.start)
assert end == str(inventory_range.end)
assert tags == inventory_range.tags
if disable_cache is None:
# Check cache default value
assert inventory_range.disable_cache is False
else:
raise AssertionError
assert inventory_range.disable_cache == disable_cache
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_NETWORK_CACHE, ids=generate_test_ids_dict)
def test_anta_inventory_network_cache(self, test_definition: dict[str, Any]) -> None:
"""Test network disable_cache.
Test structure:
---------------
{
'name': 'Cache',
'input': {"network": '1.1.1.1/24', "disable_cache": True},
'expected_result': True
}
"""
if "disable_cache" in test_definition["input"]:
network_inventory = AntaInventoryNetwork(network=test_definition["input"]["network"], disable_cache=test_definition["input"]["disable_cache"])
else:
network_inventory = AntaInventoryNetwork(network=test_definition["input"]["network"])
assert test_definition["expected_result"] == network_inventory.disable_cache
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_RANGE_VALID, ids=generate_test_ids_dict)
def test_anta_inventory_range_valid(self, test_definition: dict[str, Any]) -> None:
"""Test range input model.
Test structure:
---------------
{
'name': 'ValidIPv4_Range',
'input': {'start':'10.1.0.1', 'end':'10.1.0.10'},
'expected_result': 'valid'
}
"""
try:
range_inventory = AntaInventoryRange(
start=test_definition["input"]["start"],
end=test_definition["input"]["end"],
)
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
raise AssertionError from exc
assert test_definition["input"]["start"] == str(range_inventory.start)
assert test_definition["input"]["end"] == str(range_inventory.end)
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_RANGE_INVALID, ids=generate_test_ids_dict)
def test_anta_inventory_range_invalid(self, test_definition: dict[str, Any]) -> None:
"""Test range input model.
Test structure:
---------------
{
'name': 'ValidIPv4_Range',
'input': {'start':'10.1.0.1', 'end':'10.1.0.10/32'},
'expected_result': 'invalid'
}
"""
try:
AntaInventoryRange(
start=test_definition["input"]["start"],
end=test_definition["input"]["end"],
)
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
else:
raise AssertionError
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_RANGE_CACHE, ids=generate_test_ids_dict)
def test_anta_inventory_range_cache(self, test_definition: dict[str, Any]) -> None:
"""Test range disable_cache.
Test structure:
---------------
{
'name': 'Cache',
'input': {"start": '1.1.1.1', "end": "1.1.1.10", "disable_cache": True},
'expected_result': True
}
"""
if "disable_cache" in test_definition["input"]:
range_inventory = AntaInventoryRange(
start=test_definition["input"]["start"],
end=test_definition["input"]["end"],
disable_cache=test_definition["input"]["disable_cache"],
)
else:
range_inventory = AntaInventoryRange(start=test_definition["input"]["start"], end=test_definition["input"]["end"])
assert test_definition["expected_result"] == range_inventory.disable_cache
class TestAntaInventoryInputModel:
"""Unit test of AntaInventoryInput model."""
def test_inventory_input_structure(self) -> None:
"""Test inventory keys are those expected."""
inventory = AntaInventoryInput()
logging.info("Inventory keys are: %s", str(inventory.model_dump().keys()))
assert all(elem in inventory.model_dump() for elem in ["hosts", "networks", "ranges"])
@pytest.mark.parametrize("inventory_def", INVENTORY_MODEL_VALID, ids=generate_test_ids_dict)
def test_anta_inventory_intput_valid(self, inventory_def: dict[str, Any]) -> None:
"""Test loading valid data to inventory class.
Test structure:
---------------
{
"name": "Valid_Host_Only",
"input": {
"hosts": [
{
"host": "192.168.0.17"
},
{
"host": "192.168.0.2"
}
]
},
"expected_result": "valid"
}
"""
try:
inventory = AntaInventoryInput(**inventory_def["input"])
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
raise AssertionError from exc
logging.info("Checking if all root keys are correctly lodaded")
assert all(elem in inventory.model_dump() for elem in inventory_def["input"])
@pytest.mark.parametrize("inventory_def", INVENTORY_MODEL_INVALID, ids=generate_test_ids_dict)
def test_anta_inventory_intput_invalid(self, inventory_def: dict[str, Any]) -> None:
"""Test loading invalid data to inventory class.
Test structure:
---------------
{
"name": "Valid_Host_Only",
"input": {
"hosts": [
{
"host": "192.168.0.17"
},
{
"host": "192.168.0.2/32"
}
]
},
"expected_result": "invalid"
}
"""
try:
if "hosts" in inventory_def["input"]:
logging.info(
"Loading %s into AntaInventoryInput hosts section",
str(inventory_def["input"]["hosts"]),
)
AntaInventoryInput(hosts=inventory_def["input"]["hosts"])
if "networks" in inventory_def["input"]:
logging.info(
"Loading %s into AntaInventoryInput networks section",
str(inventory_def["input"]["networks"]),
)
AntaInventoryInput(networks=inventory_def["input"]["networks"])
if "ranges" in inventory_def["input"]:
logging.info(
"Loading %s into AntaInventoryInput ranges section",
str(inventory_def["input"]["ranges"]),
)
AntaInventoryInput(ranges=inventory_def["input"]["ranges"])
except ValidationError as exc:
logging.warning("Error: %s", str(exc))
else:
raise AssertionError
class TestInventoryDeviceModel:
"""Unit test of InventoryDevice model."""
@pytest.mark.parametrize("test_definition", INVENTORY_DEVICE_MODEL_VALID, ids=generate_test_ids_dict)
def test_inventory_device_valid(self, test_definition: dict[str, Any]) -> None:
"""Test loading valid data to InventoryDevice class.
Test structure:
---------------
{
"name": "Valid_Inventory",
"input": [
{
'host': '1.1.1.1',
'username': 'arista',
'password': 'arista123!'
},
{
'host': '1.1.1.1',
'username': 'arista',
'password': 'arista123!'
}
],
"expected_result": "valid"
}
"""
if test_definition["expected_result"] == "invalid":
pytest.skip("Not concerned by the test")
try:
for entity in test_definition["input"]:
AsyncEOSDevice(**entity)
except TypeError as exc:
logging.warning("Error: %s", str(exc))
raise AssertionError from exc
@pytest.mark.parametrize("test_definition", INVENTORY_DEVICE_MODEL_INVALID, ids=generate_test_ids_dict)
def test_inventory_device_invalid(self, test_definition: dict[str, Any]) -> None:
"""Test loading invalid data to InventoryDevice class.
Test structure:
---------------
{
"name": "Valid_Inventory",
"input": [
{
'host': '1.1.1.1',
'username': 'arista',
'password': 'arista123!'
},
{
'host': '1.1.1.1',
'username': 'arista',
'password': 'arista123!'
}
],
"expected_result": "valid"
}
"""
if test_definition["expected_result"] == "valid":
pytest.skip("Not concerned by the test")
try:
for entity in test_definition["input"]:
AsyncEOSDevice(**entity)
except TypeError as exc:
logging.info("Error: %s", str(exc))
else:
raise AssertionError
@pytest.mark.parametrize(("start", "end", "tags", "disable_cache"), INVENTORY_RANGE_INVALID_PARAMS)
def test_invalid(self, start: str, end: str, tags: set[str], disable_cache: bool | None) -> None:
"""Invalid model parameters."""
with pytest.raises(ValidationError):
AntaInventoryRange.model_validate({"start": start, "end": end, "tags": tags, "disable_cache": disable_cache})


@@ -0,0 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
from tests.units.result_manager.conftest import list_result_factory, result_manager, result_manager_factory, test_result_factory
__all__ = ["result_manager", "result_manager_factory", "list_result_factory", "test_result_factory"]
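# Re-exporting these fixtures here makes the result_manager fixtures available to the reporter
# unit tests without duplicating their definitions.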


@@ -13,9 +13,9 @@ from rich.table import Table
from anta import RICH_COLOR_PALETTE
from anta.reporter import ReportJinja, ReportTable
from anta.result_manager.models import AntaTestStatus
if TYPE_CHECKING:
from anta.custom_types import TestStatus
from anta.result_manager import ResultManager
@@ -47,7 +47,6 @@ class TestReportTable:
)
def test__split_list_to_txt_list(self, usr_list: list[str], delimiter: str | None, expected_output: str) -> None:
"""Test _split_list_to_txt_list."""
# pylint: disable=protected-access
report = ReportTable()
assert report._split_list_to_txt_list(usr_list, delimiter) == expected_output
@@ -61,7 +60,6 @@
)
def test__build_headers(self, headers: list[str]) -> None:
"""Test _build_headers."""
# pylint: disable=protected-access
report = ReportTable()
table = Table()
table_column_before = len(table.columns)
@@ -73,17 +71,15 @@
@pytest.mark.parametrize(
("status", "expected_status"),
[
pytest.param("unknown", "unknown", id="unknown status"),
pytest.param("unset", "[grey74]unset", id="unset status"),
pytest.param("skipped", "[bold orange4]skipped", id="skipped status"),
pytest.param("failure", "[bold red]failure", id="failure status"),
pytest.param("error", "[indian_red]error", id="error status"),
pytest.param("success", "[green4]success", id="success status"),
pytest.param(AntaTestStatus.UNSET, "[grey74]unset", id="unset status"),
pytest.param(AntaTestStatus.SKIPPED, "[bold orange4]skipped", id="skipped status"),
pytest.param(AntaTestStatus.FAILURE, "[bold red]failure", id="failure status"),
pytest.param(AntaTestStatus.ERROR, "[indian_red]error", id="error status"),
pytest.param(AntaTestStatus.SUCCESS, "[green4]success", id="success status"),
],
)
def test__color_result(self, status: TestStatus, expected_status: str) -> None:
def test__color_result(self, status: AntaTestStatus, expected_status: str) -> None:
"""Test _build_headers."""
# pylint: disable=protected-access
report = ReportTable()
assert report._color_result(status) == expected_status
@@ -104,7 +100,6 @@ class TestReportTable:
expected_length: int,
) -> None:
"""Test report_all."""
# pylint: disable=too-many-arguments
manager = result_manager_factory(number_of_tests)
report = ReportTable()
@@ -133,14 +128,13 @@ class TestReportTable:
expected_length: int,
) -> None:
"""Test report_summary_tests."""
# pylint: disable=too-many-arguments
# TODO: refactor this later... this is injecting double test results by modifying the device name
# should be a fixture
manager = result_manager_factory(number_of_tests)
new_results = [result.model_copy() for result in manager.results]
for result in new_results:
result.name = "test_device"
result.result = "failure"
result.result = AntaTestStatus.FAILURE
report = ReportTable()
kwargs = {"tests": [test] if test is not None else None, "title": title}
@@ -168,14 +162,13 @@ class TestReportTable:
expected_length: int,
) -> None:
"""Test report_summary_devices."""
# pylint: disable=too-many-arguments
# TODO: refactor this later... this is injecting double test results by modifying the device name
# should be a fixture
manager = result_manager_factory(number_of_tests)
new_results = [result.model_copy() for result in manager.results]
for result in new_results:
result.name = dev or "test_device"
result.result = "failure"
result.result = AntaTestStatus.FAILURE
manager.results = new_results
report = ReportTable()


@@ -0,0 +1,94 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test anta.report.csv_reporter.py."""
# pylint: disable=too-few-public-methods
import csv
import pathlib
from typing import Any, Callable
import pytest
from anta.reporter.csv_reporter import ReportCsv
from anta.result_manager import ResultManager
from anta.tools import convert_categories
class TestReportCsv:
"""Tester for ReportCsv class."""
def compare_csv_and_result(self, rows: list[Any], index: int, result_manager: ResultManager) -> None:
"""Compare CSV and TestResult."""
assert rows[index + 1][0] == result_manager.results[index].name
assert rows[index + 1][1] == result_manager.results[index].test
assert rows[index + 1][2] == result_manager.results[index].result
assert rows[index + 1][3] == ReportCsv().split_list_to_txt_list(result_manager.results[index].messages)
assert rows[index + 1][4] == result_manager.results[index].description
assert rows[index + 1][5] == ReportCsv().split_list_to_txt_list(convert_categories(result_manager.results[index].categories))
def test_report_csv_generate(
self,
result_manager_factory: Callable[[int], ResultManager],
tmp_path: pathlib.Path,
) -> None:
"""Test CSV reporter."""
max_test_entries = 10
# Create a temporary CSV file path
csv_filename = tmp_path / "test.csv"
# Create a ResultManager instance with dummy test results
result_manager = result_manager_factory(max_test_entries)
# Test use case with a list of messages
result_manager.results[0].messages = ["Message 1", "Message 2"]
# Test use case with a list of categories
result_manager.results[1].messages = ["Cat 1", "Cat 2"]
# Generate the CSV report
ReportCsv.generate(result_manager, csv_filename)
# Read the generated CSV file
with pathlib.Path.open(csv_filename, encoding="utf-8") as csvfile:
reader = csv.reader(csvfile, delimiter=",")
rows = list(reader)
# Assert the headers
assert rows[0] == [
ReportCsv.Headers.device,
ReportCsv.Headers.test_name,
ReportCsv.Headers.test_status,
ReportCsv.Headers.messages,
ReportCsv.Headers.description,
ReportCsv.Headers.categories,
]
# Assert the test result rows
for index in [0, max_test_entries - 1]:
self.compare_csv_and_result(rows, index, result_manager)
# Assert number of lines: Number of TestResults + CSV Headers
assert len(rows) == len(result_manager.results) + 1
def test_report_csv_generate_os_error(
self,
result_manager_factory: Callable[[int], ResultManager],
tmp_path: pathlib.Path,
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test CSV reporter OSError."""
# Create a ResultManager instance with dummy test results
max_test_entries = 10
result_manager = result_manager_factory(max_test_entries)
# Create a temporary CSV file path and make tmp_path read_only
tmp_path.chmod(0o400)
csv_filename = tmp_path / "read_only.csv"
with pytest.raises(OSError, match="Permission denied"):
# Generate the CSV report
ReportCsv.generate(result_manager, csv_filename)
assert len(caplog.record_tuples) == 1
assert "OSError caught while writing the CSV file" in caplog.text


@@ -0,0 +1,54 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test anta.reporter.md_reporter.py."""
from __future__ import annotations
from io import StringIO
from pathlib import Path
import pytest
from anta.reporter.md_reporter import MDReportBase, MDReportGenerator
from anta.result_manager import ResultManager
DATA_DIR: Path = Path(__file__).parent.parent.parent.resolve() / "data"
def test_md_report_generate(tmp_path: Path, result_manager: ResultManager) -> None:
"""Test the MDReportGenerator class."""
md_filename = tmp_path / "test.md"
expected_report = "test_md_report.md"
# Generate the Markdown report
MDReportGenerator.generate(result_manager, md_filename)
assert md_filename.exists()
# Load the existing Markdown report to compare with the generated one
with (DATA_DIR / expected_report).open("r", encoding="utf-8") as f:
expected_content = f.read()
# Check the content of the Markdown file
content = md_filename.read_text(encoding="utf-8")
assert content == expected_content
def test_md_report_base() -> None:
"""Test the MDReportBase class."""
class FakeMDReportBase(MDReportBase):
"""Fake MDReportBase class."""
def generate_section(self) -> None:
pass
results = ResultManager()
with StringIO() as mock_file:
report = FakeMDReportBase(mock_file, results)
assert report.generate_heading_name() == "Fake MD Report Base"
with pytest.raises(NotImplementedError, match="Subclasses should implement this method"):
report.generate_rows()


@@ -0,0 +1,85 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
import json
from pathlib import Path
from typing import Callable
import pytest
from anta.device import AntaDevice
from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult
TEST_RESULTS: Path = Path(__file__).parent.resolve() / "test_files" / "test_md_report_results.json"
@pytest.fixture
def result_manager_factory(list_result_factory: Callable[[int], list[TestResult]]) -> Callable[[int], ResultManager]:
"""Return a ResultManager factory that takes as input a number of tests."""
# pylint: disable=redefined-outer-name
def _factory(number: int = 0) -> ResultManager:
"""Create a factory for list[TestResult] entry of size entries."""
result_manager = ResultManager()
result_manager.results = list_result_factory(number)
return result_manager
return _factory
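# Example: result_manager_factory(10) returns a ResultManager pre-populated with 10 generated TestResult entries.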
@pytest.fixture
def result_manager() -> ResultManager:
"""Return a ResultManager with 30 random tests loaded from a JSON file.
Devices: DC1-SPINE1, DC1-LEAF1A
- Total tests: 30
- Success: 7
- Skipped: 2
- Failure: 19
- Error: 2
See `tests/units/result_manager/test_md_report_results.json` for details.
"""
manager = ResultManager()
with TEST_RESULTS.open("r", encoding="utf-8") as f:
results = json.load(f)
for result in results:
manager.add(TestResult(**result))
return manager
@pytest.fixture
def test_result_factory(device: AntaDevice) -> Callable[[int], TestResult]:
"""Return a anta.result_manager.models.TestResult object."""
# pylint: disable=redefined-outer-name
def _create(index: int = 0) -> TestResult:
"""Actual Factory."""
return TestResult(
name=device.name,
test=f"VerifyTest{index}",
categories=["test"],
description=f"Verifies Test {index}",
custom_field=None,
)
return _create
@pytest.fixture
def list_result_factory(test_result_factory: Callable[[int], TestResult]) -> Callable[[int], list[TestResult]]:
"""Return a list[TestResult] with 'size' TestResult instantiated using the test_result_factory fixture."""
# pylint: disable=redefined-outer-name
def _factory(size: int = 0) -> list[TestResult]:
"""Create a factory for list[TestResult] entry of size entries."""
return [test_result_factory(i) for i in range(size)]
return _factory


@@ -6,15 +6,16 @@
from __future__ import annotations
import json
import re
from contextlib import AbstractContextManager, nullcontext
from typing import TYPE_CHECKING, Callable
import pytest
from anta.result_manager import ResultManager, models
from anta.result_manager.models import AntaTestStatus
if TYPE_CHECKING:
from anta.custom_types import TestStatus
from anta.result_manager.models import TestResult
@@ -55,7 +56,7 @@ class TestResultManager:
success_list = list_result_factory(3)
for test in success_list:
test.result = "success"
test.result = AntaTestStatus.SUCCESS
result_manager.results = success_list
json_res = result_manager.json
@@ -71,6 +72,27 @@ class TestResultManager:
assert test.get("custom_field") is None
assert test.get("result") == "success"
def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
"""Test ResultManager.sorted_category_stats."""
result_manager = ResultManager()
results = list_result_factory(4)
# Modify the categories to have a mix of different acronym categories
results[0].categories = ["ospf"]
results[1].categories = ["bgp"]
results[2].categories = ["vxlan"]
results[3].categories = ["system"]
result_manager.results = results
# Check the current categories order
expected_order = ["ospf", "bgp", "vxlan", "system"]
assert list(result_manager.category_stats.keys()) == expected_order
# Check the sorted categories order
expected_order = ["bgp", "ospf", "system", "vxlan"]
assert list(result_manager.sorted_category_stats.keys()) == expected_order
@pytest.mark.parametrize(
("starting_status", "test_status", "expected_status", "expected_raise"),
[
@@ -119,29 +141,26 @@ class TestResultManager:
nullcontext(),
id="failure, add success",
),
pytest.param(
"unset", "unknown", None, pytest.raises(ValueError, match="Input should be 'unset', 'success', 'failure', 'error' or 'skipped'"), id="wrong status"
),
pytest.param("unset", "unknown", None, pytest.raises(ValueError, match="'unknown' is not a valid AntaTestStatus"), id="wrong status"),
],
)
def test_add(
self,
test_result_factory: Callable[[], TestResult],
starting_status: TestStatus,
test_status: TestStatus,
starting_status: str,
test_status: str,
expected_status: str,
expected_raise: AbstractContextManager[Exception],
) -> None:
# pylint: disable=too-many-arguments
"""Test ResultManager_update_status."""
result_manager = ResultManager()
result_manager.status = starting_status
result_manager.status = AntaTestStatus(starting_status)
assert result_manager.error_status is False
assert len(result_manager) == 0
test = test_result_factory()
test.result = test_status
with expected_raise:
test.result = AntaTestStatus(test_status)
result_manager.add(test)
if test_status == "error":
assert result_manager.error_status is True
@@ -149,6 +168,91 @@ class TestResultManager:
assert result_manager.status == expected_status
assert len(result_manager) == 1
def test_add_clear_cache(self, result_manager: ResultManager, test_result_factory: Callable[[], TestResult]) -> None:
"""Test ResultManager.add and make sure the cache is reset after adding a new test."""
# Check the cache is empty
assert "results_by_status" not in result_manager.__dict__
# Access the cache
assert result_manager.get_total_results() == 30
# Check the cache is filled with the correct results count
assert "results_by_status" in result_manager.__dict__
assert sum(len(v) for v in result_manager.__dict__["results_by_status"].values()) == 30
# Add a new test
result_manager.add(result=test_result_factory())
# Check the cache has been reset
assert "results_by_status" not in result_manager.__dict__
# Access the cache again
assert result_manager.get_total_results() == 31
# Check the cache is filled again with the correct results count
assert "results_by_status" in result_manager.__dict__
assert sum(len(v) for v in result_manager.__dict__["results_by_status"].values()) == 31
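The __dict__ checks above match the standard functools.cached_property pattern: the grouped results are computed once, stored under the property name in the instance __dict__, and invalidated by popping that key whenever a result is added. A minimal sketch of the pattern, with every name other than results_by_status assumed for illustration:

from collections import defaultdict
from functools import cached_property


class CachingManager:
    def __init__(self) -> None:
        self._results: list[str] = []

    @cached_property
    def results_by_status(self) -> dict[str, list[str]]:
        # Computed on first access, then served from self.__dict__.
        grouped: dict[str, list[str]] = defaultdict(list)
        for status in self._results:
            grouped[status].append(status)
        return grouped

    def add(self, status: str) -> None:
        self._results.append(status)
        # Drop the cached grouping so the next access recomputes it.
        self.__dict__.pop("results_by_status", None)


manager = CachingManager()
manager.add("success")
assert "results_by_status" not in manager.__dict__
_ = manager.results_by_status
assert "results_by_status" in manager.__dict__
manager.add("failure")
assert "results_by_status" not in manager.__dict__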
def test_get_results(self, result_manager: ResultManager) -> None:
"""Test ResultManager.get_results."""
# Check for single status
success_results = result_manager.get_results(status={AntaTestStatus.SUCCESS})
assert len(success_results) == 7
assert all(r.result == "success" for r in success_results)
# Check for multiple statuses
failure_results = result_manager.get_results(status={AntaTestStatus.FAILURE, AntaTestStatus.ERROR})
assert len(failure_results) == 21
assert all(r.result in {"failure", "error"} for r in failure_results)
# Check all results
all_results = result_manager.get_results()
assert len(all_results) == 30
def test_get_results_sort_by(self, result_manager: ResultManager) -> None:
"""Test ResultManager.get_results with sort_by."""
# Check all results with sort_by result
all_results = result_manager.get_results(sort_by=["result"])
assert len(all_results) == 30
assert [r.result for r in all_results] == ["error"] * 2 + ["failure"] * 19 + ["skipped"] * 2 + ["success"] * 7
# Check all results with sort_by device (name)
all_results = result_manager.get_results(sort_by=["name"])
assert len(all_results) == 30
assert all_results[0].name == "DC1-LEAF1A"
assert all_results[-1].name == "DC1-SPINE1"
# Check multiple statuses with sort_by categories
success_skipped_results = result_manager.get_results(status={AntaTestStatus.SUCCESS, AntaTestStatus.SKIPPED}, sort_by=["categories"])
assert len(success_skipped_results) == 9
assert success_skipped_results[0].categories == ["Interfaces"]
assert success_skipped_results[-1].categories == ["VXLAN"]
# Check all results with bad sort_by
with pytest.raises(
ValueError,
match=re.escape(
"Invalid sort_by fields: ['bad_field']. Accepted fields are: ['name', 'test', 'categories', 'description', 'result', 'messages', 'custom_field']",
),
):
all_results = result_manager.get_results(sort_by=["bad_field"])
def test_get_total_results(self, result_manager: ResultManager) -> None:
"""Test ResultManager.get_total_results."""
# Test all results
assert result_manager.get_total_results() == 30
# Test single status
assert result_manager.get_total_results(status={AntaTestStatus.SUCCESS}) == 7
assert result_manager.get_total_results(status={AntaTestStatus.FAILURE}) == 19
assert result_manager.get_total_results(status={AntaTestStatus.ERROR}) == 2
assert result_manager.get_total_results(status={AntaTestStatus.SKIPPED}) == 2
# Test multiple statuses
assert result_manager.get_total_results(status={AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE}) == 26
assert result_manager.get_total_results(status={AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE, AntaTestStatus.ERROR}) == 28
assert result_manager.get_total_results(status={AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE, AntaTestStatus.ERROR, AntaTestStatus.SKIPPED}) == 30
@pytest.mark.parametrize(
("status", "error_status", "ignore_error", "expected_status"),
[
@@ -159,7 +263,7 @@ class TestResultManager:
)
def test_get_status(
self,
status: TestStatus,
status: AntaTestStatus,
error_status: bool,
ignore_error: bool,
expected_status: str,
@@ -177,28 +281,28 @@ class TestResultManager:
success_list = list_result_factory(3)
for test in success_list:
test.result = "success"
test.result = AntaTestStatus.SUCCESS
result_manager.results = success_list
test = test_result_factory()
test.result = "failure"
test.result = AntaTestStatus.FAILURE
result_manager.add(test)
test = test_result_factory()
test.result = "error"
test.result = AntaTestStatus.ERROR
result_manager.add(test)
test = test_result_factory()
test.result = "skipped"
test.result = AntaTestStatus.SKIPPED
result_manager.add(test)
assert len(result_manager) == 6
assert len(result_manager.filter({"failure"})) == 5
assert len(result_manager.filter({"error"})) == 5
assert len(result_manager.filter({"skipped"})) == 5
assert len(result_manager.filter({"failure", "error"})) == 4
assert len(result_manager.filter({"failure", "error", "skipped"})) == 3
assert len(result_manager.filter({"success", "failure", "error", "skipped"})) == 0
assert len(result_manager.filter({AntaTestStatus.FAILURE})) == 5
assert len(result_manager.filter({AntaTestStatus.ERROR})) == 5
assert len(result_manager.filter({AntaTestStatus.SKIPPED})) == 5
assert len(result_manager.filter({AntaTestStatus.FAILURE, AntaTestStatus.ERROR})) == 4
assert len(result_manager.filter({AntaTestStatus.FAILURE, AntaTestStatus.ERROR, AntaTestStatus.SKIPPED})) == 3
assert len(result_manager.filter({AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE, AntaTestStatus.ERROR, AntaTestStatus.SKIPPED})) == 0
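The counts above imply that ResultManager.filter hides the given statuses rather than selecting them: with 3 success, 1 failure, 1 error and 1 skipped result, hiding one status leaves 5, hiding all four leaves 0. A small sketch of that semantic over a plain list (assumed helper, not the ANTA code):

statuses = ["success", "success", "success", "failure", "error", "skipped"]


def filter_out(results: list[str], hide: set[str]) -> list[str]:
    # Keep only results whose status is NOT in the hide set.
    return [status for status in results if status not in hide]


assert len(filter_out(statuses, {"failure"})) == 5
assert len(filter_out(statuses, {"failure", "error"})) == 4
assert len(filter_out(statuses, {"success", "failure", "error", "skipped"})) == 0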
def test_get_by_tests(self, test_result_factory: Callable[[], TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None:
"""Test ResultManager.get_by_tests."""

View file

@@ -0,0 +1,378 @@
[
{
"name": "DC1-SPINE1",
"test": "VerifyTacacsSourceIntf",
"categories": [
"AAA"
],
"description": "Verifies TACACS source-interface for a specified VRF.",
"result": "failure",
"messages": [
"Source-interface Management0 is not configured in VRF default"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyLLDPNeighbors",
"categories": [
"Connectivity"
],
"description": "Verifies that the provided LLDP neighbors are connected properly.",
"result": "failure",
"messages": [
"Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-LEAF1A_Ethernet1\n Ethernet2\n DC1-LEAF1B_Ethernet1\nPort(s) not configured:\n Ethernet7"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyBGPPeerCount",
"categories": [
"BGP"
],
"description": "Verifies the count of BGP peers.",
"result": "failure",
"messages": [
"Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Not Configured', 'default': 'Expected: 3, Actual: 4'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, {'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 4'}}]"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifySTPMode",
"categories": [
"STP"
],
"description": "Verifies the configured STP mode for a provided list of VLAN(s).",
"result": "failure",
"messages": [
"STP mode 'rapidPvst' not configured for the following VLAN(s): [10, 20]"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifySnmpStatus",
"categories": [
"SNMP"
],
"description": "Verifies if the SNMP agent is enabled.",
"result": "failure",
"messages": [
"SNMP agent disabled in vrf default"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyRoutingTableEntry",
"categories": [
"Routing"
],
"description": "Verifies that the provided routes are present in the routing table of a specified VRF.",
"result": "failure",
"messages": [
"The following route(s) are missing from the routing table of VRF default: ['10.1.0.2']"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyInterfaceUtilization",
"categories": [
"Interfaces"
],
"description": "Verifies that the utilization of interfaces is below a certain threshold.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyMlagStatus",
"categories": [
"MLAG"
],
"description": "Verifies the health status of the MLAG configuration.",
"result": "skipped",
"messages": [
"MLAG is disabled"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyVxlan1Interface",
"categories": [
"VXLAN"
],
"description": "Verifies the Vxlan1 interface status.",
"result": "skipped",
"messages": [
"Vxlan1 interface is not configured"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyBFDSpecificPeers",
"categories": [
"BFD"
],
"description": "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF.",
"result": "failure",
"messages": [
"Following BFD peers are not configured, status is not up or remote disc is zero:\n{'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}}"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyNTP",
"categories": [
"System"
],
"description": "Verifies if NTP is synchronised.",
"result": "failure",
"messages": [
"The device is not synchronized with the configured NTP server(s): 'NTP is disabled.'"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyReachability",
"categories": [
"Connectivity"
],
"description": "Test the network reachability to one or many destination IP(s).",
"result": "error",
"messages": [
"ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyTelnetStatus",
"categories": [
"Security"
],
"description": "Verifies if Telnet is disabled in the default VRF.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyEOSVersion",
"categories": [
"Software"
],
"description": "Verifies the EOS version of the device.",
"result": "failure",
"messages": [
"device is running version \"4.31.1F-34554157.4311F (engineering build)\" not in expected versions: ['4.25.4M', '4.26.1F']"
],
"custom_field": null
},
{
"name": "DC1-SPINE1",
"test": "VerifyHostname",
"categories": [
"Services"
],
"description": "Verifies the hostname of a device.",
"result": "failure",
"messages": [
"Expected `s1-spine1` as the hostname, but found `DC1-SPINE1` instead."
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyTacacsSourceIntf",
"categories": [
"AAA"
],
"description": "Verifies TACACS source-interface for a specified VRF.",
"result": "failure",
"messages": [
"Source-interface Management0 is not configured in VRF default"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyLLDPNeighbors",
"categories": [
"Connectivity"
],
"description": "Verifies that the provided LLDP neighbors are connected properly.",
"result": "failure",
"messages": [
"Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet1\n Ethernet2\n DC1-SPINE2_Ethernet1\nPort(s) not configured:\n Ethernet7"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyBGPPeerCount",
"categories": [
"BGP"
],
"description": "Verifies the count of BGP peers.",
"result": "failure",
"messages": [
"Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 2, Actual: 1'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Expected: 3, Actual: 0'}}]"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifySTPMode",
"categories": [
"STP"
],
"description": "Verifies the configured STP mode for a provided list of VLAN(s).",
"result": "failure",
"messages": [
"Wrong STP mode configured for the following VLAN(s): [10, 20]"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifySnmpStatus",
"categories": [
"SNMP"
],
"description": "Verifies if the SNMP agent is enabled.",
"result": "failure",
"messages": [
"SNMP agent disabled in vrf default"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyRoutingTableEntry",
"categories": [
"Routing"
],
"description": "Verifies that the provided routes are present in the routing table of a specified VRF.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyInterfaceUtilization",
"categories": [
"Interfaces"
],
"description": "Verifies that the utilization of interfaces is below a certain threshold.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyMlagStatus",
"categories": [
"MLAG"
],
"description": "Verifies the health status of the MLAG configuration.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyVxlan1Interface",
"categories": [
"VXLAN"
],
"description": "Verifies the Vxlan1 interface status.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyBFDSpecificPeers",
"categories": [
"BFD"
],
"description": "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF.",
"result": "failure",
"messages": [
"Following BFD peers are not configured, status is not up or remote disc is zero:\n{'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}}"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyNTP",
"categories": [
"System"
],
"description": "Verifies if NTP is synchronised.",
"result": "failure",
"messages": [
"The device is not synchronized with the configured NTP server(s): 'NTP is disabled.'"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyReachability",
"categories": [
"Connectivity"
],
"description": "Test the network reachability to one or many destination IP(s).",
"result": "error",
"messages": [
"ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyTelnetStatus",
"categories": [
"Security"
],
"description": "Verifies if Telnet is disabled in the default VRF.",
"result": "success",
"messages": [],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyEOSVersion",
"categories": [
"Software"
],
"description": "Verifies the EOS version of the device.",
"result": "failure",
"messages": [
"device is running version \"4.31.1F-34554157.4311F (engineering build)\" not in expected versions: ['4.25.4M', '4.26.1F']"
],
"custom_field": null
},
{
"name": "DC1-LEAF1A",
"test": "VerifyHostname",
"categories": [
"Services"
],
"description": "Verifies the hostname of a device.",
"result": "failure",
"messages": [
"Expected `s1-spine1` as the hostname, but found `DC1-LEAF1A` instead."
],
"custom_field": null
}
]
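This new JSON file stores pre-computed results keyed exactly like TestResult fields (name, test, categories, description, result, messages, custom_field). A hedged sketch of loading such a file back into a ResultManager, assuming TestResult accepts these keys directly and the path below is only a placeholder:

import json
from pathlib import Path

from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult


def load_results(path: Path) -> ResultManager:
    # Rebuild one TestResult per JSON entry and feed them to a fresh manager.
    manager = ResultManager()
    for entry in json.loads(path.read_text(encoding="UTF-8")):
        manager.add(TestResult(**entry))
    return manager


# Usage (illustrative path):
# manager = load_results(Path("tests/units/result_manager/test_files/test_results.json"))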

View file

@@ -5,56 +5,65 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable
from typing import TYPE_CHECKING, Callable
import pytest
# Import as Result to avoid pytest collection
from tests.data.json_data import TEST_RESULT_SET_STATUS
from tests.lib.fixture import DEVICE_NAME
from tests.lib.utils import generate_test_ids_dict
from anta.result_manager.models import AntaTestStatus
from tests.units.conftest import DEVICE_NAME
if TYPE_CHECKING:
from _pytest.mark.structures import ParameterSet
# Import as Result to avoid pytest collection
from anta.result_manager.models import TestResult as Result
TEST_RESULT_SET_STATUS: list[ParameterSet] = [
pytest.param(AntaTestStatus.SUCCESS, "test success message", id="set_success"),
pytest.param(AntaTestStatus.ERROR, "test error message", id="set_error"),
pytest.param(AntaTestStatus.FAILURE, "test failure message", id="set_failure"),
pytest.param(AntaTestStatus.SKIPPED, "test skipped message", id="set_skipped"),
pytest.param(AntaTestStatus.UNSET, "test unset message", id="set_unset"),
]
class TestTestResultModels:
"""Test components of anta.result_manager.models."""
@pytest.mark.parametrize("data", TEST_RESULT_SET_STATUS, ids=generate_test_ids_dict)
def test__is_status_foo(self, test_result_factory: Callable[[int], Result], data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("target", "message"), TEST_RESULT_SET_STATUS)
def test__is_status_foo(self, test_result_factory: Callable[[int], Result], target: AntaTestStatus, message: str) -> None:
"""Test TestResult.is_foo methods."""
testresult = test_result_factory(1)
assert testresult.result == "unset"
assert testresult.result == AntaTestStatus.UNSET
assert len(testresult.messages) == 0
if data["target"] == "success":
testresult.is_success(data["message"])
assert testresult.result == data["target"]
assert data["message"] in testresult.messages
if data["target"] == "failure":
testresult.is_failure(data["message"])
assert testresult.result == data["target"]
assert data["message"] in testresult.messages
if data["target"] == "error":
testresult.is_error(data["message"])
assert testresult.result == data["target"]
assert data["message"] in testresult.messages
if data["target"] == "skipped":
testresult.is_skipped(data["message"])
assert testresult.result == data["target"]
assert data["message"] in testresult.messages
# no helper for unset, testing _set_status
if data["target"] == "unset":
testresult._set_status("unset", data["message"]) # pylint: disable=W0212
assert testresult.result == data["target"]
assert data["message"] in testresult.messages
if target == AntaTestStatus.SUCCESS:
testresult.is_success(message)
assert testresult.result == "success"
assert message in testresult.messages
if target == AntaTestStatus.FAILURE:
testresult.is_failure(message)
assert testresult.result == "failure"
assert message in testresult.messages
if target == AntaTestStatus.ERROR:
testresult.is_error(message)
assert testresult.result == "error"
assert message in testresult.messages
if target == AntaTestStatus.SKIPPED:
testresult.is_skipped(message)
assert testresult.result == "skipped"
assert message in testresult.messages
if target == AntaTestStatus.UNSET:
# no helper for unset, testing _set_status
testresult._set_status(AntaTestStatus.UNSET, message)
assert testresult.result == "unset"
assert message in testresult.messages
@pytest.mark.parametrize("data", TEST_RESULT_SET_STATUS, ids=generate_test_ids_dict)
def test____str__(self, test_result_factory: Callable[[int], Result], data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("target", "message"), TEST_RESULT_SET_STATUS)
def test____str__(self, test_result_factory: Callable[[int], Result], target: AntaTestStatus, message: str) -> None:
"""Test TestResult.__str__."""
testresult = test_result_factory(1)
assert testresult.result == "unset"
assert testresult.result == AntaTestStatus.UNSET
assert len(testresult.messages) == 0
testresult._set_status(data["target"], data["message"]) # pylint: disable=W0212
assert testresult.result == data["target"]
assert str(testresult) == f"Test 'VerifyTest1' (on '{DEVICE_NAME}'): Result '{data['target']}'\nMessages: {[data['message']]}"
testresult._set_status(target, message)
assert testresult.result == target
assert str(testresult) == f"Test 'VerifyTest1' (on '{DEVICE_NAME}'): Result '{target}'\nMessages: {[message]}"
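These assertions compare TestResult.result both to AntaTestStatus members and to bare strings, which works when the status type is a str-based Enum. A minimal sketch of that pattern (illustrative, not copied from anta.result_manager.models):

from enum import Enum


class Status(str, Enum):
    # Members compare equal to their string value, so Status.SUCCESS == "success".
    UNSET = "unset"
    SUCCESS = "success"
    FAILURE = "failure"
    ERROR = "error"
    SKIPPED = "skipped"

    def __str__(self) -> str:
        return self.value


assert Status.SUCCESS == "success"
try:
    Status("unknown")
except ValueError as exc:
    # Looking up an unknown value raises ValueError: "'unknown' is not a valid Status".
    assert "is not a valid Status" in str(exc)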

View file

@@ -5,8 +5,9 @@
from __future__ import annotations
from json import load as json_load
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING, Any, Literal
import pytest
from pydantic import ValidationError
@@ -27,30 +28,25 @@ from anta.tests.system import (
VerifyReloadCause,
VerifyUptime,
)
from tests.lib.utils import generate_test_ids_list
from tests.units.test_models import FakeTestWithInput
# Test classes used as expected values
if TYPE_CHECKING:
from _pytest.mark.structures import ParameterSet
DATA_DIR: Path = Path(__file__).parent.parent.resolve() / "data"
INIT_CATALOG_DATA: list[dict[str, Any]] = [
{
"name": "test_catalog",
"filename": "test_catalog.yml",
"tests": [
(VerifyEOSVersion, VerifyEOSVersion.Input(versions=["4.31.1F"])),
],
},
{
"name": "test_catalog_with_tags",
"filename": "test_catalog_with_tags.yml",
"tests": [
INIT_CATALOG_PARAMS: list[ParameterSet] = [
pytest.param("test_catalog.yml", "yaml", [(VerifyEOSVersion, VerifyEOSVersion.Input(versions=["4.31.1F"]))], id="test_catalog_yaml"),
pytest.param("test_catalog.json", "json", [(VerifyEOSVersion, VerifyEOSVersion.Input(versions=["4.31.1F"]))], id="test_catalog_json"),
pytest.param(
"test_catalog_with_tags.yml",
"yaml",
[
(
VerifyUptime,
VerifyUptime.Input(
minimum=10,
filters=VerifyUptime.Input.Filters(tags={"fabric"}),
filters=VerifyUptime.Input.Filters(tags={"spine"}),
),
),
(
@@ -60,176 +56,143 @@ INIT_CATALOG_DATA: list[dict[str, Any]] = [
filters=VerifyUptime.Input.Filters(tags={"leaf"}),
),
),
(VerifyReloadCause, {"filters": {"tags": ["leaf", "spine"]}}),
(VerifyReloadCause, {"filters": {"tags": ["spine", "leaf"]}}),
(VerifyCoredump, VerifyCoredump.Input()),
(VerifyAgentLogs, AntaTest.Input()),
(VerifyCPUUtilization, VerifyCPUUtilization.Input(filters=VerifyCPUUtilization.Input.Filters(tags={"leaf"}))),
(VerifyMemoryUtilization, VerifyMemoryUtilization.Input(filters=VerifyMemoryUtilization.Input.Filters(tags={"testdevice"}))),
(VerifyCPUUtilization, None),
(VerifyMemoryUtilization, None),
(VerifyFileSystemUtilization, None),
(VerifyNTP, {}),
(VerifyMlagStatus, None),
(VerifyL3MTU, {"mtu": 1500, "filters": {"tags": ["demo"]}}),
(VerifyMlagStatus, {"filters": {"tags": ["leaf"]}}),
(VerifyL3MTU, {"mtu": 1500, "filters": {"tags": ["spine"]}}),
],
},
{
"name": "test_empty_catalog",
"filename": "test_empty_catalog.yml",
"tests": [],
},
{
"name": "test_empty_dict_catalog",
"filename": "test_empty_dict_catalog.yml",
"tests": [],
},
id="test_catalog_with_tags",
),
pytest.param("test_empty_catalog.yml", "yaml", [], id="test_empty_catalog"),
pytest.param("test_empty_dict_catalog.yml", "yaml", [], id="test_empty_dict_catalog"),
]
CATALOG_PARSE_FAIL_DATA: list[dict[str, Any]] = [
{
"name": "undefined_tests",
"filename": "test_catalog_with_undefined_tests.yml",
"error": "FakeTest is not defined in Python module anta.tests.software",
},
{
"name": "undefined_module",
"filename": "test_catalog_with_undefined_module.yml",
"error": "Module named anta.tests.undefined cannot be imported",
},
{
"name": "undefined_module",
"filename": "test_catalog_with_undefined_module.yml",
"error": "Module named anta.tests.undefined cannot be imported",
},
{
"name": "syntax_error",
"filename": "test_catalog_with_syntax_error_module.yml",
"error": "Value error, Module named tests.data.syntax_error cannot be imported. Verify that the module exists and there is no Python syntax issues.",
},
{
"name": "undefined_module_nested",
"filename": "test_catalog_with_undefined_module_nested.yml",
"error": "Module named undefined from package anta.tests cannot be imported",
},
{
"name": "not_a_list",
"filename": "test_catalog_not_a_list.yml",
"error": "Value error, Syntax error when parsing: True\nIt must be a list of ANTA tests. Check the test catalog.",
},
{
"name": "test_definition_not_a_dict",
"filename": "test_catalog_test_definition_not_a_dict.yml",
"error": "Value error, Syntax error when parsing: VerifyEOSVersion\nIt must be a dictionary. Check the test catalog.",
},
{
"name": "test_definition_multiple_dicts",
"filename": "test_catalog_test_definition_multiple_dicts.yml",
"error": "Value error, Syntax error when parsing: {'VerifyEOSVersion': {'versions': ['4.25.4M', '4.26.1F']}, "
"'VerifyTerminAttrVersion': {'versions': ['4.25.4M']}}\nIt must be a dictionary with a single entry. Check the indentation in the test catalog.",
},
{"name": "wrong_type_after_parsing", "filename": "test_catalog_wrong_type.yml", "error": "must be a dict, got str"},
CATALOG_PARSE_FAIL_PARAMS: list[ParameterSet] = [
pytest.param(
"test_catalog_wrong_format.toto",
"toto",
"'toto' is not a valid format for an AntaCatalog file. Only 'yaml' and 'json' are supported.",
id="undefined_tests",
),
pytest.param("test_catalog_invalid_json.json", "json", "JSONDecodeError", id="invalid_json"),
pytest.param("test_catalog_with_undefined_tests.yml", "yaml", "FakeTest is not defined in Python module anta.tests.software", id="undefined_tests"),
pytest.param("test_catalog_with_undefined_module.yml", "yaml", "Module named anta.tests.undefined cannot be imported", id="undefined_module"),
pytest.param(
"test_catalog_with_syntax_error_module.yml",
"yaml",
"Value error, Module named tests.data.syntax_error cannot be imported. Verify that the module exists and there is no Python syntax issues.",
id="syntax_error",
),
pytest.param(
"test_catalog_with_undefined_module_nested.yml",
"yaml",
"Module named undefined from package anta.tests cannot be imported",
id="undefined_module_nested",
),
pytest.param(
"test_catalog_not_a_list.yml",
"yaml",
"Value error, Syntax error when parsing: True\nIt must be a list of ANTA tests. Check the test catalog.",
id="not_a_list",
),
pytest.param(
"test_catalog_test_definition_not_a_dict.yml",
"yaml",
"Value error, Syntax error when parsing: VerifyEOSVersion\nIt must be a dictionary. Check the test catalog.",
id="test_definition_not_a_dict",
),
pytest.param(
"test_catalog_test_definition_multiple_dicts.yml",
"yaml",
"Value error, Syntax error when parsing: {'VerifyEOSVersion': {'versions': ['4.25.4M', '4.26.1F']}, 'VerifyTerminAttrVersion': {'versions': ['4.25.4M']}}\n"
"It must be a dictionary with a single entry. Check the indentation in the test catalog.",
id="test_definition_multiple_dicts",
),
pytest.param("test_catalog_wrong_type.yml", "yaml", "must be a dict, got str", id="wrong_type_after_parsing"),
]
CATALOG_FROM_DICT_FAIL_DATA: list[dict[str, Any]] = [
{
"name": "undefined_tests",
"filename": "test_catalog_with_undefined_tests.yml",
"error": "FakeTest is not defined in Python module anta.tests.software",
},
{
"name": "wrong_type",
"filename": "test_catalog_wrong_type.yml",
"error": "Wrong input type for catalog data, must be a dict, got str",
},
CATALOG_FROM_DICT_FAIL_PARAMS: list[ParameterSet] = [
pytest.param("test_catalog_with_undefined_tests.yml", "FakeTest is not defined in Python module anta.tests.software", id="undefined_tests"),
pytest.param("test_catalog_wrong_type.yml", "Wrong input type for catalog data, must be a dict, got str", id="wrong_type"),
]
CATALOG_FROM_LIST_FAIL_DATA: list[dict[str, Any]] = [
{
"name": "wrong_inputs",
"tests": [
(
FakeTestWithInput,
AntaTest.Input(),
),
],
"error": "Test input has type AntaTest.Input but expected type FakeTestWithInput.Input",
},
{
"name": "no_test",
"tests": [(None, None)],
"error": "Input should be a subclass of AntaTest",
},
{
"name": "no_input_when_required",
"tests": [(FakeTestWithInput, None)],
"error": "FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Field required",
},
{
"name": "wrong_input_type",
"tests": [(FakeTestWithInput, {"string": True})],
"error": "FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Input should be a valid string",
},
CATALOG_FROM_LIST_FAIL_PARAMS: list[ParameterSet] = [
pytest.param([(FakeTestWithInput, AntaTest.Input())], "Test input has type AntaTest.Input but expected type FakeTestWithInput.Input", id="wrong_inputs"),
pytest.param([(None, None)], "Input should be a subclass of AntaTest", id="no_test"),
pytest.param(
[(FakeTestWithInput, None)],
"FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Field required",
id="no_input_when_required",
),
pytest.param(
[(FakeTestWithInput, {"string": True})],
"FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Input should be a valid string",
id="wrong_input_type",
),
]
TESTS_SETTER_FAIL_DATA: list[dict[str, Any]] = [
{
"name": "not_a_list",
"tests": "not_a_list",
"error": "The catalog must contain a list of tests",
},
{
"name": "not_a_list_of_test_definitions",
"tests": [42, 43],
"error": "A test in the catalog must be an AntaTestDefinition instance",
},
TESTS_SETTER_FAIL_PARAMS: list[ParameterSet] = [
pytest.param("not_a_list", "The catalog must contain a list of tests", id="not_a_list"),
pytest.param([42, 43], "A test in the catalog must be an AntaTestDefinition instance", id="not_a_list_of_test_definitions"),
]
class TestAntaCatalog:
"""Test for anta.catalog.AntaCatalog."""
"""Tests for anta.catalog.AntaCatalog."""
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
def test_parse(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "file_format", "tests"), INIT_CATALOG_PARAMS)
def test_parse(self, filename: str, file_format: Literal["yaml", "json"], tests: list[tuple[type[AntaTest], AntaTest.Input | dict[str, Any] | None]]) -> None:
"""Instantiate AntaCatalog from a file."""
catalog: AntaCatalog = AntaCatalog.parse(DATA_DIR / catalog_data["filename"])
catalog: AntaCatalog = AntaCatalog.parse(DATA_DIR / filename, file_format=file_format)
assert len(catalog.tests) == len(catalog_data["tests"])
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
assert len(catalog.tests) == len(tests)
for test_id, (test, inputs_data) in enumerate(tests):
assert catalog.tests[test_id].test == test
if inputs_data is not None:
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
assert inputs == catalog.tests[test_id].inputs
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
def test_from_list(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "file_format", "tests"), INIT_CATALOG_PARAMS)
def test_from_list(
self, filename: str, file_format: Literal["yaml", "json"], tests: list[tuple[type[AntaTest], AntaTest.Input | dict[str, Any] | None]]
) -> None:
"""Instantiate AntaCatalog from a list."""
catalog: AntaCatalog = AntaCatalog.from_list(catalog_data["tests"])
catalog: AntaCatalog = AntaCatalog.from_list(tests)
assert len(catalog.tests) == len(catalog_data["tests"])
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
assert len(catalog.tests) == len(tests)
for test_id, (test, inputs_data) in enumerate(tests):
assert catalog.tests[test_id].test == test
if inputs_data is not None:
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
assert inputs == catalog.tests[test_id].inputs
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
def test_from_dict(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "file_format", "tests"), INIT_CATALOG_PARAMS)
def test_from_dict(
self, filename: str, file_format: Literal["yaml", "json"], tests: list[tuple[type[AntaTest], AntaTest.Input | dict[str, Any] | None]]
) -> None:
"""Instantiate AntaCatalog from a dict."""
file = DATA_DIR / catalog_data["filename"]
with file.open(encoding="UTF-8") as file:
data = safe_load(file)
file = DATA_DIR / filename
with file.open(encoding="UTF-8") as f:
data = safe_load(f) if file_format == "yaml" else json_load(f)
catalog: AntaCatalog = AntaCatalog.from_dict(data)
assert len(catalog.tests) == len(catalog_data["tests"])
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
assert len(catalog.tests) == len(tests)
for test_id, (test, inputs_data) in enumerate(tests):
assert catalog.tests[test_id].test == test
if inputs_data is not None:
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
assert inputs == catalog.tests[test_id].inputs
@pytest.mark.parametrize("catalog_data", CATALOG_PARSE_FAIL_DATA, ids=generate_test_ids_list(CATALOG_PARSE_FAIL_DATA))
def test_parse_fail(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "file_format", "error"), CATALOG_PARSE_FAIL_PARAMS)
def test_parse_fail(self, filename: str, file_format: Literal["yaml", "json"], error: str) -> None:
"""Errors when instantiating AntaCatalog from a file."""
with pytest.raises((ValidationError, TypeError)) as exec_info:
AntaCatalog.parse(DATA_DIR / catalog_data["filename"])
with pytest.raises((ValidationError, TypeError, ValueError, OSError)) as exec_info:
AntaCatalog.parse(DATA_DIR / filename, file_format=file_format)
if isinstance(exec_info.value, ValidationError):
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
assert error in exec_info.value.errors()[0]["msg"]
else:
assert catalog_data["error"] in str(exec_info)
assert error in str(exec_info)
def test_parse_fail_parsing(self, caplog: pytest.LogCaptureFixture) -> None:
"""Errors when instantiating AntaCatalog from a file."""
@@ -241,25 +204,25 @@ class TestAntaCatalog:
assert "Unable to parse ANTA Test Catalog file" in message
assert "FileNotFoundError: [Errno 2] No such file or directory" in message
@pytest.mark.parametrize("catalog_data", CATALOG_FROM_LIST_FAIL_DATA, ids=generate_test_ids_list(CATALOG_FROM_LIST_FAIL_DATA))
def test_from_list_fail(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("tests", "error"), CATALOG_FROM_LIST_FAIL_PARAMS)
def test_from_list_fail(self, tests: list[tuple[type[AntaTest], AntaTest.Input | dict[str, Any] | None]], error: str) -> None:
"""Errors when instantiating AntaCatalog from a list of tuples."""
with pytest.raises(ValidationError) as exec_info:
AntaCatalog.from_list(catalog_data["tests"])
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
AntaCatalog.from_list(tests)
assert error in exec_info.value.errors()[0]["msg"]
@pytest.mark.parametrize("catalog_data", CATALOG_FROM_DICT_FAIL_DATA, ids=generate_test_ids_list(CATALOG_FROM_DICT_FAIL_DATA))
def test_from_dict_fail(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "error"), CATALOG_FROM_DICT_FAIL_PARAMS)
def test_from_dict_fail(self, filename: str, error: str) -> None:
"""Errors when instantiating AntaCatalog from a list of tuples."""
file = DATA_DIR / catalog_data["filename"]
with file.open(encoding="UTF-8") as file:
data = safe_load(file)
file = DATA_DIR / filename
with file.open(encoding="UTF-8") as f:
data = safe_load(f)
with pytest.raises((ValidationError, TypeError)) as exec_info:
AntaCatalog.from_dict(data)
if isinstance(exec_info.value, ValidationError):
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
assert error in exec_info.value.errors()[0]["msg"]
else:
assert catalog_data["error"] in str(exec_info)
assert error in str(exec_info)
def test_filename(self) -> None:
"""Test filename."""
@@ -268,34 +231,39 @@ class TestAntaCatalog:
catalog = AntaCatalog(filename=Path("test"))
assert catalog.filename == Path("test")
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
def test__tests_setter_success(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("filename", "file_format", "tests"), INIT_CATALOG_PARAMS)
def test__tests_setter_success(
self,
filename: str,
file_format: Literal["yaml", "json"],
tests: list[tuple[type[AntaTest], AntaTest.Input | dict[str, Any] | None]],
) -> None:
"""Success when setting AntaCatalog.tests from a list of tuples."""
catalog = AntaCatalog()
catalog.tests = [AntaTestDefinition(test=test, inputs=inputs) for test, inputs in catalog_data["tests"]]
assert len(catalog.tests) == len(catalog_data["tests"])
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
catalog.tests = [AntaTestDefinition(test=test, inputs=inputs) for test, inputs in tests]
assert len(catalog.tests) == len(tests)
for test_id, (test, inputs_data) in enumerate(tests):
assert catalog.tests[test_id].test == test
if inputs_data is not None:
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
assert inputs == catalog.tests[test_id].inputs
@pytest.mark.parametrize("catalog_data", TESTS_SETTER_FAIL_DATA, ids=generate_test_ids_list(TESTS_SETTER_FAIL_DATA))
def test__tests_setter_fail(self, catalog_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("tests", "error"), TESTS_SETTER_FAIL_PARAMS)
def test__tests_setter_fail(self, tests: list[Any], error: str) -> None:
"""Errors when setting AntaCatalog.tests from a list of tuples."""
catalog = AntaCatalog()
with pytest.raises(TypeError) as exec_info:
catalog.tests = catalog_data["tests"]
assert catalog_data["error"] in str(exec_info)
catalog.tests = tests
assert error in str(exec_info)
def test_build_indexes_all(self) -> None:
"""Test AntaCatalog.build_indexes()."""
catalog: AntaCatalog = AntaCatalog.parse(DATA_DIR / "test_catalog_with_tags.yml")
catalog.build_indexes()
assert len(catalog.tests_without_tags) == 5
assert len(catalog.tag_to_tests[None]) == 6
assert "leaf" in catalog.tag_to_tests
assert len(catalog.tag_to_tests["leaf"]) == 3
all_unique_tests = catalog.tests_without_tags
all_unique_tests = catalog.tag_to_tests[None]
for tests in catalog.tag_to_tests.values():
all_unique_tests.update(tests)
assert len(all_unique_tests) == 11
@@ -307,8 +275,8 @@ class TestAntaCatalog:
catalog.build_indexes({"VerifyUptime", "VerifyCoredump", "VerifyL3MTU"})
assert "leaf" in catalog.tag_to_tests
assert len(catalog.tag_to_tests["leaf"]) == 1
assert len(catalog.tests_without_tags) == 1
all_unique_tests = catalog.tests_without_tags
assert len(catalog.tag_to_tests[None]) == 1
all_unique_tests = catalog.tag_to_tests[None]
for tests in catalog.tag_to_tests.values():
all_unique_tests.update(tests)
assert len(all_unique_tests) == 4
@@ -323,6 +291,17 @@ class TestAntaCatalog:
tests = catalog.get_tests_by_tags(tags={"leaf", "spine"}, strict=True)
assert len(tests) == 1
def test_merge_catalogs(self) -> None:
"""Test the merge_catalogs function."""
# Load catalogs of different sizes
small_catalog = AntaCatalog.parse(DATA_DIR / "test_catalog.yml")
medium_catalog = AntaCatalog.parse(DATA_DIR / "test_catalog_medium.yml")
tagged_catalog = AntaCatalog.parse(DATA_DIR / "test_catalog_with_tags.yml")
# Merge the catalogs and check the number of tests
final_catalog = AntaCatalog.merge_catalogs([small_catalog, medium_catalog, tagged_catalog])
assert len(final_catalog.tests) == len(small_catalog.tests) + len(medium_catalog.tests) + len(tagged_catalog.tests)
def test_merge(self) -> None:
"""Test AntaCatalog.merge()."""
catalog1: AntaCatalog = AntaCatalog.parse(DATA_DIR / "test_catalog.yml")
@@ -332,11 +311,15 @@ class TestAntaCatalog:
catalog3: AntaCatalog = AntaCatalog.parse(DATA_DIR / "test_catalog_medium.yml")
assert len(catalog3.tests) == 228
assert len(catalog1.merge(catalog2).tests) == 2
with pytest.deprecated_call():
merged_catalog = catalog1.merge(catalog2)
assert len(merged_catalog.tests) == 2
assert len(catalog1.tests) == 1
assert len(catalog2.tests) == 1
assert len(catalog2.merge(catalog3).tests) == 229
with pytest.deprecated_call():
merged_catalog = catalog2.merge(catalog3)
assert len(merged_catalog.tests) == 229
assert len(catalog2.tests) == 1
assert len(catalog3.tests) == 228
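The new class method merge_catalogs combines any number of catalogs without mutating them, while the per-instance merge() now emits a DeprecationWarning. A short usage sketch; the DATA_DIR location is an assumption based on the fixture names used in these tests:

from pathlib import Path

from anta.catalog import AntaCatalog

DATA_DIR = Path("tests/data")  # assumed fixture location

catalogs = [
    AntaCatalog.parse(DATA_DIR / "test_catalog.yml"),
    AntaCatalog.parse(DATA_DIR / "test_catalog_medium.yml"),
    AntaCatalog.parse(DATA_DIR / "test_catalog_with_tags.yml"),
]
# Preferred over catalog1.merge(catalog2): combines all catalogs in one call.
merged = AntaCatalog.merge_catalogs(catalogs)
assert len(merged.tests) == sum(len(c.tests) for c in catalogs)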

View file

@@ -17,6 +17,7 @@ import pytest
from anta.custom_types import (
REGEX_BGP_IPV4_MPLS_VPN,
REGEX_BGP_IPV4_UNICAST,
REGEX_TYPE_PORTCHANNEL,
REGEXP_BGP_IPV4_MPLS_LABELS,
REGEXP_BGP_L2VPN_AFI,
REGEXP_EOS_BLACKLIST_CMDS,
@@ -29,6 +30,7 @@ from anta.custom_types import (
bgp_multiprotocol_capabilities_abbreviations,
interface_autocomplete,
interface_case_sensitivity,
validate_regex,
)
# ------------------------------------------------------------------------------
@@ -140,6 +142,22 @@ def test_regexp_type_vxlan_src_interface() -> None:
assert re.match(REGEXP_TYPE_VXLAN_SRC_INTERFACE, "Loopback9000") is None
def test_regexp_type_portchannel() -> None:
"""Test REGEX_TYPE_PORTCHANNEL."""
# Test strings that should match the pattern
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel5") is not None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel100") is not None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel999") is not None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel1000") is not None
# Test strings that should not match the pattern
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel") is None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port_Channel") is None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port_Channel1000") is None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port_Channel5/1") is None
assert re.match(REGEX_TYPE_PORTCHANNEL, "Port-Channel-100") is None
def test_regexp_type_hostname() -> None:
"""Test REGEXP_TYPE_HOSTNAME."""
# Test strings that should match the pattern
@@ -200,6 +218,8 @@ def test_interface_autocomplete_success() -> None:
assert interface_autocomplete("eth2") == "Ethernet2"
assert interface_autocomplete("po3") == "Port-Channel3"
assert interface_autocomplete("lo4") == "Loopback4"
assert interface_autocomplete("Po1000") == "Port-Channel1000"
assert interface_autocomplete("Po 1000") == "Port-Channel1000"
def test_interface_autocomplete_no_alias() -> None:
@@ -262,3 +282,36 @@ def test_interface_case_sensitivity_uppercase() -> None:
assert interface_case_sensitivity("ETHERNET") == "ETHERNET"
assert interface_case_sensitivity("VLAN") == "VLAN"
assert interface_case_sensitivity("LOOPBACK") == "LOOPBACK"
@pytest.mark.parametrize(
"str_input",
[
REGEX_BGP_IPV4_MPLS_VPN,
REGEX_BGP_IPV4_UNICAST,
REGEX_TYPE_PORTCHANNEL,
REGEXP_BGP_IPV4_MPLS_LABELS,
REGEXP_BGP_L2VPN_AFI,
REGEXP_INTERFACE_ID,
REGEXP_PATH_MARKERS,
REGEXP_TYPE_EOS_INTERFACE,
REGEXP_TYPE_HOSTNAME,
REGEXP_TYPE_VXLAN_SRC_INTERFACE,
],
)
def test_validate_regex_valid(str_input: str) -> None:
"""Test validate_regex with valid regex."""
assert validate_regex(str_input) == str_input
@pytest.mark.parametrize(
("str_input", "error"),
[
pytest.param("[", "Invalid regex: unterminated character set at position 0", id="unterminated character"),
pytest.param("\\", r"Invalid regex: bad escape \(end of pattern\) at position 0", id="bad escape"),
],
)
def test_validate_regex_invalid(str_input: str, error: str) -> None:
"""Test validate_regex with invalid regex."""
with pytest.raises(ValueError, match=error):
validate_regex(str_input)
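These parametrized cases expect validate_regex to return the input unchanged when it compiles and to raise ValueError carrying the re.error message otherwise. A hedged re-implementation sketch matching those expectations (not necessarily the shipped code):

import re


def validate_regex_sketch(value: str) -> str:
    try:
        re.compile(value)
    except re.error as exc:
        msg = f"Invalid regex: {exc}"
        raise ValueError(msg) from exc
    return value


assert validate_regex_sketch(r"^Port-Channel[0-9]+$") == r"^Port-Channel[0-9]+$"
# re.compile("[") raises re.error("unterminated character set at position 0"),
# so the ValueError text lines up with the error strings parametrized above.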

View file

@@ -10,129 +10,51 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
import httpx
import pytest
from asyncssh import SSHClientConnection, SSHClientConnectionOptions
from httpx import ConnectError, HTTPError
from rich import print as rprint
import asynceapi
from anta.device import AntaDevice, AsyncEOSDevice
from anta.models import AntaCommand
from tests.lib.fixture import COMMAND_OUTPUT
from tests.lib.utils import generate_test_ids_list
from asynceapi import EapiCommandError
from tests.units.conftest import COMMAND_OUTPUT
if TYPE_CHECKING:
from _pytest.mark.structures import ParameterSet
INIT_DATA: list[dict[str, Any]] = [
{
"name": "no name, no port",
"device": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
},
"expected": {"name": "42.42.42.42"},
},
{
"name": "no name, port",
"device": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
"port": 666,
},
"expected": {"name": "42.42.42.42:666"},
},
{
"name": "name",
"device": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
"name": "test.anta.ninja",
"disable_cache": True,
},
"expected": {"name": "test.anta.ninja"},
},
{
"name": "insecure",
"device": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
"name": "test.anta.ninja",
"insecure": True,
},
"expected": {"name": "test.anta.ninja"},
},
INIT_PARAMS: list[ParameterSet] = [
pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"name": "42.42.42.42"}, id="no name, no port"),
pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666}, {"name": "42.42.42.42:666"}, id="no name, port"),
pytest.param(
{"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "disable_cache": True}, {"name": "test.anta.ninja"}, id="name"
),
pytest.param(
{"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "insecure": True}, {"name": "test.anta.ninja"}, id="insecure"
),
]
EQUALITY_DATA: list[dict[str, Any]] = [
{
"name": "equal",
"device1": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
},
"device2": {
"host": "42.42.42.42",
"username": "anta",
"password": "blah",
},
"expected": True,
},
{
"name": "equals-name",
"device1": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
"name": "device1",
},
"device2": {
"host": "42.42.42.42",
"username": "plop",
"password": "anta",
"name": "device2",
},
"expected": True,
},
{
"name": "not-equal-port",
"device1": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
},
"device2": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
"port": 666,
},
"expected": False,
},
{
"name": "not-equal-host",
"device1": {
"host": "42.42.42.41",
"username": "anta",
"password": "anta",
},
"device2": {
"host": "42.42.42.42",
"username": "anta",
"password": "anta",
},
"expected": False,
},
EQUALITY_PARAMS: list[ParameterSet] = [
pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"host": "42.42.42.42", "username": "anta", "password": "blah"}, True, id="equal"),
pytest.param(
{"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "device1"},
{"host": "42.42.42.42", "username": "plop", "password": "anta", "name": "device2"},
True,
id="equals-name",
),
pytest.param(
{"host": "42.42.42.42", "username": "anta", "password": "anta"},
{"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666},
False,
id="not-equal-port",
),
pytest.param(
{"host": "42.42.42.41", "username": "anta", "password": "anta"}, {"host": "42.42.42.42", "username": "anta", "password": "anta"}, False, id="not-equal-host"
),
]
ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
{
"name": "command",
"device": {},
"command": {
ASYNCEAPI_COLLECT_PARAMS: list[ParameterSet] = [
pytest.param(
{},
{
"command": "show version",
"patch_kwargs": {
"return_value": [
@@ -155,11 +77,11 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
"memTotal": 8099732,
"memFree": 4989568,
"isIntlVersion": False,
},
],
}
]
},
},
"expected": {
{
"output": {
"mfgName": "Arista",
"modelName": "DCS-7280CR3-32P4-F",
@@ -182,11 +104,11 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
},
"errors": [],
},
},
{
"name": "enable",
"device": {"enable": True},
"command": {
id="command",
),
pytest.param(
{"enable": True},
{
"command": "show version",
"patch_kwargs": {
"return_value": [
@@ -211,10 +133,10 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
"memFree": 4989568,
"isIntlVersion": False,
},
],
]
},
},
"expected": {
{
"output": {
"mfgName": "Arista",
"modelName": "DCS-7280CR3-32P4-F",
@@ -237,11 +159,11 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
},
"errors": [],
},
},
{
"name": "enable password",
"device": {"enable": True, "enable_password": "anta"},
"command": {
id="enable",
),
pytest.param(
{"enable": True, "enable_password": "anta"},
{
"command": "show version",
"patch_kwargs": {
"return_value": [
@@ -266,10 +188,10 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
"memFree": 4989568,
"isIntlVersion": False,
},
],
]
},
},
"expected": {
{
"output": {
"mfgName": "Arista",
"modelName": "DCS-7280CR3-32P4-F",
@@ -292,11 +214,11 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
},
"errors": [],
},
},
{
"name": "revision",
"device": {},
"command": {
id="enable password",
),
pytest.param(
{},
{
"command": "show version",
"revision": 3,
"patch_kwargs": {
@@ -322,10 +244,10 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
"memFree": 4989568,
"isIntlVersion": False,
},
],
]
},
},
"expected": {
{
"output": {
"mfgName": "Arista",
"modelName": "DCS-7280CR3-32P4-F",
@@ -348,77 +270,47 @@ ASYNCEAPI_COLLECT_DATA: list[dict[str, Any]] = [
},
"errors": [],
},
},
{
"name": "asynceapi.EapiCommandError",
"device": {},
"command": {
id="revision",
),
pytest.param(
{},
{
"command": "show version",
"patch_kwargs": {
"side_effect": asynceapi.EapiCommandError(
"side_effect": EapiCommandError(
passed=[],
failed="show version",
errors=["Authorization denied for command 'show version'"],
errmsg="Invalid command",
not_exec=[],
),
)
},
},
"expected": {"output": None, "errors": ["Authorization denied for command 'show version'"]},
},
{
"name": "httpx.HTTPError",
"device": {},
"command": {
"command": "show version",
"patch_kwargs": {"side_effect": httpx.HTTPError(message="404")},
},
"expected": {"output": None, "errors": ["HTTPError: 404"]},
},
{
"name": "httpx.ConnectError",
"device": {},
"command": {
"command": "show version",
"patch_kwargs": {"side_effect": httpx.ConnectError(message="Cannot open port")},
},
"expected": {"output": None, "errors": ["ConnectError: Cannot open port"]},
},
{"output": None, "errors": ["Authorization denied for command 'show version'"]},
id="asynceapi.EapiCommandError",
),
pytest.param(
{},
{"command": "show version", "patch_kwargs": {"side_effect": HTTPError("404")}},
{"output": None, "errors": ["HTTPError: 404"]},
id="httpx.HTTPError",
),
pytest.param(
{},
{"command": "show version", "patch_kwargs": {"side_effect": ConnectError("Cannot open port")}},
{"output": None, "errors": ["ConnectError: Cannot open port"]},
id="httpx.ConnectError",
),
]
ASYNCEAPI_COPY_DATA: list[dict[str, Any]] = [
{
"name": "from",
"device": {},
"copy": {
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
"destination": Path(),
"direction": "from",
},
},
{
"name": "to",
"device": {},
"copy": {
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
"destination": Path(),
"direction": "to",
},
},
{
"name": "wrong",
"device": {},
"copy": {
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
"destination": Path(),
"direction": "wrong",
},
},
ASYNCEAPI_COPY_PARAMS: list[ParameterSet] = [
pytest.param({}, {"sources": [Path("/mnt/flash"), Path("/var/log/agents")], "destination": Path(), "direction": "from"}, id="from"),
pytest.param({}, {"sources": [Path("/mnt/flash"), Path("/var/log/agents")], "destination": Path(), "direction": "to"}, id="to"),
pytest.param({}, {"sources": [Path("/mnt/flash"), Path("/var/log/agents")], "destination": Path(), "direction": "wrong"}, id="wrong"),
]
REFRESH_DATA: list[dict[str, Any]] = [
{
"name": "established",
"device": {},
"patch_kwargs": (
REFRESH_PARAMS: list[ParameterSet] = [
pytest.param(
{},
(
{"return_value": True},
{
"return_value": [
@@ -442,15 +334,15 @@ REFRESH_DATA: list[dict[str, Any]] = [
"memFree": 4989568,
"isIntlVersion": False,
}
],
]
},
),
"expected": {"is_online": True, "established": True, "hw_model": "DCS-7280CR3-32P4-F"},
},
{
"name": "is not online",
"device": {},
"patch_kwargs": (
{"is_online": True, "established": True, "hw_model": "DCS-7280CR3-32P4-F"},
id="established",
),
pytest.param(
{},
(
{"return_value": False},
{
"return_value": {
@@ -472,15 +364,15 @@ REFRESH_DATA: list[dict[str, Any]] = [
"memTotal": 8099732,
"memFree": 4989568,
"isIntlVersion": False,
},
}
},
),
"expected": {"is_online": False, "established": False, "hw_model": None},
},
{
"name": "cannot parse command",
"device": {},
"patch_kwargs": (
{"is_online": False, "established": False, "hw_model": None},
id="is not online",
),
pytest.param(
{},
(
{"return_value": True},
{
"return_value": [
@@ -503,108 +395,87 @@ REFRESH_DATA: list[dict[str, Any]] = [
"memFree": 4989568,
"isIntlVersion": False,
}
],
]
},
),
"expected": {"is_online": True, "established": False, "hw_model": None},
},
{
"name": "asynceapi.EapiCommandError",
"device": {},
"patch_kwargs": (
{"is_online": True, "established": False, "hw_model": None},
id="cannot parse command",
),
pytest.param(
{},
(
{"return_value": True},
{
"side_effect": asynceapi.EapiCommandError(
"side_effect": EapiCommandError(
passed=[],
failed="show version",
errors=["Authorization denied for command 'show version'"],
errmsg="Invalid command",
not_exec=[],
),
)
},
),
"expected": {"is_online": True, "established": False, "hw_model": None},
},
{
"name": "httpx.HTTPError",
"device": {},
"patch_kwargs": (
{"is_online": True, "established": False, "hw_model": None},
id="asynceapi.EapiCommandError",
),
pytest.param(
{},
({"return_value": True}, {"side_effect": HTTPError("404")}),
{"is_online": True, "established": False, "hw_model": None},
id="httpx.HTTPError",
),
pytest.param(
{},
({"return_value": True}, {"side_effect": ConnectError("Cannot open port")}),
{"is_online": True, "established": False, "hw_model": None},
id="httpx.ConnectError",
),
pytest.param(
{},
(
{"return_value": True},
{"side_effect": httpx.HTTPError(message="404")},
{
"return_value": [
{
"mfgName": "Arista",
"modelName": "",
}
]
},
),
"expected": {"is_online": True, "established": False, "hw_model": None},
},
{
"name": "httpx.ConnectError",
"device": {},
"patch_kwargs": (
{"return_value": True},
{"side_effect": httpx.ConnectError(message="Cannot open port")},
),
"expected": {"is_online": True, "established": False, "hw_model": None},
},
{"is_online": True, "established": False, "hw_model": ""},
id="modelName empty string",
),
]
COLLECT_DATA: list[dict[str, Any]] = [
{
"name": "device cache enabled, command cache enabled, no cache hit",
"device": {"disable_cache": False},
"command": {
"command": "show version",
"use_cache": True,
},
"expected": {"cache_hit": False},
},
{
"name": "device cache enabled, command cache enabled, cache hit",
"device": {"disable_cache": False},
"command": {
"command": "show version",
"use_cache": True,
},
"expected": {"cache_hit": True},
},
{
"name": "device cache disabled, command cache enabled",
"device": {"disable_cache": True},
"command": {
"command": "show version",
"use_cache": True,
},
"expected": {},
},
{
"name": "device cache enabled, command cache disabled, cache has command",
"device": {"disable_cache": False},
"command": {
"command": "show version",
"use_cache": False,
},
"expected": {"cache_hit": True},
},
{
"name": "device cache enabled, command cache disabled, cache does not have data",
"device": {
"disable_cache": False,
},
"command": {
"command": "show version",
"use_cache": False,
},
"expected": {"cache_hit": False},
},
{
"name": "device cache disabled, command cache disabled",
"device": {
"disable_cache": True,
},
"command": {
"command": "show version",
"use_cache": False,
},
"expected": {},
},
COLLECT_PARAMS: list[ParameterSet] = [
pytest.param(
{"disable_cache": False},
{"command": "show version", "use_cache": True},
{"cache_hit": False},
id="device cache enabled, command cache enabled, no cache hit",
),
pytest.param(
{"disable_cache": False},
{"command": "show version", "use_cache": True},
{"cache_hit": True},
id="device cache enabled, command cache enabled, cache hit",
),
pytest.param({"disable_cache": True}, {"command": "show version", "use_cache": True}, {}, id="device cache disabled, command cache enabled"),
pytest.param(
{"disable_cache": False},
{"command": "show version", "use_cache": False},
{"cache_hit": True},
id="device cache enabled, command cache disabled, cache has command",
),
pytest.param(
{"disable_cache": False},
{"command": "show version", "use_cache": False},
{"cache_hit": False},
id="device cache enabled, command cache disabled, cache does not have data",
),
pytest.param({"disable_cache": True}, {"command": "show version", "use_cache": False}, {}, id="device cache disabled, command cache disabled"),
]
CACHE_STATS_DATA: list[ParameterSet] = [
CACHE_STATS_PARAMS: list[ParameterSet] = [
pytest.param({"disable_cache": False}, {"total_commands_sent": 0, "cache_hits": 0, "cache_hit_ratio": "0.00%"}, id="with_cache"),
pytest.param({"disable_cache": True}, None, id="without_cache"),
]
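The with_cache/without_cache expectations suggest a cache_statistics view that reports a formatted hit ratio and degrades to None when caching is disabled. A small illustrative sketch; the field names follow the expected dicts above while the counter layout is an assumption:

from __future__ import annotations


def cache_statistics(hit_miss_ratio: dict[str, int] | None) -> dict[str, object] | None:
    # hit_miss_ratio mirrors the counters kept by the device cache; None means caching is off.
    if hit_miss_ratio is None:
        return None
    total = hit_miss_ratio.get("total", 0)
    hits = hit_miss_ratio.get("hits", 0)
    ratio = (hits / total) * 100 if total else 0
    return {"total_commands_sent": total, "cache_hits": hits, "cache_hit_ratio": f"{ratio:.2f}%"}


assert cache_statistics({"total": 0, "hits": 0}) == {"total_commands_sent": 0, "cache_hits": 0, "cache_hit_ratio": "0.00%"}
assert cache_statistics(None) is None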
@@ -613,48 +484,42 @@ CACHE_STATS_DATA: list[ParameterSet] = [
class TestAntaDevice:
"""Test for anta.device.AntaDevice Abstract class."""
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("device", "command_data", "expected_data"),
((d["device"], d["command"], d["expected"]) for d in COLLECT_DATA),
indirect=["device"],
ids=generate_test_ids_list(COLLECT_DATA),
)
async def test_collect(self, device: AntaDevice, command_data: dict[str, Any], expected_data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("device", "command", "expected"), COLLECT_PARAMS, indirect=["device"])
async def test_collect(self, device: AntaDevice, command: dict[str, Any], expected: dict[str, Any]) -> None:
"""Test AntaDevice.collect behavior."""
command = AntaCommand(command=command_data["command"], use_cache=command_data["use_cache"])
cmd = AntaCommand(command=command["command"], use_cache=command["use_cache"])
# Dummy output for cache hit
cached_output = "cached_value"
if device.cache is not None and expected_data["cache_hit"] is True:
await device.cache.set(command.uid, cached_output)
if device.cache is not None and expected["cache_hit"] is True:
await device.cache.set(cmd.uid, cached_output)
await device.collect(command)
await device.collect(cmd)
if device.cache is not None: # device_cache is enabled
current_cached_data = await device.cache.get(command.uid)
if command.use_cache is True: # command is allowed to use cache
if expected_data["cache_hit"] is True:
assert command.output == cached_output
current_cached_data = await device.cache.get(cmd.uid)
if cmd.use_cache is True: # command is allowed to use cache
if expected["cache_hit"] is True:
assert cmd.output == cached_output
assert current_cached_data == cached_output
assert device.cache.hit_miss_ratio["hits"] == 2
else:
assert command.output == COMMAND_OUTPUT
assert cmd.output == COMMAND_OUTPUT
assert current_cached_data == COMMAND_OUTPUT
assert device.cache.hit_miss_ratio["hits"] == 1
else: # command is not allowed to use cache
device._collect.assert_called_once_with(command=command, collection_id=None) # type: ignore[attr-defined] # pylint: disable=protected-access
assert command.output == COMMAND_OUTPUT
if expected_data["cache_hit"] is True:
device._collect.assert_called_once_with(command=cmd, collection_id=None) # type: ignore[attr-defined]
assert cmd.output == COMMAND_OUTPUT
if expected["cache_hit"] is True:
assert current_cached_data == cached_output
else:
assert current_cached_data is None
else: # device is disabled
assert device.cache is None
device._collect.assert_called_once_with(command=command, collection_id=None) # type: ignore[attr-defined] # pylint: disable=protected-access
device._collect.assert_called_once_with(command=cmd, collection_id=None) # type: ignore[attr-defined]
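The assertions above boil down to a simple rule: collect() may serve a command from the per-device cache only when the device cache exists and the command allows caching; otherwise it always falls through to _collect(). The following is a minimal, self-contained sketch of that decision flow, assuming a toy in-memory cache and toy command/device stand-ins; it is illustrative only and not ANTA's actual implementation.

from __future__ import annotations

import asyncio
from dataclasses import dataclass, field
from typing import Any


@dataclass
class ToyCommand:
    """Stand-in for AntaCommand carrying only the fields used by the sketch."""
    command: str
    use_cache: bool = True
    output: Any = None

    @property
    def uid(self) -> str:
        return self.command


@dataclass
class ToyDevice:
    """Stand-in for AntaDevice with an optional dict-based cache."""
    cache: dict[str, Any] | None = field(default_factory=dict)

    async def _collect(self, command: ToyCommand, collection_id: str | None = None) -> None:
        command.output = {"collected": command.command}

    async def collect(self, command: ToyCommand) -> None:
        # The cache is consulted only when both the device cache and the command allow it.
        if self.cache is not None and command.use_cache:
            if command.uid in self.cache:
                command.output = self.cache[command.uid]  # cache hit
                return
            await self._collect(command=command)
            self.cache[command.uid] = command.output  # populate cache on miss
        else:
            await self._collect(command=command)  # cache bypassed


cmd = ToyCommand(command="show version")
asyncio.run(ToyDevice().collect(cmd))
print(cmd.output)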
@pytest.mark.parametrize(("device", "expected"), CACHE_STATS_DATA, indirect=["device"])
@pytest.mark.parametrize(("device", "expected"), CACHE_STATS_PARAMS, indirect=["device"])
def test_cache_statistics(self, device: AntaDevice, expected: dict[str, Any] | None) -> None:
"""Verify that when cache statistics attribute does not exist.
@@ -666,42 +531,39 @@ class TestAntaDevice:
class TestAsyncEOSDevice:
"""Test for anta.device.AsyncEOSDevice."""
@pytest.mark.parametrize("data", INIT_DATA, ids=generate_test_ids_list(INIT_DATA))
def test__init__(self, data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("device", "expected"), INIT_PARAMS)
def test__init__(self, device: dict[str, Any], expected: dict[str, Any]) -> None:
"""Test the AsyncEOSDevice constructor."""
device = AsyncEOSDevice(**data["device"])
dev = AsyncEOSDevice(**device)
assert device.name == data["expected"]["name"]
if data["device"].get("disable_cache") is True:
assert device.cache is None
assert device.cache_locks is None
assert dev.name == expected["name"]
if device.get("disable_cache") is True:
assert dev.cache is None
assert dev.cache_locks is None
else: # False or None
assert device.cache is not None
assert device.cache_locks is not None
hash(device)
assert dev.cache is not None
assert dev.cache_locks is not None
hash(dev)
with patch("anta.device.__DEBUG__", new=True):
rprint(device)
rprint(dev)
@pytest.mark.parametrize("data", EQUALITY_DATA, ids=generate_test_ids_list(EQUALITY_DATA))
def test__eq(self, data: dict[str, Any]) -> None:
@pytest.mark.parametrize(("device1", "device2", "expected"), EQUALITY_PARAMS)
def test__eq(self, device1: dict[str, Any], device2: dict[str, Any], expected: bool) -> None:
"""Test the AsyncEOSDevice equality."""
device1 = AsyncEOSDevice(**data["device1"])
device2 = AsyncEOSDevice(**data["device2"])
if data["expected"]:
assert device1 == device2
dev1 = AsyncEOSDevice(**device1)
dev2 = AsyncEOSDevice(**device2)
if expected:
assert dev1 == dev2
else:
assert device1 != device2
assert dev1 != dev2
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("async_device", "patch_kwargs", "expected"),
((d["device"], d["patch_kwargs"], d["expected"]) for d in REFRESH_DATA),
ids=generate_test_ids_list(REFRESH_DATA),
REFRESH_PARAMS,
indirect=["async_device"],
)
async def test_refresh(self, async_device: AsyncEOSDevice, patch_kwargs: list[dict[str, Any]], expected: dict[str, Any]) -> None:
# pylint: disable=protected-access
"""Test AsyncEOSDevice.refresh()."""
with patch.object(async_device._session, "check_connection", **patch_kwargs[0]), patch.object(async_device._session, "cli", **patch_kwargs[1]):
await async_device.refresh()
@@ -712,15 +574,12 @@ class TestAsyncEOSDevice:
assert async_device.established == expected["established"]
assert async_device.hw_model == expected["hw_model"]
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("async_device", "command", "expected"),
((d["device"], d["command"], d["expected"]) for d in ASYNCEAPI_COLLECT_DATA),
ids=generate_test_ids_list(ASYNCEAPI_COLLECT_DATA),
ASYNCEAPI_COLLECT_PARAMS,
indirect=["async_device"],
)
async def test__collect(self, async_device: AsyncEOSDevice, command: dict[str, Any], expected: dict[str, Any]) -> None:
# pylint: disable=protected-access
"""Test AsyncEOSDevice._collect()."""
cmd = AntaCommand(command=command["command"], revision=command["revision"]) if "revision" in command else AntaCommand(command=command["command"])
with patch.object(async_device._session, "cli", **command["patch_kwargs"]):
@@ -741,15 +600,13 @@ class TestAsyncEOSDevice:
commands.append({"cmd": cmd.command, "revision": cmd.revision})
else:
commands.append({"cmd": cmd.command})
async_device._session.cli.assert_called_once_with(commands=commands, ofmt=cmd.ofmt, version=cmd.version, req_id=f"ANTA-{collection_id}-{id(cmd)}") # type: ignore[attr-defined] # asynceapi.Device.cli is patched # pylint: disable=line-too-long
async_device._session.cli.assert_called_once_with(commands=commands, ofmt=cmd.ofmt, version=cmd.version, req_id=f"ANTA-{collection_id}-{id(cmd)}") # type: ignore[attr-defined] # asynceapi.Device.cli is patched
assert cmd.output == expected["output"]
assert cmd.errors == expected["errors"]
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("async_device", "copy"),
((d["device"], d["copy"]) for d in ASYNCEAPI_COPY_DATA),
ids=generate_test_ids_list(ASYNCEAPI_COPY_DATA),
ASYNCEAPI_COPY_PARAMS,
indirect=["async_device"],
)
async def test_copy(self, async_device: AsyncEOSDevice, copy: dict[str, Any]) -> None:

View file

@@ -58,7 +58,6 @@ def test_anta_log_exception(
debug_value: bool,
expected_message: str,
) -> None:
# pylint: disable=too-many-arguments
"""Test anta_log_exception."""
if calling_logger is not None:
# https://github.com/pytest-dev/pytest/issues/3697

View file

@@ -8,14 +8,16 @@
from __future__ import annotations
import asyncio
import sys
from typing import TYPE_CHECKING, Any, ClassVar
import pytest
from anta.decorators import deprecated_test, skip_on_platforms
from anta.models import AntaCommand, AntaTemplate, AntaTest
from tests.lib.fixture import DEVICE_HW_MODEL
from tests.lib.utils import generate_test_ids
from anta.result_manager.models import AntaTestStatus
from tests.units.anta_tests.conftest import build_test_id
from tests.units.conftest import DEVICE_HW_MODEL
if TYPE_CHECKING:
from anta.device import AntaDevice
@@ -302,6 +304,15 @@ class DeprecatedTestWithNewTest(AntaTest):
self.result.is_success()
class FakeTestWithMissingTest(AntaTest):
"""ANTA test with missing test() method implementation."""
name = "FakeTestWithMissingTest"
description = "ANTA test with missing test() method implementation"
categories: ClassVar[list[str]] = []
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
ANTATEST_DATA: list[dict[str, Any]] = [
{
"name": "no input",
@@ -507,17 +518,17 @@ ANTATEST_DATA: list[dict[str, Any]] = [
},
]
BLACKLIST_COMMANDS_PARAMS = ["reload", "reload --force", "write", "wr mem"]
class TestAntaTest:
"""Test for anta.models.AntaTest."""
def test__init_subclass__name(self) -> None:
def test__init_subclass__(self) -> None:
"""Test __init_subclass__."""
# Pylint detects all the classes in here as unused which is on purpose
# pylint: disable=unused-variable
with pytest.raises(NotImplementedError) as exec_info:
class WrongTestNoName(AntaTest):
class _WrongTestNoName(AntaTest):
"""ANTA test that is missing a name."""
description = "ANTA test that is missing a name"
@@ -528,11 +539,11 @@ class TestAntaTest:
def test(self) -> None:
self.result.is_success()
assert exec_info.value.args[0] == "Class tests.units.test_models.WrongTestNoName is missing required class attribute name"
assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoName is missing required class attribute name"
with pytest.raises(NotImplementedError) as exec_info:
class WrongTestNoDescription(AntaTest):
class _WrongTestNoDescription(AntaTest):
"""ANTA test that is missing a description."""
name = "WrongTestNoDescription"
@@ -543,11 +554,11 @@ class TestAntaTest:
def test(self) -> None:
self.result.is_success()
assert exec_info.value.args[0] == "Class tests.units.test_models.WrongTestNoDescription is missing required class attribute description"
assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoDescription is missing required class attribute description"
with pytest.raises(NotImplementedError) as exec_info:
class WrongTestNoCategories(AntaTest):
class _WrongTestNoCategories(AntaTest):
"""ANTA test that is missing categories."""
name = "WrongTestNoCategories"
@@ -558,11 +569,11 @@ class TestAntaTest:
def test(self) -> None:
self.result.is_success()
assert exec_info.value.args[0] == "Class tests.units.test_models.WrongTestNoCategories is missing required class attribute categories"
assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCategories is missing required class attribute categories"
with pytest.raises(NotImplementedError) as exec_info:
class WrongTestNoCommands(AntaTest):
class _WrongTestNoCommands(AntaTest):
"""ANTA test that is missing commands."""
name = "WrongTestNoCommands"
@@ -573,22 +584,34 @@ class TestAntaTest:
def test(self) -> None:
self.result.is_success()
assert exec_info.value.args[0] == "Class tests.units.test_models.WrongTestNoCommands is missing required class attribute commands"
assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCommands is missing required class attribute commands"
def test_abc(self) -> None:
"""Test that an error is raised if AntaTest is not implemented."""
with pytest.raises(TypeError) as exec_info:
FakeTestWithMissingTest() # type: ignore[abstract,call-arg]
msg = (
"Can't instantiate abstract class FakeTestWithMissingTest without an implementation for abstract method 'test'"
if sys.version_info >= (3, 12)
else "Can't instantiate abstract class FakeTestWithMissingTest with abstract method test"
)
assert exec_info.value.args[0] == msg
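The four NotImplementedError checks and the abstract-method check above together define what a concrete test class must provide: name, description, categories, commands and a test() implementation. For reference, a minimal subclass satisfying all of them looks like the sketch below; the class name and the command are placeholders, following the same pattern as the fake tests defined in this module.

from __future__ import annotations

from typing import ClassVar

from anta.models import AntaCommand, AntaTemplate, AntaTest


class VerifyExample(AntaTest):
    """Placeholder test carrying every required class attribute."""

    name = "VerifyExample"
    description = "Placeholder test carrying every required class attribute"
    categories: ClassVar[list[str]] = ["system"]
    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version")]

    @AntaTest.anta_test
    def test(self) -> None:
        # Providing test() satisfies the abstract method enforced by AntaTest.
        self.result.is_success()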
def _assert_test(self, test: AntaTest, expected: dict[str, Any]) -> None:
assert test.result.result == expected["result"]
if "messages" in expected:
assert len(test.result.messages) == len(expected["messages"])
for result_msg, expected_msg in zip(test.result.messages, expected["messages"]): # NOTE: zip(strict=True) has been added in Python 3.10
assert expected_msg in result_msg
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=generate_test_ids(ANTATEST_DATA))
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=build_test_id)
def test__init__(self, device: AntaDevice, data: dict[str, Any]) -> None:
"""Test the AntaTest constructor."""
expected = data["expected"]["__init__"]
test = data["test"](device, inputs=data["inputs"])
self._assert_test(test, expected)
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=generate_test_ids(ANTATEST_DATA))
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=build_test_id)
def test_test(self, device: AntaDevice, data: dict[str, Any]) -> None:
"""Test the AntaTest.test method."""
expected = data["expected"]["test"]
@@ -596,38 +619,42 @@ class TestAntaTest:
asyncio.run(test.test())
self._assert_test(test, expected)
@pytest.mark.parametrize("command", BLACKLIST_COMMANDS_PARAMS)
def test_blacklist(self, device: AntaDevice, command: str) -> None:
"""Test that blacklisted commands are not collected."""
ANTATEST_BLACKLIST_DATA = ["reload", "reload --force", "write", "wr mem"]
class FakeTestWithBlacklist(AntaTest):
"""Fake Test for blacklist."""
name = "FakeTestWithBlacklist"
description = "ANTA test that has blacklisted command"
categories: ClassVar[list[str]] = []
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command=command)]
@pytest.mark.parametrize("data", ANTATEST_BLACKLIST_DATA)
def test_blacklist(device: AntaDevice, data: str) -> None:
"""Test for blacklisting function."""
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
class FakeTestWithBlacklist(AntaTest):
"""Fake Test for blacklist."""
test = FakeTestWithBlacklist(device)
asyncio.run(test.test())
assert test.result.result == AntaTestStatus.ERROR
assert f"<{command}> is blocked for security reason" in test.result.messages
assert test.instance_commands[0].collected is False
name = "FakeTestWithBlacklist"
description = "ANTA test that has blacklisted command"
categories: ClassVar[list[str]] = []
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command=data)]
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
test_instance = FakeTestWithBlacklist(device)
# Run the test() method
asyncio.run(test_instance.test())
assert test_instance.result.result == "error"
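Both versions of the blacklist test expect destructive commands such as reload or wr mem to be refused before collection, with the test reporting an error instead of running the command. A standalone sketch of such a guard is shown below; the patterns are assumptions chosen for illustration and not ANTA's actual blacklist.

import re

# Assumed patterns for illustration only; ANTA's real blacklist may differ.
BLOCKED_PATTERNS = (r"^reload", r"^wr(ite)?(\s|$)")


def is_blocked(command: str) -> bool:
    """Return True when a CLI command matches one of the blocked patterns."""
    return any(re.match(pattern, command) for pattern in BLOCKED_PATTERNS)


for cli in ("reload", "reload --force", "write", "wr mem", "show version"):
    print(f"{cli!r} -> {'blocked' if is_blocked(cli) else 'allowed'}")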
def test_result_overwrite(self, device: AntaDevice) -> None:
"""Test the AntaTest.Input.ResultOverwrite model."""
test = FakeTest(device, inputs={"result_overwrite": {"categories": ["hardware"], "description": "a description", "custom_field": "a custom field"}})
asyncio.run(test.test())
assert test.result.result == AntaTestStatus.SUCCESS
assert "hardware" in test.result.categories
assert test.result.description == "a description"
assert test.result.custom_field == "a custom field"
class TestAntaComamnd:
"""Test for anta.models.AntaCommand."""
# ruff: noqa: B018
# pylint: disable=pointless-statement
def test_empty_output_access(self) -> None:
"""Test for both json and text ofmt."""
@@ -656,16 +683,20 @@ class TestAntaComamnd:
text_cmd_2.json_output
def test_supported(self) -> None:
"""Test if the supported property."""
"""Test the supported property."""
command = AntaCommand(command="show hardware counter drop", errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"])
assert command.supported is False
command = AntaCommand(
command="show hardware counter drop", output={"totalAdverseDrops": 0, "totalCongestionDrops": 0, "totalPacketProcessorDrops": 0, "dropEvents": {}}
)
assert command.supported is True
command = AntaCommand(command="show hardware counter drop")
with pytest.raises(RuntimeError) as exec_info:
command.supported
assert exec_info.value.args[0] == "Command 'show hardware counter drop' has not been collected and has not returned an error. Call AntaDevice.collect()."
def test_requires_privileges(self) -> None:
"""Test if the requires_privileges property."""
"""Test the requires_privileges property."""
command = AntaCommand(command="show aaa methods accounting", errors=["Invalid input (privileged mode required)"])
assert command.requires_privileges is True
command = AntaCommand(
@@ -678,3 +709,7 @@ class TestAntaComamnd:
},
)
assert command.requires_privileges is False
command = AntaCommand(command="show aaa methods accounting")
with pytest.raises(RuntimeError) as exec_info:
command.requires_privileges
assert exec_info.value.args[0] == "Command 'show aaa methods accounting' has not been collected and has not returned an error. Call AntaDevice.collect()."

View file

@@ -7,73 +7,62 @@ from __future__ import annotations
import logging
import resource
import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from anta import logger
from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.runner import adjust_rlimit_nofile, main, prepare_tests
from .test_models import FakeTest
from .test_models import FakeTest, FakeTestWithMissingTest
DATA_DIR: Path = Path(__file__).parent.parent.resolve() / "data"
FAKE_CATALOG: AntaCatalog = AntaCatalog.from_list([(FakeTest, None)])
@pytest.mark.asyncio()
async def test_runner_empty_tests(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
"""Test that when the list of tests is empty, a log is raised.
caplog is the pytest fixture to capture logs
test_inventory is a fixture that gives a default inventory for tests
"""
logger.setup_logging(logger.Log.INFO)
async def test_empty_tests(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None:
"""Test that when the list of tests is empty, a log is raised."""
caplog.set_level(logging.INFO)
manager = ResultManager()
await main(manager, test_inventory, AntaCatalog())
await main(manager, inventory, AntaCatalog())
assert len(caplog.record_tuples) == 1
assert "The list of tests is empty, exiting" in caplog.records[0].message
@pytest.mark.asyncio()
async def test_runner_empty_inventory(caplog: pytest.LogCaptureFixture) -> None:
"""Test that when the Inventory is empty, a log is raised.
caplog is the pytest fixture to capture logs
"""
logger.setup_logging(logger.Log.INFO)
async def test_empty_inventory(caplog: pytest.LogCaptureFixture) -> None:
"""Test that when the Inventory is empty, a log is raised."""
caplog.set_level(logging.INFO)
manager = ResultManager()
inventory = AntaInventory()
await main(manager, inventory, FAKE_CATALOG)
await main(manager, AntaInventory(), FAKE_CATALOG)
assert len(caplog.record_tuples) == 3
assert "The inventory is empty, exiting" in caplog.records[1].message
@pytest.mark.asyncio()
async def test_runner_no_selected_device(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
"""Test that when the list of established device.
caplog is the pytest fixture to capture logs
test_inventory is a fixture that gives a default inventory for tests
"""
logger.setup_logging(logger.Log.INFO)
caplog.set_level(logging.INFO)
@pytest.mark.parametrize(
("inventory", "tags", "devices"),
[
pytest.param({"count": 1, "reachable": False}, None, None, id="not-reachable"),
pytest.param({"filename": "test_inventory_with_tags.yml", "reachable": False}, {"leaf"}, None, id="not-reachable-with-tag"),
pytest.param({"count": 1, "reachable": True}, {"invalid-tag"}, None, id="reachable-with-invalid-tag"),
pytest.param({"filename": "test_inventory_with_tags.yml", "reachable": True}, None, {"invalid-device"}, id="reachable-with-invalid-device"),
pytest.param({"filename": "test_inventory_with_tags.yml", "reachable": False}, None, {"leaf1"}, id="not-reachable-with-device"),
pytest.param({"filename": "test_inventory_with_tags.yml", "reachable": False}, {"leaf"}, {"leaf1"}, id="not-reachable-with-device-and-tag"),
pytest.param({"filename": "test_inventory_with_tags.yml", "reachable": False}, {"invalid"}, {"invalid-device"}, id="reachable-with-invalid-tag-and-device"),
],
indirect=["inventory"],
)
async def test_no_selected_device(caplog: pytest.LogCaptureFixture, inventory: AntaInventory, tags: set[str], devices: set[str]) -> None:
"""Test that when the list of established devices is empty a log is raised."""
caplog.set_level(logging.WARNING)
manager = ResultManager()
await main(manager, test_inventory, FAKE_CATALOG)
assert "No reachable device was found." in [record.message for record in caplog.records]
# Reset logs and run with tags
caplog.clear()
await main(manager, test_inventory, FAKE_CATALOG, tags={"toto"})
assert "No reachable device matching the tags {'toto'} was found." in [record.message for record in caplog.records]
await main(manager, inventory, FAKE_CATALOG, tags=tags, devices=devices)
msg = f'No reachable device {f"matching the tags {tags} " if tags else ""}was found.{f" Selected devices: {devices} " if devices is not None else ""}'
assert msg in caplog.messages
def test_adjust_rlimit_nofile_valid_env(caplog: pytest.LogCaptureFixture) -> None:
@@ -140,67 +129,55 @@ def test_adjust_rlimit_nofile_invalid_env(caplog: pytest.LogCaptureFixture) -> None:
setrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE, (16384, 1048576))
@pytest.mark.asyncio()
@pytest.mark.parametrize(
("tags", "expected_tests_count", "expected_devices_count"),
("inventory", "tags", "tests", "devices_count", "tests_count"),
[
(None, 22, 3),
({"leaf"}, 9, 3),
({"invalid_tag"}, 0, 0),
pytest.param({"filename": "test_inventory_with_tags.yml"}, None, None, 3, 27, id="all-tests"),
pytest.param({"filename": "test_inventory_with_tags.yml"}, {"leaf"}, None, 2, 6, id="1-tag"),
pytest.param({"filename": "test_inventory_with_tags.yml"}, {"leaf", "spine"}, None, 3, 9, id="2-tags"),
pytest.param({"filename": "test_inventory_with_tags.yml"}, None, {"VerifyMlagStatus", "VerifyUptime"}, 3, 5, id="filtered-tests"),
pytest.param({"filename": "test_inventory_with_tags.yml"}, {"leaf"}, {"VerifyMlagStatus", "VerifyUptime"}, 2, 4, id="1-tag-filtered-tests"),
pytest.param({"filename": "test_inventory_with_tags.yml"}, {"invalid"}, None, 0, 0, id="invalid-tag"),
],
ids=["no_tags", "leaf_tag", "invalid_tag"],
indirect=["inventory"],
)
async def test_prepare_tests(
caplog: pytest.LogCaptureFixture,
test_inventory: AntaInventory,
tags: set[str] | None,
expected_tests_count: int,
expected_devices_count: int,
caplog: pytest.LogCaptureFixture, inventory: AntaInventory, tags: set[str], tests: set[str], devices_count: int, tests_count: int
) -> None:
"""Test the runner prepare_tests function."""
logger.setup_logging(logger.Log.INFO)
caplog.set_level(logging.INFO)
catalog: AntaCatalog = AntaCatalog.parse(str(DATA_DIR / "test_catalog_with_tags.yml"))
selected_tests = prepare_tests(inventory=test_inventory, catalog=catalog, tags=tags, tests=None)
if selected_tests is None:
assert expected_tests_count == 0
expected_log = f"There are no tests matching the tags {tags} to run in the current test catalog and device inventory, please verify your inputs."
assert expected_log in caplog.text
else:
assert len(selected_tests) == expected_devices_count
assert sum(len(tests) for tests in selected_tests.values()) == expected_tests_count
@pytest.mark.asyncio()
async def test_prepare_tests_with_specific_tests(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
"""Test the runner prepare_tests function with specific tests."""
logger.setup_logging(logger.Log.INFO)
caplog.set_level(logging.INFO)
caplog.set_level(logging.WARNING)
catalog: AntaCatalog = AntaCatalog.parse(str(DATA_DIR / "test_catalog_with_tags.yml"))
selected_tests = prepare_tests(inventory=test_inventory, catalog=catalog, tags=None, tests={"VerifyMlagStatus", "VerifyUptime"})
selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tags=tags, tests=tests)
if selected_tests is None:
msg = f"There are no tests matching the tags {tags} to run in the current test catalog and device inventory, please verify your inputs."
assert msg in caplog.messages
return
assert selected_tests is not None
assert len(selected_tests) == 3
assert sum(len(tests) for tests in selected_tests.values()) == 5
assert len(selected_tests) == devices_count
assert sum(len(tests) for tests in selected_tests.values()) == tests_count
@pytest.mark.asyncio()
async def test_runner_dry_run(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
"""Test that when dry_run is True, no tests are run.
caplog is the pytest fixture to capture logs
test_inventory is a fixture that gives a default inventory for tests
"""
logger.setup_logging(logger.Log.INFO)
async def test_dry_run(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None:
"""Test that when dry_run is True, no tests are run."""
caplog.set_level(logging.INFO)
manager = ResultManager()
catalog_path = Path(__file__).parent.parent / "data" / "test_catalog.yml"
catalog = AntaCatalog.parse(catalog_path)
await main(manager, inventory, FAKE_CATALOG, dry_run=True)
assert "Dry-run mode, exiting before running the tests." in caplog.records[-1].message
await main(manager, test_inventory, catalog, dry_run=True)
# Check that the last log contains Dry-run
assert "Dry-run" in caplog.records[-1].message
async def test_cannot_create_test(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None:
"""Test that when an Exception is raised during test instantiation, it is caught and a log is raised."""
caplog.set_level(logging.CRITICAL)
manager = ResultManager()
catalog = AntaCatalog.from_list([(FakeTestWithMissingTest, None)]) # type: ignore[type-abstract]
await main(manager, inventory, catalog)
msg = (
"There is an error when creating test tests.units.test_models.FakeTestWithMissingTest.\nIf this is not a custom test implementation: "
"Please reach out to the maintainer team or open an issue on Github: https://github.com/aristanetworks/anta.\nTypeError: "
)
msg += (
"Can't instantiate abstract class FakeTestWithMissingTest without an implementation for abstract method 'test'"
if sys.version_info >= (3, 12)
else "Can't instantiate abstract class FakeTestWithMissingTest with abstract method test"
)
assert msg in caplog.messages
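Outside of pytest, the same runner entry point can be driven directly. The snippet below sketches a dry-run invocation based on the calls used in these tests; the inventory and catalog file names and the credentials are hypothetical placeholders, and the AntaInventory.parse signature is assumed from ANTA's public API.

import asyncio

from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.runner import main


async def dry_run() -> ResultManager:
    manager = ResultManager()
    # Placeholder paths and credentials; adjust to a real environment.
    inventory = AntaInventory.parse(filename="inventory.yml", username="admin", password="admin")
    catalog = AntaCatalog.parse("catalog.yml")
    # dry_run=True makes main() stop right before executing the tests.
    await main(manager, inventory, catalog, dry_run=True)
    return manager


if __name__ == "__main__":
    asyncio.run(dry_run())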

View file

@@ -11,7 +11,7 @@ from typing import Any
import pytest
from anta.tools import custom_division, get_dict_superset, get_failed_logs, get_item, get_value
from anta.tools import convert_categories, custom_division, get_dict_superset, get_failed_logs, get_item, get_value
TEST_GET_FAILED_LOGS_DATA = [
{"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com"},
@@ -313,7 +313,6 @@ def test_get_dict_superset(
expected_raise: AbstractContextManager[Exception],
) -> None:
"""Test get_dict_superset."""
# pylint: disable=too-many-arguments
with expected_raise:
assert get_dict_superset(list_of_dicts, input_dict, default, var_name, custom_error_msg, required=required) == expected_result
@@ -421,7 +420,6 @@ def test_get_value(
expected_raise: AbstractContextManager[Exception],
) -> None:
"""Test get_value."""
# pylint: disable=too-many-arguments
kwargs = {
"default": default,
"required": required,
@@ -485,7 +483,6 @@ def test_get_item(
expected_raise: AbstractContextManager[Exception],
) -> None:
"""Test get_item."""
# pylint: disable=too-many-arguments
with expected_raise:
assert get_item(list_of_dicts, key, value, default, var_name, custom_error_msg, required=required, case_sensitive=case_sensitive) == expected_result
@@ -502,3 +499,17 @@ def test_get_item(
def test_custom_division(numerator: float, denominator: float, expected_result: str) -> None:
"""Test custom_division."""
assert custom_division(numerator, denominator) == expected_result
@pytest.mark.parametrize(
("test_input", "expected_raise", "expected_result"),
[
pytest.param([], does_not_raise(), [], id="empty list"),
pytest.param(["bgp", "system", "vlan", "configuration"], does_not_raise(), ["BGP", "System", "VLAN", "Configuration"], id="list with acronyms and titles"),
pytest.param(42, pytest.raises(TypeError, match="Wrong input type"), None, id="wrong input type"),
],
)
def test_convert_categories(test_input: list[str], expected_raise: AbstractContextManager[Exception], expected_result: list[str]) -> None:
"""Test convert_categories."""
with expected_raise:
assert convert_categories(test_input) == expected_result
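The new convert_categories cases expect known acronyms to be upper-cased, other categories to be title-cased, and a TypeError mentioning "Wrong input type" for anything that is not a list. A minimal sketch consistent with those expectations follows; the acronym set is an assumption for illustration, not ANTA's actual list.

# Assumed acronym set, chosen only to satisfy the examples above.
ACRONYMS = {"aaa", "bgp", "mlag", "ospf", "vlan", "vxlan"}


def convert_categories(categories: list[str]) -> list[str]:
    """Upper-case known acronyms and title-case every other category."""
    if not isinstance(categories, list):
        msg = f"Wrong input type {type(categories)}, expected list of strings"
        raise TypeError(msg)
    return [c.upper() if c.lower() in ACRONYMS else c.title() for c in categories]


print(convert_categories(["bgp", "system", "vlan", "configuration"]))
# ['BGP', 'System', 'VLAN', 'Configuration']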