# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Benchmark tests for ANTA."""

import asyncio
import logging
from collections import defaultdict
from unittest.mock import patch

import pytest
import respx
from pytest_codspeed import BenchmarkFixture

from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.result_manager.models import AntaTestStatus
from anta.runner import main

from .utils import collect, collect_commands

logger = logging.getLogger(__name__)


def test_anta_dry_run(
    benchmark: BenchmarkFixture,
    event_loop: asyncio.AbstractEventLoop,
    catalog: AntaCatalog,
    inventory: AntaInventory,
    request: pytest.FixtureRequest,
    session_results: defaultdict[str, ResultManager],
) -> None:
    """Benchmark ANTA in Dry-Run Mode."""
    # Disable logging during ANTA execution so logging calls do not add their time to the benchmark
    logging.disable()

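    # Keep the results of this parametrized run in a dedicated ResultManager, keyed by the pytest parametrization ID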
    results = session_results[request.node.callspec.id]

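    # pytest-codspeed benchmarks the decorated function; reset the results and catalog indexes so every round starts from a clean state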
    @benchmark
    def _() -> None:
        results.reset()
        catalog.clear_indexes()
        event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True))

    logging.disable(logging.NOTSET)

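    # A dry run is expected to register one result per device for every test in the catalog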
    if len(results.results) != len(inventory) * len(catalog.tests):
        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(results.results)}", pytrace=False)
    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------"
    logger.info(bench_info)


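# The patches below replace ANTA's command collection with the benchmark test doubles from .utils, so the run never reaches a real device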
@patch("anta.models.AntaTest.collect", collect)
@patch("anta.device.AntaDevice.collect_commands", collect_commands)
@pytest.mark.dependency(name="anta_benchmark", scope="package")
@respx.mock  # Mock eAPI responses
def test_anta(
    benchmark: BenchmarkFixture,
    event_loop: asyncio.AbstractEventLoop,
    catalog: AntaCatalog,
    inventory: AntaInventory,
    request: pytest.FixtureRequest,
    session_results: defaultdict[str, ResultManager],
) -> None:
    """Benchmark ANTA."""
    # Disable logging during ANTA execution so logging calls do not add their time to the benchmark
    logging.disable()

    results = session_results[request.node.callspec.id]

    @benchmark
    def _() -> None:
        results.reset()
        catalog.clear_indexes()
        event_loop.run_until_complete(main(results, inventory, catalog))

    logging.disable(logging.NOTSET)

    if len(catalog.tests) * len(inventory) != len(results.results):
        # A count mismatch could mean the catalog contains duplicate tests.
        # TODO: consider removing this code and refactoring the unit test data as a dictionary with tuple keys instead of a list
        seen = set()
        dupes = []
        for test in catalog.tests:
            if test in seen:
                dupes.append(test)
            else:
                seen.add(test)
        if dupes:
            for test in dupes:
                msg = f"Found duplicate in test catalog: {test}"
                logger.error(msg)
        pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False)
    bench_info = (
        "\n--- ANTA NRFU Benchmark Information ---\n"
        f"Test results: {len(results.results)}\n"
        f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n"
        f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n"
        f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n"
        f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n"
        f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n"
        "---------------------------------------"
    )
    logger.info(bench_info)
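    # The benchmark is only valid if no test errored and no result was left unset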
    assert results.get_total_results({AntaTestStatus.ERROR}) == 0
    assert results.get_total_results({AntaTestStatus.UNSET}) == 0