diff --git a/.codespellignore b/.codespellignore deleted file mode 100644 index a6d3a93..0000000 --- a/.codespellignore +++ /dev/null @@ -1 +0,0 @@ -toi \ No newline at end of file diff --git a/.github/workflows/code-testing.yml b/.github/workflows/code-testing.yml index 84777d6..de2e6bc 100644 --- a/.github/workflows/code-testing.yml +++ b/.github/workflows/code-testing.yml @@ -43,7 +43,7 @@ jobs: - 'docs/**' - 'README.md' check-requirements: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: matrix: python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] @@ -62,7 +62,7 @@ jobs: # @gmuloc: commenting this out for now #missing-documentation: # name: "Warning documentation is missing" - # runs-on: ubuntu-latest + # runs-on: ubuntu-20.04 # needs: [file-changes] # if: needs.file-changes.outputs.cli == 'true' && needs.file-changes.outputs.docs == 'false' # steps: @@ -74,7 +74,7 @@ jobs: # You should update documentation to reflect your change, or maybe not :) lint-python: name: Check the code style - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: file-changes if: needs.file-changes.outputs.code == 'true' steps: @@ -89,7 +89,7 @@ jobs: run: tox -e lint type-python: name: Check typing - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: file-changes if: needs.file-changes.outputs.code == 'true' steps: @@ -104,7 +104,7 @@ jobs: run: tox -e type test-python: name: Pytest across all supported python versions - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [lint-python, type-python] strategy: matrix: @@ -138,7 +138,7 @@ jobs: run: tox test-documentation: name: Build offline documentation for testing - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 needs: [test-python] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c0a538f..4b3b357 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,9 +27,31 @@ jobs: - name: Publish distribution 📦 to PyPI uses: pypa/gh-action-pypi-publish@release/v1 + release-coverage: + name: Updated ANTA release coverage badge + runs-on: ubuntu-20.04 + needs: [pypi] + steps: + - uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: pip install genbadge[coverage] tox tox-gh-actions + - name: "Run pytest via tox for ${{ matrix.python }}" + run: tox + - name: Generate coverage badge + run: genbadge coverage -i .coverage.xml -o badge/latest-release-coverage.svg + - name: Publish coverage badge to gh-pages branch + uses: JamesIves/github-pages-deploy-action@v4 + with: + branch: coverage-badge + folder: badge release-doc: name: "Publish documentation for release ${{github.ref_name}}" runs-on: ubuntu-latest + needs: [release-coverage] steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/sonar.yml b/.github/workflows/sonar.yml index 7ec7896..81db36e 100644 --- a/.github/workflows/sonar.yml +++ b/.github/workflows/sonar.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis + fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - name: Setup Python uses: actions/setup-python@v5 with: @@ -30,7 +30,7 @@ jobs: - name: "Run pytest via tox for ${{ matrix.python }}" run: tox - name: SonarCloud Scan - uses: SonarSource/sonarqube-scan-action@v5.0.0 + uses: 
SonarSource/sonarcloud-github-action@master env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7dde835..f33db65 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,6 @@ # See https://pre-commit.com/hooks.html for more hooks ci: autoupdate_commit_msg: "ci: pre-commit autoupdate" - skip: [mypy] files: ^(anta|docs|scripts|tests|asynceapi)/ @@ -44,28 +43,28 @@ repos: - --allow-past-years - --fuzzy-match-generates-todo - --comment-style - - "" + - '' - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.10.0 + rev: v0.8.4 hooks: - - id: ruff - name: Run Ruff linter - args: [--fix] - - id: ruff-format - name: Run Ruff formatter + - id: ruff + name: Run Ruff linter + args: [ --fix ] + - id: ruff-format + name: Run Ruff formatter - repo: https://github.com/pycqa/pylint - rev: "v3.3.5" + rev: "v3.3.2" hooks: - id: pylint name: Check code style with pylint description: This hook runs pylint. types: [python] args: - - -rn # Only display messages - - -sn # Don't display the score - - --rcfile=pyproject.toml # Link to config file + - -rn # Only display messages + - -sn # Don't display the score + - --rcfile=pyproject.toml # Link to config file additional_dependencies: - anta[cli] - types-PyYAML @@ -77,17 +76,16 @@ repos: - respx - repo: https://github.com/codespell-project/codespell - rev: v2.4.1 + rev: v2.3.0 hooks: - id: codespell name: Checks for common misspellings in text files. entry: codespell language: python types: [text] - args: ["--ignore-words", ".codespellignore"] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.15.0 + rev: v1.14.0 hooks: - id: mypy name: Check typing with mypy @@ -99,10 +97,10 @@ repos: - types-requests - types-pyOpenSSL - pytest - files: ^(anta|tests|asynceapi)/ + files: ^(anta|tests)/ - repo: https://github.com/igorshubovych/markdownlint-cli - rev: v0.44.0 + rev: v0.43.0 hooks: - id: markdownlint name: Check Markdown files style. @@ -124,14 +122,5 @@ repos: pass_filenames: false additional_dependencies: - anta[cli] - - id: doc-snippets - name: Generate doc snippets - entry: >- - sh -c "docs/scripts/generate_doc_snippets.py" - language: python - types: [python] - files: anta/cli/ - verbose: true - pass_filenames: false - additional_dependencies: - - anta[cli] + # TODO: next can go once we have it added to anta properly + - numpydoc diff --git a/anta/__init__.py b/anta/__init__.py index 339a7d3..6660843 100644 --- a/anta/__init__.py +++ b/anta/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Arista Network Test Automation (ANTA) Framework.""" diff --git a/anta/catalog.py b/anta/catalog.py index 5239255..bc95104 100644 --- a/anta/catalog.py +++ b/anta/catalog.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
"""Catalog related functions.""" @@ -14,11 +14,11 @@ from itertools import chain from json import load as json_load from pathlib import Path from typing import TYPE_CHECKING, Any, Literal, Optional, Union +from warnings import warn from pydantic import BaseModel, ConfigDict, RootModel, ValidationError, ValidationInfo, field_validator, model_serializer, model_validator from pydantic.types import ImportString from pydantic_core import PydanticCustomError -from typing_extensions import deprecated from yaml import YAMLError, safe_dump, safe_load from anta.logger import anta_log_exception @@ -182,7 +182,7 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition] except Exception as e: # A test module is potentially user-defined code. # We need to catch everything if we want to have meaningful logs - module_str = f"{module_name.removeprefix('.')}{f' from package {package}' if package else ''}" + module_str = f"{module_name[1:] if module_name.startswith('.') else module_name}{f' from package {package}' if package else ''}" message = f"Module named {module_str} cannot be imported. Verify that the module exists and there is no Python syntax issues." anta_log_exception(e, message, logger) raise ValueError(message) from e @@ -223,14 +223,16 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition] raise ValueError(msg) # noqa: TRY004 pydantic catches ValueError or AssertionError, no TypeError if len(test_definition) != 1: msg = ( - f"Syntax error when parsing: {test_definition}\nIt must be a dictionary with a single entry. Check the indentation in the test catalog." + f"Syntax error when parsing: {test_definition}\n" + "It must be a dictionary with a single entry. Check the indentation in the test catalog." ) raise ValueError(msg) for test_name, test_inputs in test_definition.copy().items(): test: type[AntaTest] | None = getattr(module, test_name, None) if test is None: msg = ( - f"{test_name} is not defined in Python module {module.__name__}{f' (from {module.__file__})' if module.__file__ is not None else ''}" + f"{test_name} is not defined in Python module {module.__name__}" + f"{f' (from {module.__file__})' if module.__file__ is not None else ''}" ) raise ValueError(msg) test_definitions.append(AntaTestDefinition(test=test, inputs=test_inputs)) @@ -250,7 +252,7 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition] # This could be improved. # https://github.com/pydantic/pydantic/issues/1043 # Explore if this worth using this: https://github.com/NowanIlfideme/pydantic-yaml - return safe_dump(safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), width=math.inf) + return safe_dump(safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), indent=2, width=math.inf) def to_json(self) -> str: """Return a JSON representation string of this model. @@ -289,7 +291,11 @@ class AntaCatalog: self._tests = tests self._filename: Path | None = None if filename is not None: - self._filename = filename if isinstance(filename, Path) else Path(filename) + if isinstance(filename, Path): + self._filename = filename + else: + self._filename = Path(filename) + self.indexes_built: bool self.tag_to_tests: defaultdict[str | None, set[AntaTestDefinition]] self._init_indexes() @@ -319,8 +325,6 @@ class AntaCatalog: msg = "A test in the catalog must be an AntaTestDefinition instance" raise TypeError(msg) self._tests = value - # Tests were modified so indexes need to be rebuilt. 
- self.clear_indexes() @staticmethod def parse(filename: str | Path, file_format: Literal["yaml", "json"] = "yaml") -> AntaCatalog: @@ -436,12 +440,13 @@ class AntaCatalog: combined_tests = list(chain(*(catalog.tests for catalog in catalogs))) return cls(tests=combined_tests) - @deprecated( - "This method is deprecated, use `AntaCatalogs.merge_catalogs` class method instead. This will be removed in ANTA v2.0.0.", category=DeprecationWarning - ) def merge(self, catalog: AntaCatalog) -> AntaCatalog: """Merge two AntaCatalog instances. + Warning + ------- + This method is deprecated and will be removed in ANTA v2.0. Use `AntaCatalog.merge_catalogs()` instead. + Parameters ---------- catalog @@ -452,6 +457,12 @@ class AntaCatalog: AntaCatalog A new AntaCatalog instance containing the tests of the two instances. """ + # TODO: Use a decorator to deprecate this method instead. See https://github.com/aristanetworks/anta/issues/754 + warn( + message="AntaCatalog.merge() is deprecated and will be removed in ANTA v2.0. Use AntaCatalog.merge_catalogs() instead.", + category=DeprecationWarning, + stacklevel=2, + ) return self.merge_catalogs([self, catalog]) def dump(self) -> AntaCatalogFile: diff --git a/anta/cli/__init__.py b/anta/cli/__init__.py index dd39f78..90be5c7 100644 --- a/anta/cli/__init__.py +++ b/anta/cli/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """ANTA CLI.""" diff --git a/anta/cli/_main.py b/anta/cli/_main.py index 1dc6224..ae4e050 100644 --- a/anta/cli/_main.py +++ b/anta/cli/_main.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """ANTA CLI.""" diff --git a/anta/cli/check/__init__.py b/anta/cli/check/__init__.py index ab1b08e..bbc5a7e 100644 --- a/anta/cli/check/__init__.py +++ b/anta/cli/check/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands to validate configuration files.""" diff --git a/anta/cli/check/commands.py b/anta/cli/check/commands.py index 2ca6013..23895d7 100644 --- a/anta/cli/check/commands.py +++ b/anta/cli/check/commands.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # pylint: disable = redefined-outer-name diff --git a/anta/cli/console.py b/anta/cli/console.py index 068e676..9c57d6d 100644 --- a/anta/cli/console.py +++ b/anta/cli/console.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """ANTA Top-level Console. diff --git a/anta/cli/debug/__init__.py b/anta/cli/debug/__init__.py index d3ff5bf..18d577f 100644 --- a/anta/cli/debug/__init__.py +++ b/anta/cli/debug/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. 
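The catalog hunk above ends up with `AntaCatalog.merge()` emitting a `DeprecationWarning` at call time and delegating to `merge_catalogs()`. A minimal, self-contained sketch of that runtime-deprecation pattern follows; `Catalog`, `merge` and `merge_catalogs` below are illustrative stand-ins, not ANTA's real API.

```python
from __future__ import annotations

from warnings import warn


class Catalog:
    """Illustrative stand-in for a test catalog; not ANTA's real class."""

    def __init__(self, tests: list[str] | None = None) -> None:
        self.tests = list(tests or [])

    @classmethod
    def merge_catalogs(cls, catalogs: list[Catalog]) -> Catalog:
        """Preferred API: merge any number of catalogs into a new one."""
        merged: list[str] = []
        for catalog in catalogs:
            merged.extend(catalog.tests)
        return cls(merged)

    def merge(self, other: Catalog) -> Catalog:
        """Deprecated two-catalog merge kept for backward compatibility."""
        warn(
            message="Catalog.merge() is deprecated. Use Catalog.merge_catalogs() instead.",
            category=DeprecationWarning,
            stacklevel=2,  # point the warning at the caller, not this wrapper
        )
        return self.merge_catalogs([self, other])
```

Running callers under `python -W error::DeprecationWarning` turns the warning into an exception, which is one way to rehearse the eventual removal in CI.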
# Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands to execute EOS commands on remote devices.""" diff --git a/anta/cli/debug/commands.py b/anta/cli/debug/commands.py index 54f580a..e6e456e 100644 --- a/anta/cli/debug/commands.py +++ b/anta/cli/debug/commands.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # pylint: disable = redefined-outer-name diff --git a/anta/cli/debug/utils.py b/anta/cli/debug/utils.py index c8ead5a..454c3e6 100644 --- a/anta/cli/debug/utils.py +++ b/anta/cli/debug/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Utils functions to use with anta.cli.debug module.""" diff --git a/anta/cli/exec/__init__.py b/anta/cli/exec/__init__.py index bcec37c..5fa6eb9 100644 --- a/anta/cli/exec/__init__.py +++ b/anta/cli/exec/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands to execute various scripts on EOS devices.""" diff --git a/anta/cli/exec/commands.py b/anta/cli/exec/commands.py index a299393..ff36e56 100644 --- a/anta/cli/exec/commands.py +++ b/anta/cli/exec/commands.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands to execute various scripts on EOS devices.""" diff --git a/anta/cli/exec/utils.py b/anta/cli/exec/utils.py index 3258d0b..33a0222 100644 --- a/anta/cli/exec/utils.py +++ b/anta/cli/exec/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. @@ -13,7 +13,6 @@ import logging from pathlib import Path from typing import TYPE_CHECKING, Literal -from asyncssh.misc import HostKeyNotVerifiable from click.exceptions import UsageError from httpx import ConnectError, HTTPError @@ -24,7 +23,6 @@ from asynceapi import EapiCommandError if TYPE_CHECKING: from anta.inventory import AntaInventory - from asynceapi._types import EapiComplexCommand, EapiSimpleCommand EOS_SCHEDULED_TECH_SUPPORT = "/mnt/flash/schedule/tech-support" INVALID_CHAR = "`~!@#$/" @@ -98,7 +96,7 @@ async def collect_commands( logger.error("Error when collecting commands: %s", str(r)) -async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bool, tags: set[str] | None = None, latest: int | None = None) -> None: # noqa: C901 +async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bool, tags: set[str] | None = None, latest: int | None = None) -> None: """Collect scheduled show-tech on devices.""" async def collect(device: AntaDevice) -> None: @@ -137,13 +135,13 @@ async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bo ) logger.warning(msg) + commands = [] # TODO: @mtache - add `config` field to `AntaCommand` object to handle this use case. 
# Otherwise mypy complains about enable as it is only implemented for AsyncEOSDevice # TODO: Should enable be also included in AntaDevice? if not isinstance(device, AsyncEOSDevice): msg = "anta exec collect-tech-support is only supported with AsyncEOSDevice for now." raise UsageError(msg) - commands: list[EapiSimpleCommand | EapiComplexCommand] = [] if device.enable and device._enable_password is not None: commands.append({"cmd": "enable", "input": device._enable_password}) elif device.enable: @@ -164,11 +162,6 @@ async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bo await device.copy(sources=filenames, destination=outdir, direction="from") logger.info("Collected %s scheduled tech-support from %s", len(filenames), device.name) - except HostKeyNotVerifiable: - logger.error( - "Unable to collect tech-support on %s. The host SSH key could not be verified. Make sure it is part of the `known_hosts` file on your machine.", - device.name, - ) except (EapiCommandError, HTTPError, ConnectError) as e: logger.error("Unable to collect tech-support on %s: %s", device.name, str(e)) diff --git a/anta/cli/get/__init__.py b/anta/cli/get/__init__.py index d0393ad..8763b35 100644 --- a/anta/cli/get/__init__.py +++ b/anta/cli/get/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands to get information from or generate inventories.""" diff --git a/anta/cli/get/commands.py b/anta/cli/get/commands.py index e34be2c..3cc9126 100644 --- a/anta/cli/get/commands.py +++ b/anta/cli/get/commands.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # pylint: disable = redefined-outer-name diff --git a/anta/cli/get/utils.py b/anta/cli/get/utils.py index e609065..d21dc54 100644 --- a/anta/cli/get/utils.py +++ b/anta/cli/get/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Utils functions to use with anta.cli.get.commands module.""" @@ -350,18 +350,17 @@ def print_test(test: type[AntaTest], *, short: bool = False) -> None: # Need to handle the fact that we nest the routing modules in Examples. # This is a bit fragile. inputs = example.split("\n") - test_name_lines = [i for i, input_entry in enumerate(inputs) if test.name in input_entry] - if not test_name_lines: + try: + test_name_line = next((i for i, input_entry in enumerate(inputs) if test.name in input_entry)) + except StopIteration as e: msg = f"Could not find the name of the test '{test.name}' in the Example section in the docstring." 
- raise ValueError(msg) - for list_index, line_index in enumerate(test_name_lines): - end = test_name_lines[list_index + 1] if list_index + 1 < len(test_name_lines) else -1 - console.print(f" {inputs[line_index].strip()}") - # Injecting the description for the first example - if list_index == 0: - console.print(f" # {test.description}", soft_wrap=True) - if not short and len(inputs) > line_index + 2: # There are params - console.print(textwrap.indent(textwrap.dedent("\n".join(inputs[line_index + 1 : end])), " " * 6)) + raise ValueError(msg) from e + # TODO: handle not found + console.print(f" {inputs[test_name_line].strip()}") + # Injecting the description + console.print(f" # {test.description}", soft_wrap=True) + if not short and len(inputs) > test_name_line + 2: # There are params + console.print(textwrap.indent(textwrap.dedent("\n".join(inputs[test_name_line + 1 : -1])), " " * 6)) def extract_examples(docstring: str) -> str | None: diff --git a/anta/cli/nrfu/__init__.py b/anta/cli/nrfu/__init__.py index 6dc912d..0272e0d 100644 --- a/anta/cli/nrfu/__init__.py +++ b/anta/cli/nrfu/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands that run ANTA tests using anta.runner.""" @@ -42,10 +42,9 @@ class IgnoreRequiredWithHelp(AliasedGroup): if "--help" not in args: raise - # Fake presence of the required params so that help can display + # remove the required params so that help can display for param in self.params: - if param.required: - param.value_is_missing = lambda value: False # type: ignore[method-assign] # noqa: ARG005 + param.required = False return super().parse_args(ctx, args) diff --git a/anta/cli/nrfu/commands.py b/anta/cli/nrfu/commands.py index ed0f432..a549268 100644 --- a/anta/cli/nrfu/commands.py +++ b/anta/cli/nrfu/commands.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Click commands that render ANTA tests results.""" @@ -45,10 +45,7 @@ def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> Non help="Path to save report as a JSON file", ) def json(ctx: click.Context, output: pathlib.Path | None) -> None: - """ANTA command to check network state with JSON results. - - If no `--output` is specified, the output is printed to stdout. - """ + """ANTA command to check network state with JSON results.""" run_tests(ctx) print_json(ctx, output=output) exit_with_code(ctx) @@ -75,11 +72,11 @@ def text(ctx: click.Context) -> None: path_type=pathlib.Path, ), show_envvar=True, - required=True, + required=False, help="Path to save report as a CSV file", ) def csv(ctx: click.Context, csv_output: pathlib.Path) -> None: - """ANTA command to check network state with CSV report.""" + """ANTA command to check network states with CSV result.""" run_tests(ctx) save_to_csv(ctx, csv_file=csv_output) exit_with_code(ctx) diff --git a/anta/cli/nrfu/utils.py b/anta/cli/nrfu/utils.py index 60c0d29..375e6e1 100644 --- a/anta/cli/nrfu/utils.py +++ b/anta/cli/nrfu/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
"""Utils functions to use with anta.cli.nrfu.commands module.""" @@ -157,7 +157,7 @@ def save_markdown_report(ctx: click.Context, md_output: pathlib.Path) -> None: Path to save the markdown report. """ try: - MDReportGenerator.generate(results=_get_result_manager(ctx).sort(["name", "categories", "test"]), md_filename=md_output) + MDReportGenerator.generate(results=_get_result_manager(ctx), md_filename=md_output) console.print(f"Markdown report saved to {md_output} ✅", style="cyan") except OSError: console.print(f"Failed to save Markdown report to {md_output} ❌", style="cyan") diff --git a/anta/cli/utils.py b/anta/cli/utils.py index 34b96b3..a939c32 100644 --- a/anta/cli/utils.py +++ b/anta/cli/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Utils functions to use with anta.cli module.""" @@ -9,7 +9,7 @@ import enum import functools import logging from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, Literal +from typing import TYPE_CHECKING, Any, Callable import click from yaml import YAMLError @@ -17,7 +17,6 @@ from yaml import YAMLError from anta.catalog import AntaCatalog from anta.inventory import AntaInventory from anta.inventory.exceptions import InventoryIncorrectSchemaError, InventoryRootKeyError -from anta.logger import anta_log_exception if TYPE_CHECKING: from click import Option @@ -191,14 +190,6 @@ def core_options(f: Callable[..., Any]) -> Callable[..., Any]: required=True, type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path), ) - @click.option( - "--inventory-format", - envvar="ANTA_INVENTORY_FORMAT", - show_envvar=True, - help="Format of the inventory file, either 'yaml' or 'json'", - default="yaml", - type=click.Choice(["yaml", "json"], case_sensitive=False), - ) @click.pass_context @functools.wraps(f) def wrapper( @@ -213,7 +204,6 @@ def core_options(f: Callable[..., Any]) -> Callable[..., Any]: timeout: float, insecure: bool, disable_cache: bool, - inventory_format: Literal["json", "yaml"], **kwargs: dict[str, Any], ) -> Any: # If help is invoke somewhere, do not parse inventory @@ -251,10 +241,8 @@ def core_options(f: Callable[..., Any]) -> Callable[..., Any]: timeout=timeout, insecure=insecure, disable_cache=disable_cache, - file_format=inventory_format, ) - except (TypeError, ValueError, YAMLError, OSError, InventoryIncorrectSchemaError, InventoryRootKeyError) as e: - anta_log_exception(e, f"Failed to parse the inventory: {inventory}", logger) + except (TypeError, ValueError, YAMLError, OSError, InventoryIncorrectSchemaError, InventoryRootKeyError): ctx.exit(ExitCode.USAGE_ERROR) return f(*args, inventory=i, **kwargs) @@ -331,8 +319,7 @@ def catalog_options(f: Callable[..., Any]) -> Callable[..., Any]: try: file_format = catalog_format.lower() c = AntaCatalog.parse(catalog, file_format=file_format) # type: ignore[arg-type] - except (TypeError, ValueError, YAMLError, OSError) as e: - anta_log_exception(e, f"Failed to parse the catalog: {catalog}", logger) + except (TypeError, ValueError, YAMLError, OSError): ctx.exit(ExitCode.USAGE_ERROR) return f(*args, catalog=c, **kwargs) diff --git a/anta/constants.py b/anta/constants.py index 2a4c0c9..4dcef30 100644 --- a/anta/constants.py +++ b/anta/constants.py @@ -1,30 +1,11 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. 
# Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Constants used in ANTA.""" from __future__ import annotations -ACRONYM_CATEGORIES: set[str] = { - "aaa", - "avt", - "bfd", - "bgp", - "igmp", - "ip", - "isis", - "lanz", - "lldp", - "mlag", - "ntp", - "ospf", - "ptp", - "snmp", - "stp", - "stun", - "vlan", - "vxlan", -} +ACRONYM_CATEGORIES: set[str] = {"aaa", "mlag", "snmp", "bgp", "ospf", "vxlan", "stp", "igmp", "ip", "lldp", "ntp", "bfd", "ptp", "lanz", "stun", "vlan"} """A set of network protocol or feature acronyms that should be represented in uppercase.""" MD_REPORT_TOC = """**Table of Contents:** @@ -43,33 +24,5 @@ KNOWN_EOS_ERRORS = [ r".* does not support IP", r"IS-IS (.*) is disabled because: .*", r"No source interface .*", - r".*controller\snot\sready.*", ] -"""List of known EOS errors. - -!!! failure "Generic EOS Error Handling" - When catching these errors, **ANTA will fail the affected test** and reported the error message. -""" - -EOS_BLACKLIST_CMDS = [ - r"^reload.*", - r"^conf.*", - r"^wr.*", -] -"""List of blacklisted EOS commands. - -!!! success "Disruptive commands safeguard" - ANTA implements a mechanism to **prevent the execution of disruptive commands** such as `reload`, `write erase` or `configure terminal`. -""" - -UNSUPPORTED_PLATFORM_ERRORS = [ - "not supported on this hardware platform", - "Invalid input (at token 2: 'trident')", -] -"""Error messages indicating platform or hardware unsupported commands. Includes both general hardware -platform errors and specific ASIC family limitations. - -!!! tip "Running EOS commands unsupported by hardware" - When catching these errors, ANTA will skip the affected test and raise a warning. The **test catalog must be updated** to remove execution of the affected test - on unsupported devices. -""" +"""List of known EOS errors that should set a test status to 'failure' with the error message.""" diff --git a/anta/custom_types.py b/anta/custom_types.py index 92edabc..297f1f5 100644 --- a/anta/custom_types.py +++ b/anta/custom_types.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module that provides predefined types for AntaTest.Input instances.""" @@ -10,6 +10,9 @@ from pydantic import Field from pydantic.functional_validators import AfterValidator, BeforeValidator # Regular Expression definition +# TODO: make this configurable - with an env var maybe? +REGEXP_EOS_BLACKLIST_CMDS = [r"^reload.*", r"^conf\w*\s*(terminal|session)*", r"^wr\w*\s*\w+"] +"""List of regular expressions to blacklist from eos commands.""" REGEXP_PATH_MARKERS = r"[\\\/\s]" """Match directory path from string.""" REGEXP_INTERFACE_ID = r"\d+(\/\d+)*(\.\d+)?" 
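The `KNOWN_EOS_ERRORS` constant in the hunk above is a list of regex strings describing EOS error messages that should fail a test rather than error it. The sketch below only illustrates how such a list might be consulted; it is not ANTA's actual error-handling code, and the pattern list is abridged to the entries visible in the hunk.

```python
import re

# Abridged copy of the patterns visible in the hunk above.
KNOWN_EOS_ERRORS = [
    r".* does not support IP",
    r"IS-IS (.*) is disabled because: .*",
    r"No source interface .*",
]


def is_known_eos_error(message: str) -> bool:
    """Return True when an eAPI error message matches a known EOS error pattern."""
    return any(re.fullmatch(pattern, message) for pattern in KNOWN_EOS_ERRORS)


print(is_known_eos_error("No source interface Loopback0"))  # True
print(is_known_eos_error("Invalid input"))                  # False
```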
@@ -23,12 +26,15 @@ REGEX_TYPE_PORTCHANNEL = r"^Port-Channel[0-9]{1,6}$" REGEXP_TYPE_HOSTNAME = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$" """Match hostname like `my-hostname`, `my-hostname-1`, `my-hostname-1-2`.""" - -# Regular expression for BGP redistributed routes -REGEX_IPV4_UNICAST = r"ipv4[-_ ]?unicast$" -REGEX_IPV4_MULTICAST = r"ipv4[-_ ]?multicast$" -REGEX_IPV6_UNICAST = r"ipv6[-_ ]?unicast$" -REGEX_IPV6_MULTICAST = r"ipv6[-_ ]?multicast$" +# Regexp BGP AFI/SAFI +REGEXP_BGP_L2VPN_AFI = r"\b(l2[\s\-]?vpn[\s\-]?evpn)\b" +"""Match L2VPN EVPN AFI.""" +REGEXP_BGP_IPV4_MPLS_LABELS = r"\b(ipv4[\s\-]?mpls[\s\-]?label(s)?)\b" +"""Match IPv4 MPLS Labels.""" +REGEX_BGP_IPV4_MPLS_VPN = r"\b(ipv4[\s\-]?mpls[\s\-]?vpn)\b" +"""Match IPv4 MPLS VPN.""" +REGEX_BGP_IPV4_UNICAST = r"\b(ipv4[\s\-]?uni[\s\-]?cast)\b" +"""Match IPv4 Unicast.""" def aaa_group_prefix(v: str) -> str: @@ -52,7 +58,7 @@ def interface_autocomplete(v: str) -> str: raise ValueError(msg) intf_id = m[0] - alias_map = {"et": "Ethernet", "eth": "Ethernet", "po": "Port-Channel", "lo": "Loopback", "vl": "Vlan"} + alias_map = {"et": "Ethernet", "eth": "Ethernet", "po": "Port-Channel", "lo": "Loopback"} return next((f"{full_name}{intf_id}" for alias, full_name in alias_map.items() if v.lower().startswith(alias)), v) @@ -75,57 +81,26 @@ def interface_case_sensitivity(v: str) -> str: def bgp_multiprotocol_capabilities_abbreviations(value: str) -> str: """Abbreviations for different BGP multiprotocol capabilities. - Handles different separators (hyphen, underscore, space) and case sensitivity. - Examples -------- - ```python - >>> bgp_multiprotocol_capabilities_abbreviations("IPv4 Unicast") - 'ipv4Unicast' - >>> bgp_multiprotocol_capabilities_abbreviations("ipv4-Flow_Spec Vpn") - 'ipv4FlowSpecVpn' - >>> bgp_multiprotocol_capabilities_abbreviations("ipv6_labeled-unicast") - 'ipv6MplsLabels' - >>> bgp_multiprotocol_capabilities_abbreviations("ipv4_mpls_vpn") - 'ipv4MplsVpn' - >>> bgp_multiprotocol_capabilities_abbreviations("ipv4 mpls labels") - 'ipv4MplsLabels' - >>> bgp_multiprotocol_capabilities_abbreviations("rt-membership") - 'rtMembership' - >>> bgp_multiprotocol_capabilities_abbreviations("dynamic-path-selection") - 'dps' - ``` + - IPv4 Unicast + - L2vpnEVPN + - ipv4 MPLS Labels + - ipv4Mplsvpn + """ patterns = { - f"{r'dynamic[-_ ]?path[-_ ]?selection$'}": "dps", - f"{r'dps$'}": "dps", - f"{REGEX_IPV4_UNICAST}": "ipv4Unicast", - f"{REGEX_IPV6_UNICAST}": "ipv6Unicast", - f"{REGEX_IPV4_MULTICAST}": "ipv4Multicast", - f"{REGEX_IPV6_MULTICAST}": "ipv6Multicast", - f"{r'ipv4[-_ ]?labeled[-_ ]?Unicast$'}": "ipv4MplsLabels", - f"{r'ipv4[-_ ]?mpls[-_ ]?labels$'}": "ipv4MplsLabels", - f"{r'ipv6[-_ ]?labeled[-_ ]?Unicast$'}": "ipv6MplsLabels", - f"{r'ipv6[-_ ]?mpls[-_ ]?labels$'}": "ipv6MplsLabels", - f"{r'ipv4[-_ ]?sr[-_ ]?te$'}": "ipv4SrTe", # codespell:ignore - f"{r'ipv6[-_ ]?sr[-_ ]?te$'}": "ipv6SrTe", # codespell:ignore - f"{r'ipv4[-_ ]?mpls[-_ ]?vpn$'}": "ipv4MplsVpn", - f"{r'ipv6[-_ ]?mpls[-_ ]?vpn$'}": "ipv6MplsVpn", - f"{r'ipv4[-_ ]?Flow[-_ ]?spec$'}": "ipv4FlowSpec", - f"{r'ipv6[-_ ]?Flow[-_ ]?spec$'}": "ipv6FlowSpec", - f"{r'ipv4[-_ ]?Flow[-_ ]?spec[-_ ]?vpn$'}": "ipv4FlowSpecVpn", - f"{r'ipv6[-_ ]?Flow[-_ ]?spec[-_ ]?vpn$'}": "ipv6FlowSpecVpn", - f"{r'l2[-_ ]?vpn[-_ ]?vpls$'}": "l2VpnVpls", - f"{r'l2[-_ ]?vpn[-_ ]?evpn$'}": "l2VpnEvpn", - f"{r'link[-_ ]?state$'}": "linkState", - f"{r'rt[-_ ]?membership$'}": "rtMembership", - f"{r'ipv4[-_ ]?rt[-_ ]?membership$'}": 
"rtMembership", - f"{r'ipv4[-_ ]?mvpn$'}": "ipv4Mvpn", + REGEXP_BGP_L2VPN_AFI: "l2VpnEvpn", + REGEXP_BGP_IPV4_MPLS_LABELS: "ipv4MplsLabels", + REGEX_BGP_IPV4_MPLS_VPN: "ipv4MplsVpn", + REGEX_BGP_IPV4_UNICAST: "ipv4Unicast", } + for pattern, replacement in patterns.items(): - match = re.match(pattern, value, re.IGNORECASE) + match = re.search(pattern, value, re.IGNORECASE) if match: return replacement + return value @@ -139,54 +114,6 @@ def validate_regex(value: str) -> str: return value -def bgp_redistributed_route_proto_abbreviations(value: str) -> str: - """Abbreviations for different BGP redistributed route protocols. - - Handles different separators (hyphen, underscore, space) and case sensitivity. - - Examples - -------- - ```python - >>> bgp_redistributed_route_proto_abbreviations("IPv4 Unicast") - 'v4u' - >>> bgp_redistributed_route_proto_abbreviations("IPv4-multicast") - 'v4m' - >>> bgp_redistributed_route_proto_abbreviations("IPv6_multicast") - 'v6m' - >>> bgp_redistributed_route_proto_abbreviations("ipv6unicast") - 'v6u' - ``` - """ - patterns = {REGEX_IPV4_UNICAST: "v4u", REGEX_IPV4_MULTICAST: "v4m", REGEX_IPV6_UNICAST: "v6u", REGEX_IPV6_MULTICAST: "v6m"} - - for pattern, replacement in patterns.items(): - match = re.match(pattern, value, re.IGNORECASE) - if match: - return replacement - - return value - - -def update_bgp_redistributed_proto_user(value: str) -> str: - """Update BGP redistributed route `User` proto with EOS SDK. - - Examples - -------- - ```python - >>> update_bgp_redistributed_proto_user("User") - 'EOS SDK' - >>> update_bgp_redistributed_proto_user("Bgp") - 'Bgp' - >>> update_bgp_redistributed_proto_user("RIP") - 'RIP' - ``` - """ - if value == "User": - value = "EOS SDK" - - return value - - # AntaTest.Input types AAAAuthMethod = Annotated[str, AfterValidator(aaa_group_prefix)] Vlan = Annotated[int, Field(ge=0, le=4094)] @@ -221,68 +148,22 @@ Safi = Literal["unicast", "multicast", "labeled-unicast", "sr-te"] EncryptionAlgorithm = Literal["RSA", "ECDSA"] RsaKeySize = Literal[2048, 3072, 4096] EcdsaKeySize = Literal[256, 384, 512] -MultiProtocolCaps = Annotated[ - Literal[ - "dps", - "ipv4Unicast", - "ipv6Unicast", - "ipv4Multicast", - "ipv6Multicast", - "ipv4MplsLabels", - "ipv6MplsLabels", - "ipv4SrTe", - "ipv6SrTe", - "ipv4MplsVpn", - "ipv6MplsVpn", - "ipv4FlowSpec", - "ipv6FlowSpec", - "ipv4FlowSpecVpn", - "ipv6FlowSpecVpn", - "l2VpnVpls", - "l2VpnEvpn", - "linkState", - "rtMembership", - "ipv4Mvpn", - ], - BeforeValidator(bgp_multiprotocol_capabilities_abbreviations), -] +MultiProtocolCaps = Annotated[str, BeforeValidator(bgp_multiprotocol_capabilities_abbreviations)] BfdInterval = Annotated[int, Field(ge=50, le=60000)] BfdMultiplier = Annotated[int, Field(ge=3, le=50)] ErrDisableReasons = Literal[ "acl", "arp-inspection", - "bgp-session-tracking", "bpduguard", - "dot1x", - "dot1x-coa", "dot1x-session-replace", - "evpn-sa-mh", - "fabric-link-failure", - "fabric-link-flap", "hitless-reload-down", - "lacp-no-portid", "lacp-rate-limit", - "license-enforce", "link-flap", - "mlagasu", - "mlagdualprimary", - "mlagissu", - "mlagmaintdown", "no-internal-vlan", - "out-of-voqs", "portchannelguard", - "portgroup-disabled", "portsec", - "speed-misconfigured", - "storm-control", - "stp-no-portid", - "stuck-queue", "tapagg", "uplink-failure-detection", - "xcvr-misconfigured", - "xcvr-overheat", - "xcvr-power-unsupported", - "xcvr-unsupported", ] ErrDisableInterval = Annotated[int, Field(ge=30, le=86400)] Percent = Annotated[float, Field(ge=0.0, le=100.0)] @@ -323,6 
+204,11 @@ BgpDropStats = Literal[ ] BgpUpdateError = Literal["inUpdErrWithdraw", "inUpdErrIgnore", "inUpdErrDisableAfiSafi", "disabledAfiSafi", "lastUpdErrTime"] BfdProtocol = Literal["bgp", "isis", "lag", "ospf", "ospfv3", "pim", "route-input", "static-bfd", "static-route", "vrrp", "vxlan"] +SnmpPdu = Literal["inGetPdus", "inGetNextPdus", "inSetPdus", "outGetResponsePdus", "outTrapPdus"] +SnmpErrorCounter = Literal[ + "inVersionErrs", "inBadCommunityNames", "inBadCommunityUses", "inParseErrs", "outTooBigErrs", "outNoSuchNameErrs", "outBadValueErrs", "outGeneralErrs" +] + IPv4RouteType = Literal[ "connected", "static", @@ -352,47 +238,3 @@ IPv4RouteType = Literal[ "Route Cache Route", "CBF Leaked Route", ] -DynamicVlanSource = Literal["dmf", "dot1x", "dynvtep", "evpn", "mlag", "mlagsync", "mvpn", "swfwd", "vccbfd"] -LogSeverityLevel = Literal["alerts", "critical", "debugging", "emergencies", "errors", "informational", "notifications", "warnings"] - - -######################################## -# SNMP -######################################## -def snmp_v3_prefix(auth_type: Literal["auth", "priv", "noauth"]) -> str: - """Prefix the SNMP authentication type with 'v3'.""" - if auth_type == "noauth": - return "v3NoAuth" - return f"v3{auth_type.title()}" - - -SnmpVersion = Literal["v1", "v2c", "v3"] -SnmpHashingAlgorithm = Literal["MD5", "SHA", "SHA-224", "SHA-256", "SHA-384", "SHA-512"] -SnmpEncryptionAlgorithm = Literal["AES-128", "AES-192", "AES-256", "DES"] -SnmpPdu = Literal["inGetPdus", "inGetNextPdus", "inSetPdus", "outGetResponsePdus", "outTrapPdus"] -SnmpErrorCounter = Literal[ - "inVersionErrs", "inBadCommunityNames", "inBadCommunityUses", "inParseErrs", "outTooBigErrs", "outNoSuchNameErrs", "outBadValueErrs", "outGeneralErrs" -] -SnmpVersionV3AuthType = Annotated[Literal["auth", "priv", "noauth"], AfterValidator(snmp_v3_prefix)] -RedistributedProtocol = Annotated[ - Literal[ - "AttachedHost", - "Bgp", - "Connected", - "DHCP", - "Dynamic", - "IS-IS", - "OSPF Internal", - "OSPF External", - "OSPF Nssa-External", - "OSPFv3 Internal", - "OSPFv3 External", - "OSPFv3 Nssa-External", - "RIP", - "Static", - "User", - ], - AfterValidator(update_bgp_redistributed_proto_user), -] -RedistributedAfiSafi = Annotated[Literal["v4u", "v4m", "v6u", "v6m"], BeforeValidator(bgp_redistributed_route_proto_abbreviations)] -NTPStratumLevel = Annotated[int, Field(ge=0, le=16)] diff --git a/anta/decorators.py b/anta/decorators.py index 0ca2be8..0431623 100644 --- a/anta/decorators.py +++ b/anta/decorators.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """decorators for tests.""" @@ -58,7 +58,7 @@ def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: # logger.warning("%s test is deprecated.", anta_test.name) return await function(*args, **kwargs) - return cast("F", wrapper) + return cast(F, wrapper) return decorator @@ -167,6 +167,6 @@ def skip_on_platforms(platforms: list[str]) -> Callable[[F], F]: return await function(*args, **kwargs) - return cast("F", wrapper) + return cast(F, wrapper) return decorator diff --git a/anta/device.py b/anta/device.py index 7c1e6f6..561323f 100644 --- a/anta/device.py +++ b/anta/device.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. 
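The `anta/custom_types.py` hunks above rely on one recurring pattern: an `Annotated` alias whose `BeforeValidator` normalizes free-form input (for example `"IPv4 Unicast"`) before the underlying type validates it. A small sketch of that pattern, with invented names and a simplified normalizer rather than ANTA's actual abbreviation function:

```python
from typing import Annotated, Literal

from pydantic import BaseModel
from pydantic.functional_validators import BeforeValidator


def normalize_afi_safi(value: str) -> str:
    """Collapse separators and case: 'IPv4 Unicast' -> 'ipv4Unicast'."""
    if not isinstance(value, str) or not value:
        return value
    words = value.replace("-", " ").replace("_", " ").lower().split()
    return words[0] + "".join(word.capitalize() for word in words[1:])


# The normalizer runs first, then the Literal validates the normalized result.
MultiProtocolCap = Annotated[
    Literal["ipv4Unicast", "ipv6Unicast", "ipv4MplsVpn"],
    BeforeValidator(normalize_afi_safi),
]


class PeerCapability(BaseModel):
    capability: MultiProtocolCap


print(PeerCapability(capability="IPv4 Unicast").capability)   # ipv4Unicast
print(PeerCapability(capability="ipv4_mpls_vpn").capability)  # ipv4MplsVpn
```

Constraining the alias with a `Literal` (as one side of the diff does) means typos in a test catalog are rejected at validation time instead of silently passed through as plain strings.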
# Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """ANTA Device Abstraction Module.""" @@ -8,12 +8,13 @@ from __future__ import annotations import asyncio import logging from abc import ABC, abstractmethod -from collections import OrderedDict, defaultdict -from time import monotonic +from collections import defaultdict from typing import TYPE_CHECKING, Any, Literal import asyncssh import httpcore +from aiocache import Cache +from aiocache.plugins import HitMissRatioPlugin from asyncssh import SSHClientConnection, SSHClientConnectionOptions from httpx import ConnectError, HTTPError, TimeoutException @@ -26,79 +27,12 @@ if TYPE_CHECKING: from collections.abc import Iterator from pathlib import Path - from asynceapi._types import EapiComplexCommand, EapiSimpleCommand - logger = logging.getLogger(__name__) # Do not load the default keypairs multiple times due to a performance issue introduced in cryptography 37.0 # https://github.com/pyca/cryptography/issues/7236#issuecomment-1131908472 CLIENT_KEYS = asyncssh.public_key.load_default_keypairs() -# Limit concurrency to 100 requests (HTTPX default) to avoid high-concurrency performance issues -# See: https://github.com/encode/httpx/issues/3215 -MAX_CONCURRENT_REQUESTS = 100 - - -class AntaCache: - """Class to be used as cache. - - Example - ------- - - ```python - # Create cache - cache = AntaCache("device1") - with cache.locks[key]: - command_output = cache.get(key) - ``` - """ - - def __init__(self, device: str, max_size: int = 128, ttl: int = 60) -> None: - """Initialize the cache.""" - self.device = device - self.cache: OrderedDict[str, Any] = OrderedDict() - self.locks: defaultdict[str, asyncio.Lock] = defaultdict(asyncio.Lock) - self.max_size = max_size - self.ttl = ttl - - # Stats - self.stats: dict[str, int] = {} - self._init_stats() - - def _init_stats(self) -> None: - """Initialize the stats.""" - self.stats["hits"] = 0 - self.stats["total"] = 0 - - async def get(self, key: str) -> Any: # noqa: ANN401 - """Return the cached entry for key.""" - self.stats["total"] += 1 - if key in self.cache: - timestamp, value = self.cache[key] - if monotonic() - timestamp < self.ttl: - # checking the value is still valid - self.cache.move_to_end(key) - self.stats["hits"] += 1 - return value - # Time expired - del self.cache[key] - del self.locks[key] - return None - - async def set(self, key: str, value: Any) -> bool: # noqa: ANN401 - """Set the cached entry for key to value.""" - timestamp = monotonic() - if len(self.cache) > self.max_size: - self.cache.popitem(last=False) - self.cache[key] = timestamp, value - return True - - def clear(self) -> None: - """Empty the cache.""" - logger.debug("Clearing cache for device %s", self.device) - self.cache = OrderedDict() - self._init_stats() - class AntaDevice(ABC): """Abstract class representing a device in ANTA. @@ -118,11 +52,10 @@ class AntaDevice(ABC): Hardware model of the device. tags : set[str] Tags for this device. - cache : AntaCache | None - In-memory cache for this device (None if cache is disabled). + cache : Cache | None + In-memory cache from aiocache library for this device (None if cache is disabled). cache_locks : dict Dictionary mapping keys to asyncio locks to guarantee exclusive access to the cache if not disabled. - Deprecated, will be removed in ANTA v2.0.0, use self.cache.locks instead. 
""" @@ -146,8 +79,7 @@ class AntaDevice(ABC): self.tags.add(self.name) self.is_online: bool = False self.established: bool = False - self.cache: AntaCache | None = None - # Keeping cache_locks for backward compatibility. + self.cache: Cache | None = None self.cache_locks: defaultdict[str, asyncio.Lock] | None = None # Initialize cache if not disabled @@ -169,16 +101,17 @@ class AntaDevice(ABC): def _init_cache(self) -> None: """Initialize cache for the device, can be overridden by subclasses to manipulate how it works.""" - self.cache = AntaCache(device=self.name, ttl=60) - self.cache_locks = self.cache.locks + self.cache = Cache(cache_class=Cache.MEMORY, ttl=60, namespace=self.name, plugins=[HitMissRatioPlugin()]) + self.cache_locks = defaultdict(asyncio.Lock) @property def cache_statistics(self) -> dict[str, Any] | None: """Return the device cache statistics for logging purposes.""" + # Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough + # https://github.com/pylint-dev/pylint/issues/7258 if self.cache is not None: - stats = self.cache.stats - ratio = stats["hits"] / stats["total"] if stats["total"] > 0 else 0 - return {"total_commands_sent": stats["total"], "cache_hits": stats["hits"], "cache_hit_ratio": f"{ratio * 100:.2f}%"} + stats = getattr(self.cache, "hit_miss_ratio", {"total": 0, "hits": 0, "hit_ratio": 0}) + return {"total_commands_sent": stats["total"], "cache_hits": stats["hits"], "cache_hit_ratio": f"{stats['hit_ratio'] * 100:.2f}%"} return None def __rich_repr__(self) -> Iterator[tuple[str, Any]]: @@ -244,16 +177,18 @@ class AntaDevice(ABC): collection_id An identifier used to build the eAPI request ID. """ - if self.cache is not None and command.use_cache: - async with self.cache.locks[command.uid]: - cached_output = await self.cache.get(command.uid) + # Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough + # https://github.com/pylint-dev/pylint/issues/7258 + if self.cache is not None and self.cache_locks is not None and command.use_cache: + async with self.cache_locks[command.uid]: + cached_output = await self.cache.get(command.uid) # pylint: disable=no-member if cached_output is not None: logger.debug("Cache hit for %s on %s", command.command, self.name) command.output = cached_output else: await self._collect(command=command, collection_id=collection_id) - await self.cache.set(command.uid, command.output) + await self.cache.set(command.uid, command.output) # pylint: disable=no-member else: await self._collect(command=command, collection_id=collection_id) @@ -302,7 +237,6 @@ class AntaDevice(ABC): raise NotImplementedError(msg) -# pylint: disable=too-many-instance-attributes class AsyncEOSDevice(AntaDevice): """Implementation of AntaDevice for EOS using aio-eapi. @@ -395,10 +329,6 @@ class AsyncEOSDevice(AntaDevice): host=host, port=ssh_port, username=username, password=password, client_keys=CLIENT_KEYS, **ssh_params ) - # In Python 3.9, Semaphore must be created within a running event loop - # TODO: Once we drop Python 3.9 support, initialize the semaphore here - self._command_semaphore: asyncio.Semaphore | None = None - def __rich_repr__(self) -> Iterator[tuple[str, Any]]: """Implement Rich Repr Protocol. @@ -442,15 +372,6 @@ class AsyncEOSDevice(AntaDevice): """ return (self._session.host, self._session.port) - async def _get_semaphore(self) -> asyncio.Semaphore: - """Return the semaphore, initializing it if needed. - - TODO: Remove this method once we drop Python 3.9 support. 
- """ - if self._command_semaphore is None: - self._command_semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS) - return self._command_semaphore - async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: """Collect device command output from EOS using aio-eapi. @@ -465,63 +386,57 @@ class AsyncEOSDevice(AntaDevice): collection_id An identifier used to build the eAPI request ID. """ - semaphore = await self._get_semaphore() - - async with semaphore: - commands: list[EapiComplexCommand | EapiSimpleCommand] = [] - if self.enable and self._enable_password is not None: - commands.append( - { - "cmd": "enable", - "input": str(self._enable_password), - }, - ) - elif self.enable: - # No password - commands.append({"cmd": "enable"}) - commands += [{"cmd": command.command, "revision": command.revision}] if command.revision else [{"cmd": command.command}] - try: - response = await self._session.cli( - commands=commands, - ofmt=command.ofmt, - version=command.version, - req_id=f"ANTA-{collection_id}-{id(command)}" if collection_id else f"ANTA-{id(command)}", - ) - # Do not keep response of 'enable' command - command.output = response[-1] - except asynceapi.EapiCommandError as e: - # This block catches exceptions related to EOS issuing an error. - self._log_eapi_command_error(command, e) - except TimeoutException as e: - # This block catches Timeout exceptions. - command.errors = [exc_to_str(e)] - timeouts = self._session.timeout.as_dict() - logger.error( - "%s occurred while sending a command to %s. Consider increasing the timeout.\nCurrent timeouts: Connect: %s | Read: %s | Write: %s | Pool: %s", - exc_to_str(e), - self.name, - timeouts["connect"], - timeouts["read"], - timeouts["write"], - timeouts["pool"], - ) - except (ConnectError, OSError) as e: - # This block catches OSError and socket issues related exceptions. - command.errors = [exc_to_str(e)] - # pylint: disable=no-member - if (isinstance(exc := e.__cause__, httpcore.ConnectError) and isinstance(os_error := exc.__context__, OSError)) or isinstance( - os_error := e, OSError - ): - if isinstance(os_error.__cause__, OSError): - os_error = os_error.__cause__ - logger.error("A local OS error occurred while connecting to %s: %s.", self.name, os_error) - else: - anta_log_exception(e, f"An error occurred while issuing an eAPI request to {self.name}", logger) - except HTTPError as e: - # This block catches most of the httpx Exceptions and logs a general message. - command.errors = [exc_to_str(e)] + commands: list[dict[str, str | int]] = [] + if self.enable and self._enable_password is not None: + commands.append( + { + "cmd": "enable", + "input": str(self._enable_password), + }, + ) + elif self.enable: + # No password + commands.append({"cmd": "enable"}) + commands += [{"cmd": command.command, "revision": command.revision}] if command.revision else [{"cmd": command.command}] + try: + response: list[dict[str, Any] | str] = await self._session.cli( + commands=commands, + ofmt=command.ofmt, + version=command.version, + req_id=f"ANTA-{collection_id}-{id(command)}" if collection_id else f"ANTA-{id(command)}", + ) # type: ignore[assignment] # multiple commands returns a list + # Do not keep response of 'enable' command + command.output = response[-1] + except asynceapi.EapiCommandError as e: + # This block catches exceptions related to EOS issuing an error. + self._log_eapi_command_error(command, e) + except TimeoutException as e: + # This block catches Timeout exceptions. 
+ command.errors = [exc_to_str(e)] + timeouts = self._session.timeout.as_dict() + logger.error( + "%s occurred while sending a command to %s. Consider increasing the timeout.\nCurrent timeouts: Connect: %s | Read: %s | Write: %s | Pool: %s", + exc_to_str(e), + self.name, + timeouts["connect"], + timeouts["read"], + timeouts["write"], + timeouts["pool"], + ) + except (ConnectError, OSError) as e: + # This block catches OSError and socket issues related exceptions. + command.errors = [exc_to_str(e)] + if (isinstance(exc := e.__cause__, httpcore.ConnectError) and isinstance(os_error := exc.__context__, OSError)) or isinstance(os_error := e, OSError): # pylint: disable=no-member + if isinstance(os_error.__cause__, OSError): + os_error = os_error.__cause__ + logger.error("A local OS error occurred while connecting to %s: %s.", self.name, os_error) + else: anta_log_exception(e, f"An error occurred while issuing an eAPI request to {self.name}", logger) - logger.debug("%s: %s", self.name, command) + except HTTPError as e: + # This block catches most of the httpx Exceptions and logs a general message. + command.errors = [exc_to_str(e)] + anta_log_exception(e, f"An error occurred while issuing an eAPI request to {self.name}", logger) + logger.debug("%s: %s", self.name, command) def _log_eapi_command_error(self, command: AntaCommand, e: asynceapi.EapiCommandError) -> None: """Appropriately log the eapi command error.""" diff --git a/anta/input_models/__init__.py b/anta/input_models/__init__.py index 5dbf827..5b8974c 100644 --- a/anta/input_models/__init__.py +++ b/anta/input_models/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Package related to all ANTA tests input models.""" diff --git a/anta/input_models/avt.py b/anta/input_models/avt.py index 44fd780..9219c2f 100644 --- a/anta/input_models/avt.py +++ b/anta/input_models/avt.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for AVT tests.""" @@ -33,4 +33,4 @@ class AVTPath(BaseModel): AVT CONTROL-PLANE-PROFILE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) """ - return f"AVT: {self.avt_name} VRF: {self.vrf} Destination: {self.destination} Next-hop: {self.next_hop}" + return f"AVT {self.avt_name} VRF: {self.vrf} (Destination: {self.destination}, Next-hop: {self.next_hop})" diff --git a/anta/input_models/bfd.py b/anta/input_models/bfd.py index 06838d0..9ccc625 100644 --- a/anta/input_models/bfd.py +++ b/anta/input_models/bfd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for BFD tests.""" @@ -31,10 +31,6 @@ class BFDPeer(BaseModel): """Multiplier of BFD peer. Required field in the `VerifyBFDPeersIntervals` test.""" protocols: list[BfdProtocol] | None = None """List of protocols to be verified. Required field in the `VerifyBFDPeersRegProtocols` test.""" - detection_time: int | None = None - """Detection time of BFD peer in milliseconds. Defines how long to wait without receiving BFD packets before declaring the peer session as down. 
- - Optional field in the `VerifyBFDPeersIntervals` test.""" def __str__(self) -> str: """Return a human-readable string representation of the BFDPeer for reporting.""" diff --git a/anta/input_models/connectivity.py b/anta/input_models/connectivity.py index 464d22a..e8f5553 100644 --- a/anta/input_models/connectivity.py +++ b/anta/input_models/connectivity.py @@ -1,11 +1,11 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for connectivity tests.""" from __future__ import annotations -from ipaddress import IPv4Address, IPv6Address +from ipaddress import IPv4Address from typing import Any from warnings import warn @@ -18,30 +18,29 @@ class Host(BaseModel): """Model for a remote host to ping.""" model_config = ConfigDict(extra="forbid") - destination: IPv4Address | IPv6Address - """Destination address to ping.""" - source: IPv4Address | IPv6Address | Interface - """Source address IP or egress interface to use.""" + destination: IPv4Address + """IPv4 address to ping.""" + source: IPv4Address | Interface + """IPv4 address source IP or egress interface to use.""" vrf: str = "default" - """VRF context.""" + """VRF context. Defaults to `default`.""" repeat: int = 2 - """Number of ping repetition.""" + """Number of ping repetition. Defaults to 2.""" size: int = 100 - """Specify datagram size.""" + """Specify datagram size. Defaults to 100.""" df_bit: bool = False - """Enable do not fragment bit in IP header.""" - reachable: bool = True - """Indicates whether the destination should be reachable.""" + """Enable do not fragment bit in IP header. Defaults to False.""" def __str__(self) -> str: """Return a human-readable string representation of the Host for reporting. Examples -------- - Host: 10.1.1.1 Source: 10.2.2.2 VRF: mgmt + Host 10.1.1.1 (src: 10.2.2.2, vrf: mgmt, size: 100B, repeat: 2) """ - return f"Host: {self.destination} Source: {self.source} VRF: {self.vrf}" + df_status = ", df-bit: enabled" if self.df_bit else "" + return f"Host {self.destination} (src: {self.source}, vrf: {self.vrf}, size: {self.size}B, repeat: {self.repeat}{df_status})" class LLDPNeighbor(BaseModel): @@ -60,10 +59,10 @@ class LLDPNeighbor(BaseModel): Examples -------- - Port: Ethernet1 Neighbor: DC1-SPINE2 Neighbor Port: Ethernet2 + Port Ethernet1 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet2) """ - return f"Port: {self.port} Neighbor: {self.neighbor_device} Neighbor Port: {self.neighbor_port}" + return f"Port {self.port} (Neighbor: {self.neighbor_device}, Neighbor Port: {self.neighbor_port})" class Neighbor(LLDPNeighbor): # pragma: no cover diff --git a/anta/input_models/cvx.py b/anta/input_models/cvx.py index e2f5f8e..4f93749 100644 --- a/anta/input_models/cvx.py +++ b/anta/input_models/cvx.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for CVX tests.""" diff --git a/anta/input_models/flow_tracking.py b/anta/input_models/flow_tracking.py deleted file mode 100644 index 5f4c25b..0000000 --- a/anta/input_models/flow_tracking.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. 
-"""Module containing input models for flow tracking tests.""" - -from __future__ import annotations - -from pydantic import BaseModel, ConfigDict - - -class FlowTracker(BaseModel): - """Flow Tracking model representing the tracker details.""" - - model_config = ConfigDict(extra="forbid") - name: str - """The name of the flow tracker.""" - record_export: RecordExport | None = None - """Configuration for record export, specifying details about timeouts.""" - exporters: list[Exporter] | None = None - """A list of exporters associated with the flow tracker.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the FlowTracker for reporting. - - Examples - -------- - Flow Tracker: FLOW-TRACKER - - """ - return f"Flow Tracker: {self.name}" - - -class RecordExport(BaseModel): - """Model representing the record export configuration for a flow tracker.""" - - model_config = ConfigDict(extra="forbid") - on_inactive_timeout: int - """The timeout in milliseconds for exporting flow records when the flow becomes inactive.""" - on_interval: int - """The interval in milliseconds for exporting flow records.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the RecordExport for reporting. - - Examples - -------- - Inactive Timeout: 60000, Active Interval: 300000 - - """ - return f"Inactive Timeout: {self.on_inactive_timeout} Active Interval: {self.on_interval}" - - -class Exporter(BaseModel): - """Model representing the exporter used for flow record export.""" - - model_config = ConfigDict(extra="forbid") - name: str - """The name of the exporter.""" - local_interface: str - """The local interface used by the exporter to send flow records.""" - template_interval: int - """The template interval, in milliseconds, for the exporter to refresh the flow template.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the Exporter for reporting. - - Examples - -------- - Exporter: CVP-TELEMETRY - - """ - return f"Exporter: {self.name}" diff --git a/anta/input_models/interfaces.py b/anta/input_models/interfaces.py index c9ecedb..9e33a2c 100644 --- a/anta/input_models/interfaces.py +++ b/anta/input_models/interfaces.py @@ -1,24 +1,19 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for interface tests.""" from __future__ import annotations -from ipaddress import IPv4Interface -from typing import Any, Literal -from warnings import warn +from typing import Literal -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from anta.custom_types import Interface, PortChannelInterface class InterfaceState(BaseModel): - """Model for an interface state. - - TODO: Need to review this class name in ANTA v2.0.0. - """ + """Model for an interface state.""" model_config = ConfigDict(extra="forbid") name: Interface @@ -38,16 +33,6 @@ class InterfaceState(BaseModel): Can be enabled in the `VerifyLACPInterfacesStatus` tests. """ - primary_ip: IPv4Interface | None = None - """Primary IPv4 address in CIDR notation. Required field in the `VerifyInterfaceIPv4` test.""" - secondary_ips: list[IPv4Interface] | None = None - """List of secondary IPv4 addresses in CIDR notation. Can be provided in the `VerifyInterfaceIPv4` test.""" - auto: bool = False - """The auto-negotiation status of the interface. 
Can be provided in the `VerifyInterfacesSpeed` test.""" - speed: float | None = Field(None, ge=1, le=1000) - """The speed of the interface in Gigabits per second. Valid range is 1 to 1000. Required field in the `VerifyInterfacesSpeed` test.""" - lanes: int | None = Field(None, ge=1, le=8) - """The number of lanes in the interface. Valid range is 1 to 8. Can be provided in the `VerifyInterfacesSpeed` test.""" def __str__(self) -> str: """Return a human-readable string representation of the InterfaceState for reporting. @@ -61,21 +46,3 @@ class InterfaceState(BaseModel): if self.portchannel is not None: base_string += f" Port-Channel: {self.portchannel}" return base_string - - -class InterfaceDetail(InterfaceState): # pragma: no cover - """Alias for the InterfaceState model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the InterfaceState model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the InterfaceState class, emitting a depreciation warning.""" - warn( - message="InterfaceDetail model is deprecated and will be removed in ANTA v2.0.0. Use the InterfaceState model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) diff --git a/anta/input_models/logging.py b/anta/input_models/logging.py deleted file mode 100644 index 977f1ab..0000000 --- a/anta/input_models/logging.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Module containing input models for logging tests.""" - -from __future__ import annotations - -from pydantic import BaseModel, Field - -from anta.custom_types import LogSeverityLevel, RegexString - - -class LoggingQuery(BaseModel): - """Logging query model representing the logging details.""" - - regex_match: RegexString - """Log regex pattern to be searched in last log entries.""" - last_number_messages: int = Field(ge=1, le=9999) - """Last number of messages to check in the logging buffers.""" - severity_level: LogSeverityLevel = "informational" - """Log severity level.""" diff --git a/anta/input_models/path_selection.py b/anta/input_models/path_selection.py deleted file mode 100644 index cf06c90..0000000 --- a/anta/input_models/path_selection.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. 
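The InterfaceDetail alias removed in the interfaces hunk above follows a common backward-compatibility pattern: subclass the replacement model and emit a DeprecationWarning from __init__. A self-contained sketch of that pattern (class and field names here are illustrative, not tied to the anta package):

    import warnings

    from pydantic import BaseModel


    class InterfaceState(BaseModel):
        """Replacement model."""

        name: str


    class InterfaceDetail(InterfaceState):
        """Deprecated alias kept for backward compatibility."""

        def __init__(self, **data: object) -> None:
            warnings.warn(
                "InterfaceDetail is deprecated, use InterfaceState instead.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            super().__init__(**data)


    # Emits a DeprecationWarning, then behaves exactly like InterfaceState.
    InterfaceDetail(name="Ethernet1")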
-"""Module containing input models for path-selection tests.""" - -from __future__ import annotations - -from ipaddress import IPv4Address - -from pydantic import BaseModel, ConfigDict - - -class DpsPath(BaseModel): - """Model for a list of DPS path entries.""" - - model_config = ConfigDict(extra="forbid") - peer: IPv4Address - """Static peer IPv4 address.""" - path_group: str - """Router path group name.""" - source_address: IPv4Address - """Source IPv4 address of path.""" - destination_address: IPv4Address - """Destination IPv4 address of path.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the DpsPath for reporting.""" - return f"Peer: {self.peer} PathGroup: {self.path_group} Source: {self.source_address} Destination: {self.destination_address}" diff --git a/anta/input_models/routing/__init__.py b/anta/input_models/routing/__init__.py index 772b4f9..e1188cc 100644 --- a/anta/input_models/routing/__init__.py +++ b/anta/input_models/routing/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Package related to routing tests input models.""" diff --git a/anta/input_models/routing/bgp.py b/anta/input_models/routing/bgp.py index 09def7f..57c8217 100644 --- a/anta/input_models/routing/bgp.py +++ b/anta/input_models/routing/bgp.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for routing BGP tests.""" @@ -6,13 +6,13 @@ from __future__ import annotations from ipaddress import IPv4Address, IPv4Network, IPv6Address -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any from warnings import warn from pydantic import BaseModel, ConfigDict, Field, PositiveInt, model_validator from pydantic_extra_types.mac_address import MacAddress -from anta.custom_types import Afi, BgpDropStats, BgpUpdateError, MultiProtocolCaps, RedistributedAfiSafi, RedistributedProtocol, Safi, Vni +from anta.custom_types import Afi, BgpDropStats, BgpUpdateError, MultiProtocolCaps, Safi, Vni if TYPE_CHECKING: import sys @@ -39,23 +39,6 @@ AFI_SAFI_EOS_KEY = { ("link-state", None): "linkState", } """Dictionary mapping AFI/SAFI to EOS key representation.""" -AFI_SAFI_MAPPINGS = {"v4u": "IPv4 Unicast", "v4m": "IPv4 Multicast", "v6u": "IPv6 Unicast", "v6m": "IPv6 Multicast"} -"""Dictionary mapping AFI/SAFI to EOS key representation for BGP redistributed route protocol.""" -IPV4_MULTICAST_SUPPORTED_PROTO = [ - "AttachedHost", - "Connected", - "IS-IS", - "OSPF Internal", - "OSPF External", - "OSPF Nssa-External", - "OSPFv3 Internal", - "OSPFv3 External", - "OSPFv3 Nssa-External", - "Static", -] -"""List of BGP redistributed route protocol, supported for IPv4 multicast address family.""" -IPV6_MULTICAST_SUPPORTED_PROTO = [proto for proto in IPV4_MULTICAST_SUPPORTED_PROTO if proto != "AttachedHost"] -"""List of BGP redistributed route protocol, supported for IPv6 multicast address family.""" class BgpAddressFamily(BaseModel): @@ -85,7 +68,8 @@ class BgpAddressFamily(BaseModel): check_peer_state: bool = False """Flag to check if the peers are established with negotiated AFI/SAFI. Defaults to `False`. 
- Can be enabled in the `VerifyBGPPeerCount` tests.""" + Can be enabled in the `VerifyBGPPeerCount` tests. + """ @model_validator(mode="after") def validate_inputs(self) -> Self: @@ -158,14 +142,12 @@ class BgpPeer(BaseModel): """IPv4 address of the BGP peer.""" vrf: str = "default" """Optional VRF for the BGP peer. Defaults to `default`.""" - peer_group: str | None = None - """Peer group of the BGP peer. Required field in the `VerifyBGPPeerGroup` test.""" advertised_routes: list[IPv4Network] | None = None """List of advertised routes in CIDR format. Required field in the `VerifyBGPExchangedRoutes` test.""" received_routes: list[IPv4Network] | None = None """List of received routes in CIDR format. Required field in the `VerifyBGPExchangedRoutes` test.""" capabilities: list[MultiProtocolCaps] | None = None - """List of BGP multiprotocol capabilities. Required field in the `VerifyBGPPeerMPCaps`, `VerifyBGPNlriAcceptance` tests.""" + """List of BGP multiprotocol capabilities. Required field in the `VerifyBGPPeerMPCaps` test.""" strict: bool = False """If True, requires exact match of the provided BGP multiprotocol capabilities. @@ -187,15 +169,9 @@ class BgpPeer(BaseModel): outbound_route_map: str | None = None """Outbound route map applied, defaults to None. Required field in the `VerifyBgpRouteMaps` test.""" maximum_routes: int | None = Field(default=None, ge=0, le=4294967294) - """The maximum allowable number of BGP routes. `0` means unlimited. Required field in the `VerifyBGPPeerRouteLimit` test""" + """The maximum allowable number of BGP routes, `0` means unlimited. Required field in the `VerifyBGPPeerRouteLimit` test""" warning_limit: int | None = Field(default=None, ge=0, le=4294967294) - """The warning limit for the maximum routes. `0` means no warning. - - Optional field in the `VerifyBGPPeerRouteLimit` test. If not provided, the test will not verify the warning limit.""" - ttl: int | None = Field(default=None, ge=1, le=255) - """The Time-To-Live (TTL). Required field in the `VerifyBGPPeerTtlMultiHops` test.""" - max_ttl_hops: int | None = Field(default=None, ge=1, le=255) - """The Max TTL hops. Required field in the `VerifyBGPPeerTtlMultiHops` test.""" + """Optional maximum routes warning limit. If not provided, it defaults to `0` meaning no warning limit.""" def __str__(self) -> str: """Return a human-readable string representation of the BgpPeer for reporting.""" @@ -231,159 +207,3 @@ class VxlanEndpoint(BaseModel): def __str__(self) -> str: """Return a human-readable string representation of the VxlanEndpoint for reporting.""" return f"Address: {self.address} VNI: {self.vni}" - - -class BgpRoute(BaseModel): - """Model representing BGP routes. - - Only IPv4 prefixes are supported for now. - """ - - model_config = ConfigDict(extra="forbid") - prefix: IPv4Network - """The IPv4 network address.""" - vrf: str = "default" - """Optional VRF for the BGP peer. Defaults to `default`.""" - paths: list[BgpRoutePath] | None = None - """A list of paths for the BGP route. Required field in the `VerifyBGPRoutePaths` test.""" - ecmp_count: int | None = None - """The expected number of ECMP paths for the BGP route. Required field in the `VerifyBGPRouteECMP` test.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the BgpRoute for reporting. 
- - Examples - -------- - - Prefix: 192.168.66.100/24 VRF: default - """ - return f"Prefix: {self.prefix} VRF: {self.vrf}" - - -class BgpRoutePath(BaseModel): - """Model representing a BGP route path.""" - - model_config = ConfigDict(extra="forbid") - nexthop: IPv4Address - """The next-hop IPv4 address for the path.""" - origin: Literal["Igp", "Egp", "Incomplete"] - """The BGP origin attribute of the route.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the RoutePath for reporting. - - Examples - -------- - - Next-hop: 192.168.66.101 Origin: Igp - """ - return f"Next-hop: {self.nexthop} Origin: {self.origin}" - - -class BgpVrf(BaseModel): - """Model representing a VRF in a BGP instance.""" - - vrf: str = "default" - """VRF context.""" - address_families: list[AddressFamilyConfig] - """List of address family configuration.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the BgpVrf for reporting. - - Examples - -------- - - VRF: default - """ - return f"VRF: {self.vrf}" - - -class RedistributedRouteConfig(BaseModel): - """Model representing a BGP redistributed route configuration.""" - - proto: RedistributedProtocol - """The redistributed protocol.""" - include_leaked: bool = False - """Flag to include leaked routes of the redistributed protocol while redistributing.""" - route_map: str | None = None - """Optional route map applied to the redistribution.""" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate that 'include_leaked' is not set when the redistributed protocol is AttachedHost, User, Dynamic, or RIP.""" - if self.include_leaked and self.proto in ["AttachedHost", "EOS SDK", "Dynamic", "RIP"]: - msg = f"'include_leaked' field is not supported for redistributed protocol '{self.proto}'" - raise ValueError(msg) - return self - - def __str__(self) -> str: - """Return a human-readable string representation of the RedistributedRouteConfig for reporting. - - Examples - -------- - - Proto: Connected, Include Leaked: True, Route Map: RM-CONN-2-BGP - """ - base_string = f"Proto: {self.proto}" - if self.include_leaked: - base_string += f", Include Leaked: {self.include_leaked}" - if self.route_map: - base_string += f", Route Map: {self.route_map}" - return base_string - - -class AddressFamilyConfig(BaseModel): - """Model representing a BGP address family configuration.""" - - afi_safi: RedistributedAfiSafi - """AFI/SAFI abbreviation per EOS.""" - redistributed_routes: list[RedistributedRouteConfig] - """List of redistributed route configuration.""" - - @model_validator(mode="after") - def validate_afi_safi_supported_routes(self) -> Self: - """Validate each address family supported redistributed protocol. - - Following table shows the supported redistributed routes for each address family. 
- - | IPv4 Unicast | IPv6 Unicast | IPv4 Multicast | IPv6 Multicast | - | ------------------------|-------------------------|------------------------|------------------------| - | AttachedHost | AttachedHost | AttachedHost | Connected | - | Bgp | Bgp | Connected | IS-IS | - | Connected | Connected | IS-IS | OSPF Internal | - | Dynamic | DHCP | OSPF Internal | OSPF External | - | IS-IS | Dynamic | OSPF External | OSPF Nssa-External | - | OSPF Internal | IS-IS | OSPF Nssa-External | OSPFv3 Internal | - | OSPF External | OSPFv3 Internal | OSPFv3 Internal | OSPFv3 External | - | OSPF Nssa-External | OSPFv3 External | OSPFv3 External | OSPFv3 Nssa-External | - | OSPFv3 Internal | OSPFv3 Nssa-External | OSPFv3 Nssa-External | Static | - | OSPFv3 External | Static | Static | | - | OSPFv3 Nssa-External | User | | | - | RIP | | | | - | Static | | | | - | User | | | | - """ - for routes_data in self.redistributed_routes: - if all([self.afi_safi == "v4u", routes_data.proto == "DHCP"]): - msg = f"Redistributed protocol 'DHCP' is not supported for address-family '{AFI_SAFI_MAPPINGS[self.afi_safi]}'" - raise ValueError(msg) - - if self.afi_safi == "v6u" and routes_data.proto in ["OSPF Internal", "OSPF External", "OSPF Nssa-External", "RIP"]: - msg = f"Redistributed protocol '{routes_data.proto}' is not supported for address-family '{AFI_SAFI_MAPPINGS[self.afi_safi]}'" - raise ValueError(msg) - - if self.afi_safi == "v4m" and routes_data.proto not in IPV4_MULTICAST_SUPPORTED_PROTO: - msg = f"Redistributed protocol '{routes_data.proto}' is not supported for address-family '{AFI_SAFI_MAPPINGS[self.afi_safi]}'" - raise ValueError(msg) - - if self.afi_safi == "v6m" and routes_data.proto not in IPV6_MULTICAST_SUPPORTED_PROTO: - msg = f"Redistributed protocol '{routes_data.proto}' is not supported for address-family '{AFI_SAFI_MAPPINGS[self.afi_safi]}'" - raise ValueError(msg) - - return self - - def __str__(self) -> str: - """Return a human-readable string representation of the AddressFamilyConfig for reporting. - - Examples - -------- - - AFI-SAFI: IPv4 Unicast - """ - return f"AFI-SAFI: {AFI_SAFI_MAPPINGS[self.afi_safi]}" diff --git a/anta/input_models/routing/generic.py b/anta/input_models/routing/generic.py index 72609fc..41c78a1 100644 --- a/anta/input_models/routing/generic.py +++ b/anta/input_models/routing/generic.py @@ -1,11 +1,11 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for generic routing tests.""" from __future__ import annotations -from ipaddress import IPv4Address, IPv4Network +from ipaddress import IPv4Network from pydantic import BaseModel, ConfigDict @@ -17,18 +17,12 @@ class IPv4Routes(BaseModel): model_config = ConfigDict(extra="forbid") prefix: IPv4Network - """IPv4 prefix in CIDR notation.""" + """The IPV4 network to validate the route type.""" vrf: str = "default" """VRF context. Defaults to `default` VRF.""" - route_type: IPv4RouteType | None = None - """Expected route type. Required field in the `VerifyIPv4RouteType` test.""" - nexthops: list[IPv4Address] | None = None - """A list of the next-hop IP addresses for the route. Required field in the `VerifyIPv4RouteNextHops` test.""" - strict: bool = False - """If True, requires exact matching of provided nexthop(s). 
- - Can be enabled in `VerifyIPv4RouteNextHops` test.""" + route_type: IPv4RouteType + """List of IPV4 Route type to validate the valid rout type.""" def __str__(self) -> str: - """Return a human-readable string representation of the IPv4Routes for reporting.""" + """Return a human-readable string representation of the IPv4RouteType for reporting.""" return f"Prefix: {self.prefix} VRF: {self.vrf}" diff --git a/anta/input_models/routing/isis.py b/anta/input_models/routing/isis.py deleted file mode 100644 index c0e2649..0000000 --- a/anta/input_models/routing/isis.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Module containing input models for routing IS-IS tests.""" - -from __future__ import annotations - -from ipaddress import IPv4Address, IPv4Network -from typing import Any, Literal -from warnings import warn - -from pydantic import BaseModel, ConfigDict - -from anta.custom_types import Interface - - -class ISISInstance(BaseModel): - """Model for an IS-IS instance.""" - - model_config = ConfigDict(extra="forbid") - name: str - """The name of the IS-IS instance.""" - vrf: str = "default" - """VRF context of the IS-IS instance.""" - dataplane: Literal["MPLS", "mpls", "unset"] = "MPLS" - """Configured SR data-plane for the IS-IS instance.""" - segments: list[Segment] | None = None - """List of IS-IS SR segments associated with the instance. Required field in the `VerifyISISSegmentRoutingAdjacencySegments` test.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the ISISInstance for reporting.""" - return f"Instance: {self.name} VRF: {self.vrf}" - - -class Segment(BaseModel): - """Model for an IS-IS segment.""" - - model_config = ConfigDict(extra="forbid") - interface: Interface - """Local interface name.""" - level: Literal[1, 2] = 2 - """IS-IS level of the segment.""" - sid_origin: Literal["dynamic", "configured"] = "dynamic" - """Origin of the segment ID.""" - address: IPv4Address - """Adjacency IPv4 address of the segment.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the Segment for reporting.""" - return f"Local Intf: {self.interface} Adj IP Address: {self.address}" - - -class ISISInterface(BaseModel): - """Model for an IS-IS enabled interface.""" - - model_config = ConfigDict(extra="forbid") - name: Interface - """Interface name.""" - vrf: str = "default" - """VRF context of the interface.""" - level: Literal[1, 2] = 2 - """IS-IS level of the interface.""" - count: int | None = None - """Expected number of IS-IS neighbors on this interface. Required field in the `VerifyISISNeighborCount` test.""" - mode: Literal["point-to-point", "broadcast", "passive"] | None = None - """IS-IS network type of the interface. Required field in the `VerifyISISInterfaceMode` test.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the ISISInterface for reporting.""" - return f"Interface: {self.name} VRF: {self.vrf} Level: {self.level}" - - -class InterfaceCount(ISISInterface): # pragma: no cover - """Alias for the ISISInterface model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the ISISInterface model. - - TODO: Remove this class in ANTA v2.0.0. 
- """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the InterfaceCount class, emitting a deprecation warning.""" - warn( - message="InterfaceCount model is deprecated and will be removed in ANTA v2.0.0. Use the ISISInterface model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) - - -class InterfaceState(ISISInterface): # pragma: no cover - """Alias for the ISISInterface model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the ISISInterface model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the InterfaceState class, emitting a deprecation warning.""" - warn( - message="InterfaceState model is deprecated and will be removed in ANTA v2.0.0. Use the ISISInterface model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) - - -class IsisInstance(ISISInstance): # pragma: no cover - """Alias for the ISISInstance model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the ISISInstance model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the IsisInstance class, emitting a deprecation warning.""" - warn( - message="IsisInstance model is deprecated and will be removed in ANTA v2.0.0. Use the ISISInstance model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) - - -class Tunnel(BaseModel): - """Model for a IS-IS SR tunnel.""" - - model_config = ConfigDict(extra="forbid") - endpoint: IPv4Network - """Endpoint of the tunnel.""" - vias: list[TunnelPath] | None = None - """Optional list of paths to reach the endpoint.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the Tunnel for reporting.""" - return f"Endpoint: {self.endpoint}" - - -class TunnelPath(BaseModel): - """Model for a IS-IS tunnel path.""" - - model_config = ConfigDict(extra="forbid") - nexthop: IPv4Address | None = None - """Nexthop of the tunnel.""" - type: Literal["ip", "tunnel"] | None = None - """Type of the tunnel.""" - interface: Interface | None = None - """Interface of the tunnel.""" - tunnel_id: Literal["TI-LFA", "ti-lfa", "unset"] | None = None - """Computation method of the tunnel.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the TunnelPath for reporting.""" - base_string = "" - if self.nexthop: - base_string += f" Next-hop: {self.nexthop}" - if self.type: - base_string += f" Type: {self.type}" - if self.interface: - base_string += f" Interface: {self.interface}" - if self.tunnel_id: - base_string += f" Tunnel ID: {self.tunnel_id}" - - return base_string.lstrip() - - -class Entry(Tunnel): # pragma: no cover - """Alias for the Tunnel model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the Tunnel model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the Entry class, emitting a deprecation warning.""" - warn( - message="Entry model is deprecated and will be removed in ANTA v2.0.0. Use the Tunnel model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) - - -class Vias(TunnelPath): # pragma: no cover - """Alias for the TunnelPath model to maintain backward compatibility. 
- - When initialized, it will emit a deprecation warning and call the TunnelPath model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the Vias class, emitting a deprecation warning.""" - warn( - message="Vias model is deprecated and will be removed in ANTA v2.0.0. Use the TunnelPath model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) diff --git a/anta/input_models/security.py b/anta/input_models/security.py index 79bdc17..373d897 100644 --- a/anta/input_models/security.py +++ b/anta/input_models/security.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for security tests.""" @@ -6,20 +6,10 @@ from __future__ import annotations from ipaddress import IPv4Address -from typing import TYPE_CHECKING, Any, ClassVar, get_args +from typing import Any from warnings import warn -from pydantic import BaseModel, ConfigDict, Field, model_validator - -from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, RsaKeySize - -if TYPE_CHECKING: - import sys - - if sys.version_info >= (3, 11): - from typing import Self - else: - from typing_extensions import Self +from pydantic import BaseModel, ConfigDict class IPSecPeer(BaseModel): @@ -53,107 +43,6 @@ class IPSecConn(BaseModel): """The IPv4 address of the destination in the security connection.""" -class APISSLCertificate(BaseModel): - """Model for an API SSL certificate.""" - - model_config = ConfigDict(extra="forbid") - certificate_name: str - """The name of the certificate to be verified.""" - expiry_threshold: int - """The expiry threshold of the certificate in days.""" - common_name: str - """The Common Name of the certificate.""" - encryption_algorithm: EncryptionAlgorithm - """The encryption algorithm used by the certificate.""" - key_size: RsaKeySize | EcdsaKeySize - """The key size (in bits) of the encryption algorithm.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the APISSLCertificate for reporting. - - Examples - -------- - - Certificate: SIGNING_CA.crt - """ - return f"Certificate: {self.certificate_name}" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the key size provided to the APISSLCertificates class. - - If encryption_algorithm is RSA then key_size should be in {2048, 3072, 4096}. - - If encryption_algorithm is ECDSA then key_size should be in {256, 384, 521}. - """ - if self.encryption_algorithm == "RSA" and self.key_size not in get_args(RsaKeySize): - msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for RSA encryption. Allowed sizes are {get_args(RsaKeySize)}." - raise ValueError(msg) - - if self.encryption_algorithm == "ECDSA" and self.key_size not in get_args(EcdsaKeySize): - msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for ECDSA encryption. Allowed sizes are {get_args(EcdsaKeySize)}." - raise ValueError(msg) - - return self - - -class ACLEntry(BaseModel): - """Model for an Access Control List (ACL) entry.""" - - model_config = ConfigDict(extra="forbid") - sequence: int = Field(ge=1, le=4294967295) - """Sequence number of the ACL entry, used to define the order of processing. Must be between 1 and 4294967295.""" - action: str - """Action of the ACL entry. 
Example: `deny ip any any`.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the ACLEntry for reporting. - - Examples - -------- - - Sequence: 10 - """ - return f"Sequence: {self.sequence}" - - -class ACL(BaseModel): - """Model for an Access Control List (ACL).""" - - model_config = ConfigDict(extra="forbid") - name: str - """Name of the ACL.""" - entries: list[ACLEntry] - """List of the ACL entries.""" - IPv4ACLEntry: ClassVar[type[ACLEntry]] = ACLEntry - """To maintain backward compatibility.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the ACL for reporting. - - Examples - -------- - - ACL name: Test - """ - return f"ACL name: {self.name}" - - -class IPv4ACL(ACL): # pragma: no cover - """Alias for the ACL model to maintain backward compatibility. - - When initialized, it will emit a deprecation warning and call the ACL model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the IPv4ACL class, emitting a deprecation warning.""" - warn( - message="IPv4ACL model is deprecated and will be removed in ANTA v2.0.0. Use the ACL model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) - - class IPSecPeers(IPSecPeer): # pragma: no cover """Alias for the IPSecPeers model to maintain backward compatibility. @@ -163,7 +52,7 @@ class IPSecPeers(IPSecPeer): # pragma: no cover """ def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the IPSecPeers class, emitting a deprecation warning.""" + """Initialize the IPSecPeer class, emitting a deprecation warning.""" warn( message="IPSecPeers model is deprecated and will be removed in ANTA v2.0.0. Use the IPSecPeer model instead.", category=DeprecationWarning, diff --git a/anta/input_models/services.py b/anta/input_models/services.py index 0c602c8..596a3e3 100644 --- a/anta/input_models/services.py +++ b/anta/input_models/services.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for services tests.""" @@ -6,13 +6,9 @@ from __future__ import annotations from ipaddress import IPv4Address, IPv6Address -from typing import Any, Literal -from warnings import warn from pydantic import BaseModel, ConfigDict, Field -from anta.custom_types import ErrDisableReasons - class DnsServer(BaseModel): """Model for a DNS server configuration.""" @@ -32,43 +28,4 @@ class DnsServer(BaseModel): -------- Server 10.0.0.1 (VRF: default, Priority: 1) """ - return f"Server {self.server_address} VRF: {self.vrf} Priority: {self.priority}" - - -class ErrdisableRecovery(BaseModel): - """Model for the error disable recovery functionality.""" - - model_config = ConfigDict(extra="forbid") - reason: ErrDisableReasons - """Name of the error disable reason.""" - status: Literal["Enabled", "Disabled"] = "Enabled" - """Operational status of the reason. Defaults to 'Enabled'.""" - interval: int = Field(ge=30, le=86400) - """Timer interval of the reason in seconds.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the ErrdisableRecovery for reporting. 
- - Examples - -------- - Reason: acl Status: Enabled Interval: 300 - """ - return f"Reason: {self.reason} Status: {self.status} Interval: {self.interval}" - - -class ErrDisableReason(ErrdisableRecovery): # pragma: no cover - """Alias for the ErrdisableRecovery model to maintain backward compatibility. - - When initialised, it will emit a deprecation warning and call the ErrdisableRecovery model. - - TODO: Remove this class in ANTA v2.0.0. - """ - - def __init__(self, **data: Any) -> None: # noqa: ANN401 - """Initialize the ErrdisableRecovery class, emitting a depreciation warning.""" - warn( - message="ErrDisableReason model is deprecated and will be removed in ANTA v2.0.0. Use the ErrdisableRecovery model instead.", - category=DeprecationWarning, - stacklevel=2, - ) - super().__init__(**data) + return f"Server {self.server_address} (VRF: {self.vrf}, Priority: {self.priority})" diff --git a/anta/input_models/snmp.py b/anta/input_models/snmp.py deleted file mode 100644 index d5f1408..0000000 --- a/anta/input_models/snmp.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Module containing input models for SNMP tests.""" - -from __future__ import annotations - -from ipaddress import IPv4Address -from typing import TYPE_CHECKING, Literal - -from pydantic import BaseModel, ConfigDict, model_validator - -from anta.custom_types import Hostname, Interface, Port, SnmpEncryptionAlgorithm, SnmpHashingAlgorithm, SnmpVersion, SnmpVersionV3AuthType - -if TYPE_CHECKING: - import sys - - if sys.version_info >= (3, 11): - from typing import Self - else: - from typing_extensions import Self - - -class SnmpHost(BaseModel): - """Model for a SNMP host.""" - - model_config = ConfigDict(extra="forbid") - hostname: IPv4Address | Hostname - """IPv4 address or Hostname of the SNMP notification host.""" - vrf: str = "default" - """Optional VRF for SNMP Hosts. If not provided, it defaults to `default`.""" - notification_type: Literal["trap", "inform"] = "trap" - """Type of SNMP notification (trap or inform), it defaults to trap.""" - version: SnmpVersion | None = None - """SNMP protocol version. Required field in the `VerifySnmpNotificationHost` test.""" - udp_port: Port | int = 162 - """UDP port for SNMP. If not provided then defaults to 162.""" - community_string: str | None = None - """Optional SNMP community string for authentication,required for SNMP version is v1 or v2c. Can be provided in the `VerifySnmpNotificationHost` test.""" - user: str | None = None - """Optional SNMP user for authentication, required for SNMP version v3. Can be provided in the `VerifySnmpNotificationHost` test.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the SnmpHost for reporting. - - Examples - -------- - - Host: 192.168.1.100 VRF: default - """ - return f"Host: {self.hostname} VRF: {self.vrf}" - - -class SnmpUser(BaseModel): - """Model for a SNMP User.""" - - model_config = ConfigDict(extra="forbid") - username: str - """SNMP user name.""" - group_name: str - """SNMP group for the user.""" - version: SnmpVersion - """SNMP protocol version.""" - auth_type: SnmpHashingAlgorithm | None = None - """User authentication algorithm. Can be provided in the `VerifySnmpUser` test.""" - priv_type: SnmpEncryptionAlgorithm | None = None - """User privacy algorithm. 
Can be provided in the `VerifySnmpUser` test.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the SnmpUser for reporting. - - Examples - -------- - - User: Test Group: Test_Group Version: v2c - """ - return f"User: {self.username} Group: {self.group_name} Version: {self.version}" - - -class SnmpSourceInterface(BaseModel): - """Model for a SNMP source-interface.""" - - interface: Interface - """Interface to use as the source IP address of SNMP messages.""" - vrf: str = "default" - """VRF of the source interface.""" - - def __str__(self) -> str: - """Return a human-readable string representation of the SnmpSourceInterface for reporting. - - Examples - -------- - - Source Interface: Ethernet1 VRF: default - """ - return f"Source Interface: {self.interface} VRF: {self.vrf}" - - -class SnmpGroup(BaseModel): - """Model for an SNMP group.""" - - group_name: str - """SNMP group name.""" - version: SnmpVersion - """SNMP protocol version.""" - read_view: str | None = None - """Optional field, View to restrict read access.""" - write_view: str | None = None - """Optional field, View to restrict write access.""" - notify_view: str | None = None - """Optional field, View to restrict notifications.""" - authentication: SnmpVersionV3AuthType | None = None - """SNMPv3 authentication settings. Required when version is v3. Can be provided in the `VerifySnmpGroup` test.""" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the SnmpGroup class.""" - if self.version == "v3" and self.authentication is None: - msg = f"{self!s}: `authentication` field is missing in the input" - raise ValueError(msg) - return self - - def __str__(self) -> str: - """Return a human-readable string representation of the SnmpGroup for reporting. - - Examples - -------- - - Group: Test_Group Version: v2c - """ - return f"Group: {self.group_name} Version: {self.version}" diff --git a/anta/input_models/stun.py b/anta/input_models/stun.py index 1d91567..d1af405 100644 --- a/anta/input_models/stun.py +++ b/anta/input_models/stun.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for services tests.""" diff --git a/anta/input_models/system.py b/anta/input_models/system.py index 3e098c4..7600d28 100644 --- a/anta/input_models/system.py +++ b/anta/input_models/system.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module containing input models for system tests.""" @@ -7,9 +7,9 @@ from __future__ import annotations from ipaddress import IPv4Address -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field -from anta.custom_types import Hostname, NTPStratumLevel +from anta.custom_types import Hostname class NTPServer(BaseModel): @@ -22,20 +22,10 @@ class NTPServer(BaseModel): For example, 'ntp.example.com' in the configuration might resolve to 'ntp3.example.com' in the device output.""" preferred: bool = False """Optional preferred for NTP server. 
If not provided, it defaults to `False`.""" - stratum: NTPStratumLevel + stratum: int = Field(ge=0, le=16) """NTP stratum level (0 to 15) where 0 is the reference clock and 16 indicates unsynchronized. Values should be between 0 and 15 for valid synchronization and 16 represents an out-of-sync state.""" def __str__(self) -> str: """Representation of the NTPServer model.""" - return f"NTP Server: {self.server_address} Preferred: {self.preferred} Stratum: {self.stratum}" - - -class NTPPool(BaseModel): - """Model for a NTP server pool.""" - - model_config = ConfigDict(extra="forbid") - server_addresses: list[Hostname | IPv4Address] - """The list of NTP server addresses as an IPv4 addresses or hostnames.""" - preferred_stratum_range: list[NTPStratumLevel] - """Preferred NTP stratum range for the NTP server pool. If the expected stratum range is 1 to 3 then preferred_stratum_range should be `[1,3]`.""" + return f"{self.server_address} (Preferred: {self.preferred}, Stratum: {self.stratum})" diff --git a/anta/inventory/__init__.py b/anta/inventory/__init__.py index a74638e..3046d7a 100644 --- a/anta/inventory/__init__.py +++ b/anta/inventory/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Inventory module for ANTA.""" @@ -8,16 +8,15 @@ from __future__ import annotations import asyncio import logging from ipaddress import ip_address, ip_network -from json import load as json_load from pathlib import Path -from typing import Any, ClassVar, Literal +from typing import Any, ClassVar from pydantic import ValidationError from yaml import YAMLError, safe_load from anta.device import AntaDevice, AsyncEOSDevice from anta.inventory.exceptions import InventoryIncorrectSchemaError, InventoryRootKeyError -from anta.inventory.models import AntaInventoryHost, AntaInventoryInput +from anta.inventory.models import AntaInventoryInput from anta.logger import anta_log_exception logger = logging.getLogger(__name__) @@ -27,7 +26,7 @@ class AntaInventory(dict[str, AntaDevice]): """Inventory abstraction for ANTA framework.""" # Root key of inventory part of the inventory file - INVENTORY_ROOT_KEY: str = "anta_inventory" + INVENTORY_ROOT_KEY = "anta_inventory" # Supported Output format INVENTORY_OUTPUT_FORMAT: ClassVar[list[str]] = ["native", "json"] @@ -179,7 +178,6 @@ class AntaInventory(dict[str, AntaDevice]): password: str, enable_password: str | None = None, timeout: float | None = None, - file_format: Literal["yaml", "json"] = "yaml", *, enable: bool = False, insecure: bool = False, @@ -201,8 +199,6 @@ class AntaInventory(dict[str, AntaDevice]): Enable password to use if required. timeout Timeout value in seconds for outgoing API calls. - file_format - Whether the inventory file is in JSON or YAML. enable Whether or not the commands need to be run in enable mode towards the devices. insecure @@ -218,10 +214,6 @@ class AntaInventory(dict[str, AntaDevice]): Inventory file is not following AntaInventory Schema. """ - if file_format not in ["yaml", "json"]: - message = f"'{file_format}' is not a valid format for an AntaInventory file. Only 'yaml' and 'json' are supported." 
- raise ValueError(message) - inventory = AntaInventory() kwargs: dict[str, Any] = { "username": username, @@ -232,12 +224,20 @@ class AntaInventory(dict[str, AntaDevice]): "insecure": insecure, "disable_cache": disable_cache, } + if username is None: + message = "'username' is required to create an AntaInventory" + logger.error(message) + raise ValueError(message) + if password is None: + message = "'password' is required to create an AntaInventory" + logger.error(message) + raise ValueError(message) try: filename = Path(filename) with filename.open(encoding="UTF-8") as file: - data = safe_load(file) if file_format == "yaml" else json_load(file) - except (TypeError, YAMLError, OSError, ValueError) as e: + data = safe_load(file) + except (TypeError, YAMLError, OSError) as e: message = f"Unable to parse ANTA Device Inventory file '{filename}'" anta_log_exception(e, message, logger) raise @@ -342,20 +342,3 @@ class AntaInventory(dict[str, AntaDevice]): if isinstance(r, Exception): message = "Error when refreshing inventory" anta_log_exception(r, message, logger) - - def dump(self) -> AntaInventoryInput: - """Dump the AntaInventory to an AntaInventoryInput. - - Each hosts is dumped individually. - """ - hosts = [ - AntaInventoryHost( - name=device.name, - host=device.host if hasattr(device, "host") else device.name, - port=device.port if hasattr(device, "port") else None, - tags=device.tags, - disable_cache=device.cache is None, - ) - for device in self.devices - ] - return AntaInventoryInput(hosts=hosts) diff --git a/anta/inventory/exceptions.py b/anta/inventory/exceptions.py index f7adaa7..90a672f 100644 --- a/anta/inventory/exceptions.py +++ b/anta/inventory/exceptions.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Manage Exception in Inventory module.""" diff --git a/anta/inventory/models.py b/anta/inventory/models.py index 493bad7..2eea701 100644 --- a/anta/inventory/models.py +++ b/anta/inventory/models.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Models related to inventory management.""" @@ -9,26 +9,14 @@ import logging import math import yaml -from pydantic import BaseModel, ConfigDict, FieldSerializationInfo, IPvAnyAddress, IPvAnyNetwork, field_serializer +from pydantic import BaseModel, ConfigDict, IPvAnyAddress, IPvAnyNetwork from anta.custom_types import Hostname, Port logger = logging.getLogger(__name__) -class AntaInventoryBaseModel(BaseModel): - """Pydantic BaseModel for AntaInventory objects.""" - - model_config = ConfigDict(extra="forbid") - - # Using check_fields as we plan to use this in the child classes - @field_serializer("tags", when_used="json", check_fields=False) - def serialize_tags(self, tags: set[str], _info: FieldSerializationInfo) -> list[str]: - """Make sure the tags are always dumped in the same order.""" - return sorted(tags) - - -class AntaInventoryHost(AntaInventoryBaseModel): +class AntaInventoryHost(BaseModel): """Host entry of AntaInventoryInput. 
Attributes @@ -46,6 +34,8 @@ class AntaInventoryHost(AntaInventoryBaseModel): """ + model_config = ConfigDict(extra="forbid") + name: str | None = None host: Hostname | IPvAnyAddress port: Port | None = None @@ -53,7 +43,7 @@ class AntaInventoryHost(AntaInventoryBaseModel): disable_cache: bool = False -class AntaInventoryNetwork(AntaInventoryBaseModel): +class AntaInventoryNetwork(BaseModel): """Network entry of AntaInventoryInput. Attributes @@ -67,12 +57,14 @@ class AntaInventoryNetwork(AntaInventoryBaseModel): """ + model_config = ConfigDict(extra="forbid") + network: IPvAnyNetwork tags: set[str] | None = None disable_cache: bool = False -class AntaInventoryRange(AntaInventoryBaseModel): +class AntaInventoryRange(BaseModel): """IP Range entry of AntaInventoryInput. Attributes @@ -88,6 +80,8 @@ class AntaInventoryRange(AntaInventoryBaseModel): """ + model_config = ConfigDict(extra="forbid") + start: IPvAnyAddress end: IPvAnyAddress tags: set[str] | None = None @@ -115,13 +109,4 @@ class AntaInventoryInput(BaseModel): # This could be improved. # https://github.com/pydantic/pydantic/issues/1043 # Explore if this worth using this: https://github.com/NowanIlfideme/pydantic-yaml - return yaml.safe_dump(yaml.safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), width=math.inf) - - def to_json(self) -> str: - """Return a JSON representation string of this model. - - Returns - ------- - The JSON representation string of this model. - """ - return self.model_dump_json(serialize_as_any=True, exclude_unset=True, indent=2) + return yaml.safe_dump(yaml.safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), indent=2, width=math.inf) diff --git a/anta/logger.py b/anta/logger.py index e6d0428..54733fb 100644 --- a/anta/logger.py +++ b/anta/logger.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
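A hedged sketch (not part of the patch) of dumping the inventory input model from the hunk above, assuming the '+' side where yaml() is the only dump helper; the host name and tag are made up:

    from anta.inventory.models import AntaInventoryHost, AntaInventoryInput

    inventory_input = AntaInventoryInput(hosts=[AntaInventoryHost(host="leaf1.example.com", tags={"leaf"})])
    print(inventory_input.yaml())  # YAML document with a single 'hosts' entry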
"""Configure logging for ANTA.""" @@ -9,13 +9,15 @@ import logging import traceback from datetime import timedelta from enum import Enum -from pathlib import Path -from typing import Literal +from typing import TYPE_CHECKING, Literal from rich.logging import RichHandler from anta import __DEBUG__ +if TYPE_CHECKING: + from pathlib import Path + logger = logging.getLogger(__name__) @@ -67,59 +69,27 @@ def setup_logging(level: LogLevel = Log.INFO, file: Path | None = None) -> None: # httpx as well logging.getLogger("httpx").setLevel(logging.WARNING) - # Add RichHandler for stdout if not already present - _maybe_add_rich_handler(loglevel, root) - - # Add FileHandler if file is provided and same File Handler is not already present - if file and not _get_file_handler(root, file): + # Add RichHandler for stdout + rich_handler = RichHandler(markup=True, rich_tracebacks=True, tracebacks_show_locals=False) + # Show Python module in stdout at DEBUG level + fmt_string = "[grey58]\\[%(name)s][/grey58] %(message)s" if loglevel == logging.DEBUG else "%(message)s" + formatter = logging.Formatter(fmt=fmt_string, datefmt="[%X]") + rich_handler.setFormatter(formatter) + root.addHandler(rich_handler) + # Add FileHandler if file is provided + if file: file_handler = logging.FileHandler(file) formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") file_handler.setFormatter(formatter) root.addHandler(file_handler) # If level is DEBUG and file is provided, do not send DEBUG level to stdout - if loglevel == logging.DEBUG and (rich_handler := _get_rich_handler(root)) is not None: + if loglevel == logging.DEBUG: rich_handler.setLevel(logging.INFO) if __DEBUG__: logger.debug("ANTA Debug Mode enabled") -def _get_file_handler(logger_instance: logging.Logger, file: Path) -> logging.FileHandler | None: - """Return the FileHandler if present.""" - return ( - next( - ( - handler - for handler in logger_instance.handlers - if isinstance(handler, logging.FileHandler) and str(Path(handler.baseFilename).resolve()) == str(file.resolve()) - ), - None, - ) - if logger_instance.hasHandlers() - else None - ) - - -def _get_rich_handler(logger_instance: logging.Logger) -> logging.Handler | None: - """Return the ANTA Rich Handler.""" - return next((handler for handler in logger_instance.handlers if handler.get_name() == "ANTA_RICH_HANDLER"), None) if logger_instance.hasHandlers() else None - - -def _maybe_add_rich_handler(loglevel: int, logger_instance: logging.Logger) -> None: - """Add RichHandler for stdout if not already present.""" - if _get_rich_handler(logger_instance) is not None: - # Nothing to do. - return - - anta_rich_handler = RichHandler(markup=True, rich_tracebacks=True, tracebacks_show_locals=False) - anta_rich_handler.set_name("ANTA_RICH_HANDLER") - # Show Python module in stdout at DEBUG level - fmt_string = "[grey58]\\[%(name)s][/grey58] %(message)s" if loglevel == logging.DEBUG else "%(message)s" - formatter = logging.Formatter(fmt=fmt_string, datefmt="[%X]") - anta_rich_handler.setFormatter(formatter) - logger_instance.addHandler(anta_rich_handler) - - def format_td(seconds: float, digits: int = 3) -> str: """Return a formatted string from a float number representing seconds and a number of digits.""" isec, fsec = divmod(round(seconds * 10**digits), 10**digits) diff --git a/anta/models.py b/anta/models.py index 172f032..c69f78e 100644 --- a/anta/models.py +++ b/anta/models.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. 
+# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Models to define a TestStructure.""" @@ -15,8 +15,9 @@ from typing import TYPE_CHECKING, Any, Callable, ClassVar, Literal, TypeVar from pydantic import BaseModel, ConfigDict, ValidationError, create_model -from anta.constants import EOS_BLACKLIST_CMDS, KNOWN_EOS_ERRORS, UNSUPPORTED_PLATFORM_ERRORS -from anta.custom_types import Revision +from anta import GITHUB_SUGGESTION +from anta.constants import KNOWN_EOS_ERRORS +from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision from anta.logger import anta_log_exception, exc_to_str from anta.result_manager.models import AntaTestStatus, TestResult @@ -80,7 +81,7 @@ class AntaTemplate: # Create a AntaTemplateParams model to elegantly store AntaTemplate variables field_names = [fname for _, fname, _, _ in Formatter().parse(self.template) if fname] # Extracting the type from the params based on the expected field_names from the template - fields: dict[str, Any] = dict.fromkeys(field_names, (Any, ...)) + fields: dict[str, Any] = {key: (Any, ...) for key in field_names} self.params_schema = create_model( "AntaParams", __base__=AntaParamsBaseModel, @@ -257,8 +258,7 @@ class AntaCommand(BaseModel): msg = f"Command '{self.command}' has not been collected and has not returned an error. Call AntaDevice.collect()." raise RuntimeError(msg) - - return not any(any(error in e for error in UNSUPPORTED_PLATFORM_ERRORS) for e in self.errors) + return all("not supported on this hardware platform" not in e for e in self.errors) @property def returned_known_eos_error(self) -> bool: @@ -432,7 +432,7 @@ class AntaTest(ABC): inputs: dict[str, Any] | AntaTest.Input | None = None, eos_data: list[dict[Any, Any] | str] | None = None, ) -> None: - """Initialize an AntaTest instance. + """AntaTest Constructor. Parameters ---------- @@ -575,12 +575,12 @@ class AntaTest(ABC): """Check if CLI commands contain a blocked keyword.""" state = False for command in self.instance_commands: - for pattern in EOS_BLACKLIST_CMDS: + for pattern in REGEXP_EOS_BLACKLIST_CMDS: if re.match(pattern, command.command): self.logger.error( "Command <%s> is blocked for security reason matching %s", command.command, - EOS_BLACKLIST_CMDS, + REGEXP_EOS_BLACKLIST_CMDS, ) self.result.is_error(f"<{command.command}> is blocked for security reason") state = True @@ -683,6 +683,8 @@ class AntaTest(ABC): cmds = self.failed_commands unsupported_commands = [f"'{c.command}' is not supported on {self.device.hw_model}" for c in cmds if not c.supported] if unsupported_commands: + msg = f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}" + self.logger.warning(msg) self.result.is_skipped("\n".join(unsupported_commands)) return returned_known_eos_error = [f"'{c.command}' failed on {self.device.name}: {', '.join(c.errors)}" for c in cmds if c.returned_known_eos_error] diff --git a/anta/reporter/__init__.py b/anta/reporter/__init__.py index 5156ea7..9e5fa1b 100644 --- a/anta/reporter/__init__.py +++ b/anta/reporter/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
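The blocked-command check in the models hunk above boils down to matching each collected CLI command against a list of regular expressions. A standalone approximation (the patterns below are illustrative; the authoritative list is REGEXP_EOS_BLACKLIST_CMDS / EOS_BLACKLIST_CMDS in the package):

    import re

    # Illustrative approximation of the EOS command blacklist.
    BLACKLIST_PATTERNS = [r"^reload.*", r"^conf\w*\s*(terminal|session)*", r"^wr\w*\s*\w+"]


    def is_blocked(command: str) -> bool:
        """Return True when a CLI command matches a blacklisted pattern."""
        return any(re.match(pattern, command) for pattern in BLACKLIST_PATTERNS)


    print(is_blocked("write memory"))  # True
    print(is_blocked("show version"))  # False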
"""Report management for ANTA.""" @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) class ReportTable: """TableReport Generate a Table based on TestResult.""" - @dataclass + @dataclass() class Headers: # pylint: disable=too-many-instance-attributes """Headers for the table report.""" @@ -168,7 +168,7 @@ class ReportTable: self.Headers.list_of_error_nodes, ] table = self._build_headers(headers=headers, table=table) - for test, stats in manager.test_stats.items(): + for test, stats in sorted(manager.test_stats.items()): if tests is None or test in tests: table.add_row( test, @@ -214,7 +214,7 @@ class ReportTable: self.Headers.list_of_error_tests, ] table = self._build_headers(headers=headers, table=table) - for device, stats in manager.device_stats.items(): + for device, stats in sorted(manager.device_stats.items()): if devices is None or device in devices: table.add_row( device, diff --git a/anta/reporter/csv_reporter.py b/anta/reporter/csv_reporter.py index 2a0a4de..3f55923 100644 --- a/anta/reporter/csv_reporter.py +++ b/anta/reporter/csv_reporter.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """CSV Report management for ANTA.""" @@ -8,7 +8,6 @@ from __future__ import annotations import csv import logging -import os from dataclasses import dataclass from typing import TYPE_CHECKING @@ -112,7 +111,6 @@ class ReportCsv: csvwriter = csv.writer( csvfile, delimiter=",", - lineterminator=os.linesep, ) csvwriter.writerow(headers) for entry in results.results: diff --git a/anta/reporter/md_reporter.py b/anta/reporter/md_reporter.py index 2d2d882..94c4a86 100644 --- a/anta/reporter/md_reporter.py +++ b/anta/reporter/md_reporter.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Markdown report generator for ANTA test results.""" @@ -177,8 +177,8 @@ class MDReportBase(ABC): if text is None: return "" - # Replace newlines with
<br> to preserve line breaks in HTML - text = text.replace("\n", "<br>
") + # Replace newlines with spaces to keep content on one line + text = text.replace("\n", " ") # Replace backticks with single quotes return text.replace("`", "'") @@ -237,7 +237,7 @@ class SummaryTotalsDeviceUnderTest(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the summary totals device under test table.""" for device, stat in self.results.device_stats.items(): - total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + stat.tests_unset_count + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count categories_skipped = ", ".join(sorted(convert_categories(list(stat.categories_skipped)))) categories_failed = ", ".join(sorted(convert_categories(list(stat.categories_failed)))) yield ( @@ -261,11 +261,10 @@ class SummaryTotalsPerCategory(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the summary totals per category table.""" - for category, stat in self.results.category_stats.items(): - converted_category = convert_categories([category])[0] - total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + stat.tests_unset_count + for category, stat in self.results.sorted_category_stats.items(): + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count yield ( - f"| {converted_category} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} " + f"| {category} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} " f"| {stat.tests_error_count} |\n" ) @@ -285,9 +284,9 @@ class TestResults(MDReportBase): def generate_rows(self) -> Generator[str, None, None]: """Generate the rows of the all test results table.""" - for result in self.results.results: - messages = self.safe_markdown(result.messages[0]) if len(result.messages) == 1 else self.safe_markdown("
".join(result.messages)) - categories = ", ".join(sorted(convert_categories(result.categories))) + for result in self.results.get_results(sort_by=["name", "test"]): + messages = self.safe_markdown(", ".join(result.messages)) + categories = ", ".join(convert_categories(result.categories)) yield ( f"| {result.name or '-'} | {categories or '-'} | {result.test or '-'} " f"| {result.description or '-'} | {self.safe_markdown(result.custom_field) or '-'} | {result.result or '-'} | {messages or '-'} |\n" diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index 39ed364..b5b0f39 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Result Manager module for ANTA.""" @@ -12,8 +12,6 @@ from functools import cached_property from itertools import chain from typing import Any -from typing_extensions import deprecated - from anta.result_manager.models import AntaTestStatus, TestResult from .models import CategoryStats, DeviceStats, TestStats @@ -23,40 +21,56 @@ logger = logging.getLogger(__name__) # pylint: disable=too-many-instance-attributes class ResultManager: - """Manager of ANTA Results. + """Helper to manage Test Results and generate reports. - The status of the class is initialized to "unset" + Examples + -------- + Create Inventory: - Then when adding a test with a status that is NOT 'error' the following - table shows the updated status: + inventory_anta = AntaInventory.parse( + filename='examples/inventory.yml', + username='ansible', + password='ansible', + ) - | Current Status | Added test Status | Updated Status | - | -------------- | ------------------------------- | -------------- | - | unset | Any | Any | - | skipped | unset, skipped | skipped | - | skipped | success | success | - | skipped | failure | failure | - | success | unset, skipped, success | success | - | success | failure | failure | - | failure | unset, skipped success, failure | failure | + Create Result Manager: - If the status of the added test is error, the status is untouched and the - `error_status` attribute is set to True. + manager = ResultManager() - Attributes - ---------- - results - dump - status - Status rerpesenting all the results. - error_status - Will be `True` if a test returned an error. - results_by_status - dump - json - device_stats - category_stats - test_stats + Run tests for all connected devices: + + for device in inventory_anta.get_inventory().devices: + manager.add( + VerifyNTP(device=device).test() + ) + manager.add( + VerifyEOSVersion(device=device).test(version='4.28.3M') + ) + + Print result in native format: + + manager.results + [ + TestResult( + name="pf1", + test="VerifyZeroTouch", + categories=["configuration"], + description="Verifies ZeroTouch is disabled", + result="success", + messages=[], + custom_field=None, + ), + TestResult( + name="pf1", + test='VerifyNTP', + categories=["software"], + categories=['system'], + description='Verifies if NTP is synchronised.', + result='failure', + messages=["The device is not synchronized with the configured NTP server(s): 'NTP is disabled.'"], + custom_field=None, + ), + ] """ _result_entries: list[TestResult] @@ -69,7 +83,26 @@ class ResultManager: _stats_in_sync: bool def __init__(self) -> None: - """Initialize a ResultManager instance.""" + """Class constructor. 
+ + The status of the class is initialized to "unset" + + Then when adding a test with a status that is NOT 'error' the following + table shows the updated status: + + | Current Status | Added test Status | Updated Status | + | -------------- | ------------------------------- | -------------- | + | unset | Any | Any | + | skipped | unset, skipped | skipped | + | skipped | success | success | + | skipped | failure | failure | + | success | unset, skipped, success | success | + | success | failure | failure | + | failure | unset, skipped success, failure | failure | + + If the status of the added test is error, the status is untouched and the + error_status is set to True. + """ self.reset() def reset(self) -> None: @@ -110,28 +143,28 @@ class ResultManager: return json.dumps(self.dump, indent=4) @property - def device_stats(self) -> dict[str, DeviceStats]: + def device_stats(self) -> defaultdict[str, DeviceStats]: """Get the device statistics.""" self._ensure_stats_in_sync() - return dict(sorted(self._device_stats.items())) + return self._device_stats @property - def category_stats(self) -> dict[str, CategoryStats]: + def category_stats(self) -> defaultdict[str, CategoryStats]: """Get the category statistics.""" self._ensure_stats_in_sync() - return dict(sorted(self._category_stats.items())) + return self._category_stats @property - def test_stats(self) -> dict[str, TestStats]: + def test_stats(self) -> defaultdict[str, TestStats]: """Get the test statistics.""" self._ensure_stats_in_sync() - return dict(sorted(self._test_stats.items())) + return self._test_stats @property - @deprecated("This property is deprecated, use `category_stats` instead. This will be removed in ANTA v2.0.0.", category=DeprecationWarning) def sorted_category_stats(self) -> dict[str, CategoryStats]: """A property that returns the category_stats dictionary sorted by key name.""" - return self.category_stats + self._ensure_stats_in_sync() + return dict(sorted(self.category_stats.items())) @cached_property def results_by_status(self) -> dict[AntaTestStatus, list[TestResult]]: @@ -283,21 +316,6 @@ class ResultManager: """Return the current status including error_status if ignore_error is False.""" return "error" if self.error_status and not ignore_error else self.status - def sort(self, sort_by: list[str]) -> ResultManager: - """Sort the ResultManager results based on TestResult fields. - - Parameters - ---------- - sort_by - List of TestResult fields to sort the results. - """ - accepted_fields = TestResult.model_fields.keys() - if not set(sort_by).issubset(set(accepted_fields)): - msg = f"Invalid sort_by fields: {sort_by}. Accepted fields are: {list(accepted_fields)}" - raise ValueError(msg) - self._result_entries.sort(key=lambda result: [getattr(result, field) for field in sort_by]) - return self - def filter(self, hide: set[AntaTestStatus]) -> ResultManager: """Get a filtered ResultManager based on test status. @@ -316,7 +334,6 @@ class ResultManager: manager.results = self.get_results(possible_statuses - hide) return manager - @deprecated("This method is deprecated. This will be removed in ANTA v2.0.0.", category=DeprecationWarning) def filter_by_tests(self, tests: set[str]) -> ResultManager: """Get a filtered ResultManager that only contains specific tests. @@ -334,7 +351,6 @@ class ResultManager: manager.results = [result for result in self._result_entries if result.test in tests] return manager - @deprecated("This method is deprecated. 
This will be removed in ANTA v2.0.0.", category=DeprecationWarning) def filter_by_devices(self, devices: set[str]) -> ResultManager: """Get a filtered ResultManager that only contains specific devices. @@ -352,7 +368,6 @@ class ResultManager: manager.results = [result for result in self._result_entries if result.name in devices] return manager - @deprecated("This method is deprecated. This will be removed in ANTA v2.0.0.", category=DeprecationWarning) def get_tests(self) -> set[str]: """Get the set of all the test names. @@ -363,7 +378,6 @@ class ResultManager: """ return {str(result.test) for result in self._result_entries} - @deprecated("This method is deprecated. This will be removed in ANTA v2.0.0.", category=DeprecationWarning) def get_devices(self) -> set[str]: """Get the set of all the device names. diff --git a/anta/result_manager/models.py b/anta/result_manager/models.py index a18ff57..3297581 100644 --- a/anta/result_manager/models.py +++ b/anta/result_manager/models.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Models related to anta.result_manager module.""" diff --git a/anta/runner.py b/anta/runner.py index 84e27a1..4c6da92 100644 --- a/anta/runner.py +++ b/anta/runner.py @@ -1,7 +1,7 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. -"""ANTA runner module.""" +"""ANTA runner function.""" from __future__ import annotations @@ -115,7 +115,7 @@ async def setup_inventory(inventory: AntaInventory, tags: set[str] | None, devic # If there are no devices in the inventory after filtering, exit if not selected_inventory.devices: - msg = f"No reachable device {f'matching the tags {tags} ' if tags else ''}was found.{f' Selected devices: {devices} ' if devices is not None else ''}" + msg = f'No reachable device {f"matching the tags {tags} " if tags else ""}was found.{f" Selected devices: {devices} " if devices is not None else ""}' logger.warning(msg) return None @@ -170,7 +170,8 @@ def prepare_tests( if total_test_count == 0: msg = ( - f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs." + f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current " + "test catalog and device inventory, please verify your inputs." ) logger.warning(msg) return None diff --git a/anta/tests/__init__.py b/anta/tests/__init__.py index 15362fc..ec0b1ec 100644 --- a/anta/tests/__init__.py +++ b/anta/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to all ANTA tests.""" diff --git a/anta/tests/aaa.py b/anta/tests/aaa.py index a135fca..019bf1a 100644 --- a/anta/tests/aaa.py +++ b/anta/tests/aaa.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
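The status table in the `ResultManager` docstring above boils down to a precedence rule. A minimal sketch of that rule only (the real logic lives in `ResultManager.add()` and also maintains the separate `error_status` flag):

```python
# Precedence implied by the docstring table: unset < skipped < success < failure.
STATUS_PRECEDENCE = {"unset": 0, "skipped": 1, "success": 2, "failure": 3}


def merge_status(current: str, added: str) -> str:
    """Return the manager status after adding one test result."""
    if added == "error":  # an error flips error_status instead of changing the status
        return current
    return max(current, added, key=STATUS_PRECEDENCE.__getitem__)


assert merge_status("unset", "skipped") == "skipped"
assert merge_status("skipped", "success") == "success"
assert merge_status("success", "skipped") == "success"
assert merge_status("success", "failure") == "failure"
```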
"""Module related to the EOS various AAA tests.""" @@ -51,12 +51,12 @@ class VerifyTacacsSourceIntf(AntaTest): """Main test function for VerifyTacacsSourceIntf.""" command_output = self.instance_commands[0].json_output try: - if (src_interface := command_output["srcIntf"][self.inputs.vrf]) == self.inputs.intf: + if command_output["srcIntf"][self.inputs.vrf] == self.inputs.intf: self.result.is_success() else: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Source interface mismatch - Expected: {self.inputs.intf} Actual: {src_interface}") + self.result.is_failure(f"Wrong source-interface configured in VRF {self.inputs.vrf}") except KeyError: - self.result.is_failure(f"VRF: {self.inputs.vrf} Source Interface: {self.inputs.intf} - Not configured") + self.result.is_failure(f"Source-interface {self.inputs.intf} is not configured in VRF {self.inputs.vrf}") class VerifyTacacsServers(AntaTest): @@ -108,7 +108,7 @@ class VerifyTacacsServers(AntaTest): if not not_configured: self.result.is_success() else: - self.result.is_failure(f"TACACS servers {', '.join(not_configured)} are not configured in VRF {self.inputs.vrf}") + self.result.is_failure(f"TACACS servers {not_configured} are not configured in VRF {self.inputs.vrf}") class VerifyTacacsServerGroups(AntaTest): @@ -151,7 +151,7 @@ class VerifyTacacsServerGroups(AntaTest): if not not_configured: self.result.is_success() else: - self.result.is_failure(f"TACACS server group(s) {', '.join(not_configured)} are not configured") + self.result.is_failure(f"TACACS server group(s) {not_configured} are not configured") class VerifyAuthenMethods(AntaTest): @@ -204,14 +204,14 @@ class VerifyAuthenMethods(AntaTest): self.result.is_failure("AAA authentication methods are not configured for login console") return if v["login"]["methods"] != self.inputs.methods: - self.result.is_failure(f"AAA authentication methods {', '.join(self.inputs.methods)} are not matching for login console") + self.result.is_failure(f"AAA authentication methods {self.inputs.methods} are not matching for login console") return not_matching.extend(auth_type for methods in v.values() if methods["methods"] != self.inputs.methods) if not not_matching: self.result.is_success() else: - self.result.is_failure(f"AAA authentication methods {', '.join(self.inputs.methods)} are not matching for {', '.join(not_matching)}") + self.result.is_failure(f"AAA authentication methods {self.inputs.methods} are not matching for {not_matching}") class VerifyAuthzMethods(AntaTest): @@ -263,7 +263,7 @@ class VerifyAuthzMethods(AntaTest): if not not_matching: self.result.is_success() else: - self.result.is_failure(f"AAA authorization methods {', '.join(self.inputs.methods)} are not matching for {', '.join(not_matching)}") + self.result.is_failure(f"AAA authorization methods {self.inputs.methods} are not matching for {not_matching}") class VerifyAcctDefaultMethods(AntaTest): @@ -319,12 +319,12 @@ class VerifyAcctDefaultMethods(AntaTest): if methods["defaultMethods"] != self.inputs.methods: not_matching.append(acct_type) if not_configured: - self.result.is_failure(f"AAA default accounting is not configured for {', '.join(not_configured)}") + self.result.is_failure(f"AAA default accounting is not configured for {not_configured}") return if not not_matching: self.result.is_success() else: - self.result.is_failure(f"AAA accounting default methods {', '.join(self.inputs.methods)} are not matching for {', '.join(not_matching)}") + self.result.is_failure(f"AAA accounting default methods {self.inputs.methods} are not 
matching for {not_matching}") class VerifyAcctConsoleMethods(AntaTest): @@ -380,9 +380,9 @@ class VerifyAcctConsoleMethods(AntaTest): if methods["consoleMethods"] != self.inputs.methods: not_matching.append(acct_type) if not_configured: - self.result.is_failure(f"AAA console accounting is not configured for {', '.join(not_configured)}") + self.result.is_failure(f"AAA console accounting is not configured for {not_configured}") return if not not_matching: self.result.is_success() else: - self.result.is_failure(f"AAA accounting console methods {', '.join(self.inputs.methods)} are not matching for {', '.join(not_matching)}") + self.result.is_failure(f"AAA accounting console methods {self.inputs.methods} are not matching for {not_matching}") diff --git a/anta/tests/avt.py b/anta/tests/avt.py index 2173510..b0f1a46 100644 --- a/anta/tests/avt.py +++ b/anta/tests/avt.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to Adaptive virtual topology tests.""" @@ -47,7 +47,7 @@ class VerifyAVTPathHealth(AntaTest): # Check if AVT is configured if not command_output: - self.result.is_failure("Adaptive virtual topology paths are not configured") + self.result.is_failure("Adaptive virtual topology paths are not configured.") return # Iterate over each VRF @@ -61,11 +61,11 @@ class VerifyAVTPathHealth(AntaTest): # Check the status of the AVT path if not valid and not active: - self.result.is_failure(f"VRF: {vrf} Profile: {profile} AVT path: {path} - Invalid and not active") + self.result.is_failure(f"AVT path {path} for profile {profile} in VRF {vrf} is invalid and not active.") elif not valid: - self.result.is_failure(f"VRF: {vrf} Profile: {profile} AVT path: {path} - Invalid") + self.result.is_failure(f"AVT path {path} for profile {profile} in VRF {vrf} is invalid.") elif not active: - self.result.is_failure(f"VRF: {vrf} Profile: {profile} AVT path: {path} - Not active") + self.result.is_failure(f"AVT path {path} for profile {profile} in VRF {vrf} is not active.") class VerifyAVTSpecificPath(AntaTest): @@ -143,7 +143,7 @@ class VerifyAVTSpecificPath(AntaTest): valid = get_value(path_data, "flags.valid") active = get_value(path_data, "flags.active") if not all([valid, active]): - self.result.is_failure(f"{avt_path} - Incorrect path {path} - Valid: {valid} Active: {active}") + self.result.is_failure(f"{avt_path} - Incorrect path {path} - Valid: {valid}, Active: {active}") # If no matching path found, mark the test as failed if not path_found: @@ -192,4 +192,4 @@ class VerifyAVTRole(AntaTest): # Check if the AVT role matches the expected role if self.inputs.role != command_output.get("role"): - self.result.is_failure(f"AVT role mismatch - Expected: {self.inputs.role} Actual: {command_output.get('role')}") + self.result.is_failure(f"Expected AVT role as `{self.inputs.role}`, but found `{command_output.get('role')}` instead.") diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py index f677ae1..ba27f94 100644 --- a/anta/tests/bfd.py +++ b/anta/tests/bfd.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
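The aaa.py hunks above only change how the offending list is rendered in the failure message; with made-up server addresses and VRF, the two styles print as follows:

```python
not_configured = ["10.0.0.1", "10.0.0.2"]  # hypothetical TACACS servers

# '-' side: entries joined into a flat, comma-separated string.
print(f"TACACS servers {', '.join(not_configured)} are not configured in VRF MGMT")
# TACACS servers 10.0.0.1, 10.0.0.2 are not configured in VRF MGMT

# '+' side: the list repr is embedded directly in the message.
print(f"TACACS servers {not_configured} are not configured in VRF MGMT")
# TACACS servers ['10.0.0.1', '10.0.0.2'] are not configured in VRF MGMT
```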
"""Module related to BFD tests.""" @@ -8,9 +8,9 @@ from __future__ import annotations from datetime import datetime, timezone -from typing import TYPE_CHECKING, ClassVar, TypeVar +from typing import TYPE_CHECKING, ClassVar -from pydantic import Field, field_validator +from pydantic import Field from anta.input_models.bfd import BFDPeer from anta.models import AntaCommand, AntaTest @@ -19,9 +19,6 @@ from anta.tools import get_value if TYPE_CHECKING: from anta.models import AntaTemplate -# Using a TypeVar for the BFDPeer model since mypy thinks it's a ClassVar and not a valid type when used in field validators -T = TypeVar("T", bound=BFDPeer) - class VerifyBFDSpecificPeers(AntaTest): """Verifies the state of IPv4 BFD peer sessions. @@ -102,18 +99,15 @@ class VerifyBFDPeersIntervals(AntaTest): 1. Confirms that the specified VRF is configured. 2. Verifies that the peer exists in the BFD configuration. 3. Confirms that BFD peer is correctly configured with the `Transmit interval, Receive interval and Multiplier`. - 4. Verifies that BFD peer is correctly configured with the `Detection time`, if provided. Expected Results ---------------- * Success: If all of the following conditions are met: - All specified peers are found in the BFD configuration within the specified VRF. - All BFD peers are correctly configured with the `Transmit interval, Receive interval and Multiplier`. - - If provided, the `Detection time` is correctly configured. * Failure: If any of the following occur: - A specified peer is not found in the BFD configuration within the specified VRF. - Any BFD peer not correctly configured with the `Transmit interval, Receive interval and Multiplier`. - - Any BFD peer is not correctly configured with `Detection time`, if provided. Examples -------- @@ -131,7 +125,6 @@ class VerifyBFDPeersIntervals(AntaTest): tx_interval: 1200 rx_interval: 1200 multiplier: 3 - detection_time: 3600 ``` """ @@ -146,23 +139,6 @@ class VerifyBFDPeersIntervals(AntaTest): BFDPeer: ClassVar[type[BFDPeer]] = BFDPeer """To maintain backward compatibility""" - @field_validator("bfd_peers") - @classmethod - def validate_bfd_peers(cls, bfd_peers: list[T]) -> list[T]: - """Validate that 'tx_interval', 'rx_interval' and 'multiplier' fields are provided in each BFD peer.""" - for peer in bfd_peers: - missing_fileds = [] - if peer.tx_interval is None: - missing_fileds.append("tx_interval") - if peer.rx_interval is None: - missing_fileds.append("rx_interval") - if peer.multiplier is None: - missing_fileds.append("multiplier") - if missing_fileds: - msg = f"{peer} {', '.join(missing_fileds)} field(s) are missing in the input" - raise ValueError(msg) - return bfd_peers - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDPeersIntervals.""" @@ -175,7 +151,6 @@ class VerifyBFDPeersIntervals(AntaTest): tx_interval = bfd_peer.tx_interval rx_interval = bfd_peer.rx_interval multiplier = bfd_peer.multiplier - detect_time = bfd_peer.detection_time # Check if BFD peer configured bfd_output = get_value( @@ -191,7 +166,6 @@ class VerifyBFDPeersIntervals(AntaTest): bfd_details = bfd_output.get("peerStatsDetail", {}) op_tx_interval = bfd_details.get("operTxInterval") // 1000 op_rx_interval = bfd_details.get("operRxInterval") // 1000 - op_detection_time = bfd_details.get("detectTime") // 1000 detect_multiplier = bfd_details.get("detectMult") if op_tx_interval != tx_interval: @@ -203,9 +177,6 @@ class VerifyBFDPeersIntervals(AntaTest): if detect_multiplier != multiplier: self.result.is_failure(f"{bfd_peer} - 
Incorrect Multiplier - Expected: {multiplier} Actual: {detect_multiplier}") - if detect_time and op_detection_time != detect_time: - self.result.is_failure(f"{bfd_peer} - Incorrect Detection Time - Expected: {detect_time} Actual: {op_detection_time}") - class VerifyBFDPeersHealth(AntaTest): """Verifies the health of IPv4 BFD peers across all VRFs. @@ -260,7 +231,7 @@ class VerifyBFDPeersHealth(AntaTest): # Check if any IPv4 BFD peer is configured ipv4_neighbors_exist = any(vrf_data["ipv4Neighbors"] for vrf_data in bfd_output["vrfs"].values()) if not ipv4_neighbors_exist: - self.result.is_failure("No IPv4 BFD peers are configured for any VRF") + self.result.is_failure("No IPv4 BFD peers are configured for any VRF.") return # Iterate over IPv4 BFD peers @@ -328,16 +299,6 @@ class VerifyBFDPeersRegProtocols(AntaTest): BFDPeer: ClassVar[type[BFDPeer]] = BFDPeer """To maintain backward compatibility""" - @field_validator("bfd_peers") - @classmethod - def validate_bfd_peers(cls, bfd_peers: list[T]) -> list[T]: - """Validate that 'protocols' field is provided in each BFD peer.""" - for peer in bfd_peers: - if peer.protocols is None: - msg = f"{peer} 'protocols' field missing in the input" - raise ValueError(msg) - return bfd_peers - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDPeersRegProtocols.""" @@ -362,5 +323,5 @@ class VerifyBFDPeersRegProtocols(AntaTest): # Check registered protocols difference = sorted(set(protocols) - set(get_value(bfd_output, "peerStatsDetail.apps"))) if difference: - failures = ", ".join(f"`{item}`" for item in difference) + failures = " ".join(f"`{item}`" for item in difference) self.result.is_failure(f"{bfd_peer} - {failures} routing protocol(s) not configured") diff --git a/anta/tests/configuration.py b/anta/tests/configuration.py index a1c57a1..cff7ec6 100644 --- a/anta/tests/configuration.py +++ b/anta/tests/configuration.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the device configuration tests.""" @@ -125,4 +125,4 @@ class VerifyRunningConfigLines(AntaTest): if not failure_msgs: self.result.is_success() else: - self.result.is_failure("Following patterns were not found: " + ", ".join(failure_msgs)) + self.result.is_failure("Following patterns were not found: " + ",".join(failure_msgs)) diff --git a/anta/tests/connectivity.py b/anta/tests/connectivity.py index a5ba5ff..afcfa11 100644 --- a/anta/tests/connectivity.py +++ b/anta/tests/connectivity.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to various connectivity tests.""" @@ -7,16 +7,11 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import ClassVar, TypeVar - -from pydantic import field_validator +from typing import ClassVar from anta.input_models.connectivity import Host, LLDPNeighbor, Neighbor from anta.models import AntaCommand, AntaTemplate, AntaTest -# Using a TypeVar for the Host model since mypy thinks it's a ClassVar and not a valid type when used in field validators -T = TypeVar("T", bound=Host) - class VerifyReachability(AntaTest): """Test network reachability to one or many destination IP(s). 
@@ -37,18 +32,11 @@ class VerifyReachability(AntaTest): vrf: MGMT df_bit: True size: 100 - reachable: true - source: Management0 destination: 8.8.8.8 vrf: MGMT df_bit: True size: 100 - - source: fd12:3456:789a:1::1 - destination: fd12:3456:789a:1::2 - vrf: default - df_bit: True - size: 100 - reachable: false ``` """ @@ -66,16 +54,6 @@ class VerifyReachability(AntaTest): Host: ClassVar[type[Host]] = Host """To maintain backward compatibility.""" - @field_validator("hosts") - @classmethod - def validate_hosts(cls, hosts: list[T]) -> list[T]: - """Validate the 'destination' and 'source' IP address family in each host.""" - for host in hosts: - if not isinstance(host.source, str) and host.destination.version != host.source.version: - msg = f"{host} IP address family for destination does not match source" - raise ValueError(msg) - return hosts - def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each host in the input list.""" return [ @@ -91,14 +69,9 @@ class VerifyReachability(AntaTest): self.result.is_success() for command, host in zip(self.instance_commands, self.inputs.hosts): - # Verifies the network is reachable - if host.reachable and f"{host.repeat} received" not in command.json_output["messages"][0]: + if f"{host.repeat} received" not in command.json_output["messages"][0]: self.result.is_failure(f"{host} - Unreachable") - # Verifies the network is unreachable. - if not host.reachable and f"{host.repeat} received" in command.json_output["messages"][0]: - self.result.is_failure(f"{host} - Destination is expected to be unreachable but found reachable") - class VerifyLLDPNeighbors(AntaTest): """Verifies the connection status of the specified LLDP (Link Layer Discovery Protocol) neighbors. diff --git a/anta/tests/cvx.py b/anta/tests/cvx.py index 25d8245..6160082 100644 --- a/anta/tests/cvx.py +++ b/anta/tests/cvx.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
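The validators removed in the bfd.py and connectivity.py hunks above follow one Pydantic `field_validator` pattern. A trimmed, self-contained sketch of the connectivity variant, using stand-in `Host`/`Input` models (the real ones live in `anta.input_models.connectivity` and also accept an egress interface name such as `Management0` as `source`):

```python
from ipaddress import IPv4Address, IPv6Address
from typing import Union

from pydantic import BaseModel, field_validator


class Host(BaseModel):
    """Stand-in for anta.input_models.connectivity.Host (IP-only here for brevity)."""

    destination: Union[IPv4Address, IPv6Address]
    source: Union[IPv4Address, IPv6Address]


class Input(BaseModel):
    hosts: list[Host]

    @field_validator("hosts")
    @classmethod
    def validate_hosts(cls, hosts: list[Host]) -> list[Host]:
        """Reject hosts whose source and destination address families differ."""
        for host in hosts:
            if host.destination.version != host.source.version:
                msg = f"{host} IP address family for destination does not match source"
                raise ValueError(msg)
        return hosts


Input(hosts=[{"destination": "10.0.0.1", "source": "10.0.0.2"}])   # accepted
# Input(hosts=[{"destination": "10.0.0.1", "source": "fd12::1"}])  # raises ValidationError
```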
"""Module related to the CVX tests.""" @@ -49,7 +49,7 @@ class VerifyMcsClientMounts(AntaTest): continue mcs_mount_state_detected = True if (state := mount_state["state"]) != "mountStateMountComplete": - self.result.is_failure(f"MCS Client mount states are not valid - Expected: mountStateMountComplete Actual: {state}") + self.result.is_failure(f"MCS Client mount states are not valid: {state}") if not mcs_mount_state_detected: self.result.is_failure("MCS Client mount states are not present") @@ -88,12 +88,7 @@ class VerifyManagementCVX(AntaTest): command_output = self.instance_commands[0].json_output self.result.is_success() if (cluster_state := get_value(command_output, "clusterStatus.enabled")) != self.inputs.enabled: - if cluster_state is None: - self.result.is_failure("Management CVX status - Not configured") - return - cluster_state = "enabled" if cluster_state else "disabled" - self.inputs.enabled = "enabled" if self.inputs.enabled else "disabled" - self.result.is_failure(f"Management CVX status is not valid: Expected: {self.inputs.enabled} Actual: {cluster_state}") + self.result.is_failure(f"Management CVX status is not valid: {cluster_state}") class VerifyMcsServerMounts(AntaTest): @@ -131,15 +126,13 @@ class VerifyMcsServerMounts(AntaTest): mount_states = mount["mountStates"][0] if (num_path_states := len(mount_states["pathStates"])) != (expected_num := len(self.mcs_path_types)): - self.result.is_failure(f"Host: {hostname} - Incorrect number of mount path states - Expected: {expected_num} Actual: {num_path_states}") + self.result.is_failure(f"Incorrect number of mount path states for {hostname} - Expected: {expected_num}, Actual: {num_path_states}") for path in mount_states["pathStates"]: if (path_type := path.get("type")) not in self.mcs_path_types: - self.result.is_failure(f"Host: {hostname} - Unexpected MCS path type - Expected: {', '.join(self.mcs_path_types)} Actual: {path_type}") + self.result.is_failure(f"Unexpected MCS path type for {hostname}: '{path_type}'.") if (path_state := path.get("state")) != "mountStateMountComplete": - self.result.is_failure( - f"Host: {hostname} Path Type: {path_type} - MCS server mount state is not valid - Expected: mountStateMountComplete Actual:{path_state}" - ) + self.result.is_failure(f"MCS server mount state for path '{path_type}' is not valid is for {hostname}: '{path_state}'.") @AntaTest.anta_test def test(self) -> None: @@ -159,18 +152,18 @@ class VerifyMcsServerMounts(AntaTest): mcs_mounts = [mount for mount in mounts if mount["service"] == "Mcs"] if not mounts: - self.result.is_failure(f"Host: {hostname} - No mount status found") + self.result.is_failure(f"No mount status for {hostname}") continue if not mcs_mounts: - self.result.is_failure(f"Host: {hostname} - MCS mount state not detected") + self.result.is_failure(f"MCS mount state not detected for {hostname}") else: for mount in mcs_mounts: self.validate_mount_states(mount, hostname) active_count += 1 if active_count != self.inputs.connections_count: - self.result.is_failure(f"Incorrect CVX successful connections count - Expected: {self.inputs.connections_count} Actual: {active_count}") + self.result.is_failure(f"Incorrect CVX successful connections count. 
Expected: {self.inputs.connections_count}, Actual : {active_count}") class VerifyActiveCVXConnections(AntaTest): @@ -207,13 +200,13 @@ class VerifyActiveCVXConnections(AntaTest): self.result.is_success() if not (connections := command_output.get("connections")): - self.result.is_failure("CVX connections are not available") + self.result.is_failure("CVX connections are not available.") return active_count = len([connection for connection in connections if connection.get("oobConnectionActive")]) if self.inputs.connections_count != active_count: - self.result.is_failure(f"Incorrect CVX active connections count - Expected: {self.inputs.connections_count} Actual: {active_count}") + self.result.is_failure(f"CVX active connections count. Expected: {self.inputs.connections_count}, Actual : {active_count}") class VerifyCVXClusterStatus(AntaTest): @@ -268,7 +261,7 @@ class VerifyCVXClusterStatus(AntaTest): # Check cluster role if (cluster_role := cluster_status.get("role")) != self.inputs.role: - self.result.is_failure(f"CVX Role is not valid: Expected: {self.inputs.role} Actual: {cluster_role}") + self.result.is_failure(f"CVX Role is not valid: {cluster_role}") return # Validate peer status @@ -276,15 +269,15 @@ class VerifyCVXClusterStatus(AntaTest): # Check peer count if (num_of_peers := len(peer_cluster)) != (expected_num_of_peers := len(self.inputs.peer_status)): - self.result.is_failure(f"Unexpected number of peers - Expected: {expected_num_of_peers} Actual: {num_of_peers}") + self.result.is_failure(f"Unexpected number of peers {num_of_peers} vs {expected_num_of_peers}") # Check each peer for peer in self.inputs.peer_status: # Retrieve the peer status from the peer cluster if (eos_peer_status := get_value(peer_cluster, peer.peer_name, separator="..")) is None: - self.result.is_failure(f"{peer.peer_name} - Not present") + self.result.is_failure(f"{peer.peer_name} is not present") continue # Validate the registration state of the peer if (peer_reg_state := eos_peer_status.get("registrationState")) != peer.registration_state: - self.result.is_failure(f"{peer.peer_name} - Invalid registration state - Expected: {peer.registration_state} Actual: {peer_reg_state}") + self.result.is_failure(f"{peer.peer_name} registration state is not complete: {peer_reg_state}") diff --git a/anta/tests/field_notices.py b/anta/tests/field_notices.py index cc7fab9..41e81a8 100644 --- a/anta/tests/field_notices.py +++ b/anta/tests/field_notices.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
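In the cvx.py hunk above, `get_value(peer_cluster, peer.peer_name, separator="..")` walks a nested dict, and the unusual `..` separator keeps the dots inside peer hostnames from being treated as nesting. A simplified stand-in for `anta.tools.get_value` with made-up data (the real helper supports more options):

```python
from typing import Any


def get_value(dictionary: dict[str, Any], key: str, separator: str = ".") -> Any:
    """Simplified stand-in for anta.tools.get_value: walk nested dicts using `separator`."""
    value: Any = dictionary
    for part in key.split(separator):
        if not isinstance(value, dict) or part not in value:
            return None
        value = value[part]
    return value


peers = {"cvx01.lab.local": {"registrationState": "Registration complete"}}
assert get_value(peers, "cvx01.lab.local") is None                      # "." splits the hostname
assert get_value(peers, "cvx01.lab.local", separator="..") is not None  # ".." leaves it intact
```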
"""Module related to field notices tests.""" @@ -96,7 +96,7 @@ class VerifyFieldNotice44Resolution(AntaTest): for variant in variants: model = model.replace(variant, "") if model not in devices: - self.result.is_skipped("Device is not impacted by FN044") + self.result.is_skipped("device is not impacted by FN044") return for component in command_output["details"]["components"]: @@ -117,7 +117,7 @@ class VerifyFieldNotice44Resolution(AntaTest): ) ) if incorrect_aboot_version: - self.result.is_failure(f"Device is running incorrect version of aboot {aboot_version}") + self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})") class VerifyFieldNotice72Resolution(AntaTest): diff --git a/anta/tests/flow_tracking.py b/anta/tests/flow_tracking.py index a115949..9b9acc6 100644 --- a/anta/tests/flow_tracking.py +++ b/anta/tests/flow_tracking.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the flow tracking tests.""" @@ -9,13 +9,37 @@ from __future__ import annotations from typing import ClassVar +from pydantic import BaseModel + from anta.decorators import skip_on_platforms -from anta.input_models.flow_tracking import FlowTracker from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_value +from anta.tools import get_failed_logs -def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, str]) -> list[str]: +def validate_record_export(record_export: dict[str, str], tracker_info: dict[str, str]) -> str: + """Validate the record export configuration against the tracker info. + + Parameters + ---------- + record_export + The expected record export configuration. + tracker_info + The actual tracker info from the command output. + + Returns + ------- + str + A failure message if the record export configuration does not match, otherwise blank string. + """ + failed_log = "" + actual_export = {"inactive timeout": tracker_info.get("inactiveTimeout"), "interval": tracker_info.get("activeInterval")} + expected_export = {"inactive timeout": record_export.get("on_inactive_timeout"), "interval": record_export.get("on_interval")} + if actual_export != expected_export: + failed_log = get_failed_logs(expected_export, actual_export) + return failed_log + + +def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, str]) -> str: """Validate the exporter configurations against the tracker info. Parameters @@ -27,52 +51,36 @@ def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, Returns ------- - list - List of failure messages for any exporter configuration that does not match. + str + Failure message if any exporter configuration does not match. """ - failure_messages = [] + failed_log = "" for exporter in exporters: - exporter_name = exporter.name + exporter_name = exporter["name"] actual_exporter_info = tracker_info["exporters"].get(exporter_name) if not actual_exporter_info: - failure_messages.append(f"{exporter} - Not configured") + failed_log += f"\nExporter `{exporter_name}` is not configured." 
continue - local_interface = actual_exporter_info["localIntf"] - template_interval = actual_exporter_info["templateInterval"] - if local_interface != exporter.local_interface: - failure_messages.append(f"{exporter} - Incorrect local interface - Expected: {exporter.local_interface} Actual: {local_interface}") + expected_exporter_data = {"local interface": exporter["local_interface"], "template interval": exporter["template_interval"]} + actual_exporter_data = {"local interface": actual_exporter_info["localIntf"], "template interval": actual_exporter_info["templateInterval"]} - if template_interval != exporter.template_interval: - failure_messages.append(f"{exporter} - Incorrect template interval - Expected: {exporter.template_interval} Actual: {template_interval}") - return failure_messages + if expected_exporter_data != actual_exporter_data: + failed_msg = get_failed_logs(expected_exporter_data, actual_exporter_data) + failed_log += f"\nExporter `{exporter_name}`: {failed_msg}" + return failed_log class VerifyHardwareFlowTrackerStatus(AntaTest): - """Verifies the hardware flow tracking state. + """Verifies if hardware flow tracking is running and an input tracker is active. - This test performs the following checks: - - 1. Confirms that hardware flow tracking is running. - 2. For each specified flow tracker: - - Confirms that the tracker is active. - - Optionally, checks the tracker interval/timeout configuration. - - Optionally, verifies the tracker exporter configuration + This test optionally verifies the tracker interval/timeout and exporter configuration. Expected Results ---------------- - * Success: The test will pass if all of the following conditions are met: - - Hardware flow tracking is running. - - For each specified flow tracker: - - The flow tracker is active. - - The tracker interval/timeout matches the expected values, if provided. - - The exporter configuration matches the expected values, if provided. - * Failure: The test will fail if any of the following conditions are met: - - Hardware flow tracking is not running. - - For any specified flow tracker: - - The flow tracker is not active. - - The tracker interval/timeout does not match the expected values, if provided. - - The exporter configuration does not match the expected values, if provided. + * Success: The test will pass if hardware flow tracking is running and an input tracker is active. + * Failure: The test will fail if hardware flow tracking is not running, an input tracker is not active, + or the tracker interval/timeout and exporter configuration does not match the expected values. Examples -------- @@ -91,8 +99,11 @@ class VerifyHardwareFlowTrackerStatus(AntaTest): ``` """ + description = ( + "Verifies if hardware flow tracking is running and an input tracker is active. Optionally verifies the tracker interval/timeout and exporter configuration." 
+ ) categories: ClassVar[list[str]] = ["flow tracking"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show flow tracking hardware", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show flow tracking hardware tracker {name}", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyHardwareFlowTrackerStatus test.""" @@ -100,42 +111,82 @@ class VerifyHardwareFlowTrackerStatus(AntaTest): trackers: list[FlowTracker] """List of flow trackers to verify.""" + class FlowTracker(BaseModel): + """Detail of a flow tracker.""" + + name: str + """Name of the flow tracker.""" + + record_export: RecordExport | None = None + """Record export configuration for the flow tracker.""" + + exporters: list[Exporter] | None = None + """List of exporters for the flow tracker.""" + + class RecordExport(BaseModel): + """Record export configuration.""" + + on_inactive_timeout: int + """Timeout in milliseconds for exporting records when inactive.""" + + on_interval: int + """Interval in milliseconds for exporting records.""" + + class Exporter(BaseModel): + """Detail of an exporter.""" + + name: str + """Name of the exporter.""" + + local_interface: str + """Local interface used by the exporter.""" + + template_interval: int + """Template interval in milliseconds for the exporter.""" + + def render(self, template: AntaTemplate) -> list[AntaCommand]: + """Render the template for each hardware tracker.""" + return [template.render(name=tracker.name) for tracker in self.inputs.trackers] + @skip_on_platforms(["cEOSLab", "vEOS-lab"]) @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyHardwareFlowTrackerStatus.""" self.result.is_success() + for command, tracker_input in zip(self.instance_commands, self.inputs.trackers): + hardware_tracker_name = command.params.name + record_export = tracker_input.record_export.model_dump() if tracker_input.record_export else None + exporters = [exporter.model_dump() for exporter in tracker_input.exporters] if tracker_input.exporters else None + command_output = command.json_output - command_output = self.instance_commands[0].json_output - # Check if hardware flow tracking is configured - if not command_output.get("running"): - self.result.is_failure("Hardware flow tracking is not running.") - return + # Check if hardware flow tracking is configured + if not command_output.get("running"): + self.result.is_failure("Hardware flow tracking is not running.") + return - for tracker in self.inputs.trackers: # Check if the input hardware tracker is configured - if not (tracker_info := get_value(command_output["trackers"], f"{tracker.name}")): - self.result.is_failure(f"{tracker} - Not found") + tracker_info = command_output["trackers"].get(hardware_tracker_name) + if not tracker_info: + self.result.is_failure(f"Hardware flow tracker `{hardware_tracker_name}` is not configured.") continue # Check if the input hardware tracker is active if not tracker_info.get("active"): - self.result.is_failure(f"{tracker} - Disabled") + self.result.is_failure(f"Hardware flow tracker `{hardware_tracker_name}` is not active.") continue # Check the input hardware tracker timeouts - if tracker.record_export: - inactive_interval = tracker.record_export.on_inactive_timeout - on_interval = tracker.record_export.on_interval - act_inactive = tracker_info.get("inactiveTimeout") - act_interval = tracker_info.get("activeInterval") - if not all([inactive_interval == act_inactive, on_interval == act_interval]): - 
self.result.is_failure( - f"{tracker} {tracker.record_export} - Incorrect timers - Inactive Timeout: {act_inactive} OnActive Interval: {act_interval}" - ) + failure_msg = "" + if record_export: + record_export_failure = validate_record_export(record_export, tracker_info) + if record_export_failure: + failure_msg += record_export_failure - # Check the input hardware tracker exporters configuration - if tracker.exporters: - failure_messages = validate_exporters(tracker.exporters, tracker_info) - for message in failure_messages: - self.result.is_failure(f"{tracker} {message}") + # Check the input hardware tracker exporters' configuration + if exporters: + exporters_failure = validate_exporters(exporters, tracker_info) + if exporters_failure: + failure_msg += exporters_failure + + if failure_msg: + self.result.is_failure(f"{hardware_tracker_name}: {failure_msg}\n") diff --git a/anta/tests/greent.py b/anta/tests/greent.py index 345f01b..67bb25b 100644 --- a/anta/tests/greent.py +++ b/anta/tests/greent.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to GreenT (Postcard Telemetry) tests.""" diff --git a/anta/tests/hardware.py b/anta/tests/hardware.py index 7edd41b..1c562b0 100644 --- a/anta/tests/hardware.py +++ b/anta/tests/hardware.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the hardware or environment tests.""" @@ -49,14 +49,14 @@ class VerifyTransceiversManufacturers(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyTransceiversManufacturers.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for interface, value in command_output["xcvrSlots"].items(): - if value["mfgName"] not in self.inputs.manufacturers: - self.result.is_failure( - f"Interface: {interface} - Transceiver is from unapproved manufacturers - Expected: {', '.join(self.inputs.manufacturers)}" - f" Actual: {value['mfgName']}" - ) + wrong_manufacturers = { + interface: value["mfgName"] for interface, value in command_output["xcvrSlots"].items() if value["mfgName"] not in self.inputs.manufacturers + } + if not wrong_manufacturers: + self.result.is_success() + else: + self.result.is_failure(f"Some transceivers are from unapproved manufacturers: {wrong_manufacturers}") class VerifyTemperature(AntaTest): @@ -82,11 +82,12 @@ class VerifyTemperature(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyTemperature.""" - self.result.is_success() command_output = self.instance_commands[0].json_output temperature_status = command_output.get("systemStatus", "") - if temperature_status != "temperatureOk": - self.result.is_failure(f"Device temperature exceeds acceptable limits - Expected: temperatureOk Actual: {temperature_status}") + if temperature_status == "temperatureOk": + self.result.is_success() + else: + self.result.is_failure(f"Device temperature exceeds acceptable limits. 
Current system status: '{temperature_status}'") class VerifyTransceiversTemperature(AntaTest): @@ -112,14 +113,20 @@ class VerifyTransceiversTemperature(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyTransceiversTemperature.""" - self.result.is_success() command_output = self.instance_commands[0].json_output sensors = command_output.get("tempSensors", "") - for sensor in sensors: - if sensor["hwStatus"] != "ok": - self.result.is_failure(f"Sensor: {sensor['name']} - Invalid hardware state - Expected: ok Actual: {sensor['hwStatus']}") - if sensor["alertCount"] != 0: - self.result.is_failure(f"Sensor: {sensor['name']} - Incorrect alert counter - Expected: 0 Actual: {sensor['alertCount']}") + wrong_sensors = { + sensor["name"]: { + "hwStatus": sensor["hwStatus"], + "alertCount": sensor["alertCount"], + } + for sensor in sensors + if sensor["hwStatus"] != "ok" or sensor["alertCount"] != 0 + } + if not wrong_sensors: + self.result.is_success() + else: + self.result.is_failure(f"The following sensors are operating outside the acceptable temperature range or have raised alerts: {wrong_sensors}") class VerifyEnvironmentSystemCooling(AntaTest): @@ -149,7 +156,7 @@ class VerifyEnvironmentSystemCooling(AntaTest): sys_status = command_output.get("systemStatus", "") self.result.is_success() if sys_status != "coolingOk": - self.result.is_failure(f"Device system cooling status invalid - Expected: coolingOk Actual: {sys_status}") + self.result.is_failure(f"Device system cooling is not OK: '{sys_status}'") class VerifyEnvironmentCooling(AntaTest): @@ -170,6 +177,8 @@ class VerifyEnvironmentCooling(AntaTest): ``` """ + name = "VerifyEnvironmentCooling" + description = "Verifies the status of power supply fans and all fan trays." 
categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment cooling", revision=1)] @@ -189,16 +198,12 @@ class VerifyEnvironmentCooling(AntaTest): for power_supply in command_output.get("powerSupplySlots", []): for fan in power_supply.get("fans", []): if (state := fan["status"]) not in self.inputs.states: - self.result.is_failure( - f"Power Slot: {power_supply['label']} Fan: {fan['label']} - Invalid state - Expected: {', '.join(self.inputs.states)} Actual: {state}" - ) + self.result.is_failure(f"Fan {fan['label']} on PowerSupply {power_supply['label']} is: '{state}'") # Then go through fan trays for fan_tray in command_output.get("fanTraySlots", []): for fan in fan_tray.get("fans", []): if (state := fan["status"]) not in self.inputs.states: - self.result.is_failure( - f"Fan Tray: {fan_tray['label']} Fan: {fan['label']} - Invalid state - Expected: {', '.join(self.inputs.states)} Actual: {state}" - ) + self.result.is_failure(f"Fan {fan['label']} on Fan Tray {fan_tray['label']} is: '{state}'") class VerifyEnvironmentPower(AntaTest): @@ -232,16 +237,19 @@ class VerifyEnvironmentPower(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyEnvironmentPower.""" - self.result.is_success() command_output = self.instance_commands[0].json_output power_supplies = command_output.get("powerSupplies", "{}") - for power_supply, value in dict(power_supplies).items(): - if (state := value["state"]) not in self.inputs.states: - self.result.is_failure(f"Power Slot: {power_supply} - Invalid power supplies state - Expected: {', '.join(self.inputs.states)} Actual: {state}") + wrong_power_supplies = { + powersupply: {"state": value["state"]} for powersupply, value in dict(power_supplies).items() if value["state"] not in self.inputs.states + } + if not wrong_power_supplies: + self.result.is_success() + else: + self.result.is_failure(f"The following power supplies status are not in the accepted states list: {wrong_power_supplies}") class VerifyAdverseDrops(AntaTest): - """Verifies there are no adverse drops on DCS-7280 and DCS-7500 family switches. + """Verifies there are no adverse drops on DCS-7280 and DCS-7500 family switches (Arad/Jericho chips). Expected Results ---------------- @@ -256,6 +264,7 @@ class VerifyAdverseDrops(AntaTest): ``` """ + description = "Verifies there are no adverse drops on DCS-7280 and DCS-7500 family switches." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show hardware counter drop", revision=1)] @@ -263,8 +272,9 @@ class VerifyAdverseDrops(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyAdverseDrops.""" - self.result.is_success() command_output = self.instance_commands[0].json_output total_adverse_drop = command_output.get("totalAdverseDrops", "") - if total_adverse_drop != 0: - self.result.is_failure(f"Incorrect total adverse drops counter - Expected: 0 Actual: {total_adverse_drop}") + if total_adverse_drop == 0: + self.result.is_success() + else: + self.result.is_failure(f"Device totalAdverseDrops counter is: '{total_adverse_drop}'") diff --git a/anta/tests/interfaces.py b/anta/tests/interfaces.py index e291bd6..bc1acbb 100644 --- a/anta/tests/interfaces.py +++ b/anta/tests/interfaces.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. 
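The hardware.py hunks above trade one failure-reporting style for another: the `+` side collects offenders and emits a single aggregated message, while the `-` side emits one message per violation. Illustration with made-up temperature-sensor data:

```python
sensors = [  # made-up "show system environment temperature" data
    {"name": "TempSensor1", "hwStatus": "ok", "alertCount": 0},
    {"name": "TempSensor2", "hwStatus": "failed", "alertCount": 2},
]

# '+' side: collect every offender first, then emit a single aggregated failure message.
wrong_sensors = {
    s["name"]: {"hwStatus": s["hwStatus"], "alertCount": s["alertCount"]}
    for s in sensors
    if s["hwStatus"] != "ok" or s["alertCount"] != 0
}
if wrong_sensors:
    print(f"The following sensors are operating outside the acceptable temperature range or have raised alerts: {wrong_sensors}")

# '-' side: emit one targeted failure message per violation as it is found.
for s in sensors:
    if s["hwStatus"] != "ok":
        print(f"Sensor: {s['name']} - Invalid hardware state - Expected: ok Actual: {s['hwStatus']}")
    if s["alertCount"] != 0:
        print(f"Sensor: {s['name']} - Incorrect alert counter - Expected: 0 Actual: {s['alertCount']}")
```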
# Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the device interfaces tests.""" @@ -8,22 +8,21 @@ from __future__ import annotations import re -from typing import ClassVar, TypeVar +from ipaddress import IPv4Interface +from typing import Any, ClassVar -from pydantic import Field, field_validator +from pydantic import BaseModel, Field from pydantic_extra_types.mac_address import MacAddress -from anta.custom_types import Interface, Percent, PositiveInteger +from anta import GITHUB_SUGGESTION +from anta.custom_types import EthernetInterface, Interface, Percent, PositiveInteger from anta.decorators import skip_on_platforms -from anta.input_models.interfaces import InterfaceDetail, InterfaceState +from anta.input_models.interfaces import InterfaceState from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import custom_division, format_data, get_item, get_value +from anta.tools import custom_division, format_data, get_failed_logs, get_item, get_value BPS_GBPS_CONVERSIONS = 1000000000 -# Using a TypeVar for the InterfaceState model since mypy thinks it's a ClassVar and not a valid type when used in field validators -T = TypeVar("T", bound=InterfaceState) - class VerifyInterfaceUtilization(AntaTest): """Verifies that the utilization of interfaces is below a certain threshold. @@ -61,8 +60,8 @@ class VerifyInterfaceUtilization(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfaceUtilization.""" - self.result.is_success() duplex_full = "duplexFull" + failed_interfaces: dict[str, dict[str, float]] = {} rates = self.instance_commands[0].json_output interfaces = self.instance_commands[1].json_output @@ -78,13 +77,15 @@ class VerifyInterfaceUtilization(AntaTest): self.logger.debug("Interface %s has been ignored due to null bandwidth value", intf) continue - # If one or more interfaces have a usage above the threshold, test fails. 
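The loop below computes per-direction utilization as a percentage of interface bandwidth; a worked example with made-up rates (not part of the diff):

```python
# Made-up numbers for the computation used in the loop below.
bandwidth = 10_000_000_000   # 10 Gb/s interface, from "show interfaces"
in_bps_rate = 7_500_000_000  # 7.5 Gb/s ingress rate, from "show interfaces counters rates"

usage = in_bps_rate / bandwidth * 100
assert usage == 75.0  # the test fails only when usage is strictly greater than the threshold
```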
for bps_rate in ("inBpsRate", "outBpsRate"): usage = rate[bps_rate] / bandwidth * 100 if usage > self.inputs.threshold: - self.result.is_failure( - f"Interface: {intf} BPS Rate: {bps_rate} - Usage exceeds the threshold - Expected: < {self.inputs.threshold}% Actual: {usage}%" - ) + failed_interfaces.setdefault(intf, {})[bps_rate] = usage + + if not failed_interfaces: + self.result.is_success() + else: + self.result.is_failure(f"The following interfaces have a usage > {self.inputs.threshold}%: {failed_interfaces}") class VerifyInterfaceErrors(AntaTest): @@ -109,12 +110,15 @@ class VerifyInterfaceErrors(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfaceErrors.""" - self.result.is_success() command_output = self.instance_commands[0].json_output + wrong_interfaces: list[dict[str, dict[str, int]]] = [] for interface, counters in command_output["interfaceErrorCounters"].items(): - counters_data = [f"{counter}: {value}" for counter, value in counters.items() if value > 0] - if counters_data: - self.result.is_failure(f"Interface: {interface} - Non-zero error counter(s) - {', '.join(counters_data)}") + if any(value > 0 for value in counters.values()) and all(interface not in wrong_interface for wrong_interface in wrong_interfaces): + wrong_interfaces.append({interface: counters}) + if not wrong_interfaces: + self.result.is_success() + else: + self.result.is_failure(f"The following interface(s) have non-zero error counters: {wrong_interfaces}") class VerifyInterfaceDiscards(AntaTest): @@ -139,12 +143,14 @@ class VerifyInterfaceDiscards(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfaceDiscards.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for interface, interface_data in command_output["interfaces"].items(): - counters_data = [f"{counter}: {value}" for counter, value in interface_data.items() if value > 0] - if counters_data: - self.result.is_failure(f"Interface: {interface} - Non-zero discard counter(s): {', '.join(counters_data)}") + wrong_interfaces: list[dict[str, dict[str, int]]] = [] + for interface, outer_v in command_output["interfaces"].items(): + wrong_interfaces.extend({interface: outer_v} for value in outer_v.values() if value > 0) + if not wrong_interfaces: + self.result.is_success() + else: + self.result.is_failure(f"The following interfaces have non 0 discard counter(s): {wrong_interfaces}") class VerifyInterfaceErrDisabled(AntaTest): @@ -169,11 +175,12 @@ class VerifyInterfaceErrDisabled(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfaceErrDisabled.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for interface, value in command_output["interfaceStatuses"].items(): - if value["linkStatus"] == "errdisabled": - self.result.is_failure(f"Interface: {interface} - Link status Error disabled") + errdisabled_interfaces = [interface for interface, value in command_output["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"] + if errdisabled_interfaces: + self.result.is_failure(f"The following interfaces are in error disabled state: {errdisabled_interfaces}") + else: + self.result.is_success() class VerifyInterfacesStatus(AntaTest): @@ -219,16 +226,6 @@ class VerifyInterfacesStatus(AntaTest): """List of interfaces with their expected state.""" InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState - @field_validator("interfaces") - @classmethod - def 
validate_interfaces(cls, interfaces: list[T]) -> list[T]: - """Validate that 'status' field is provided in each interface.""" - for interface in interfaces: - if interface.status is None: - msg = f"{interface} 'status' field missing in the input" - raise ValueError(msg) - return interfaces - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfacesStatus.""" @@ -245,16 +242,16 @@ class VerifyInterfacesStatus(AntaTest): # If line protocol status is provided, prioritize checking against both status and line protocol status if interface.line_protocol_status: - if any([interface.status != status, interface.line_protocol_status != proto]): + if interface.status != status or interface.line_protocol_status != proto: actual_state = f"Expected: {interface.status}/{interface.line_protocol_status}, Actual: {status}/{proto}" - self.result.is_failure(f"{interface.name} - Status mismatch - {actual_state}") + self.result.is_failure(f"{interface.name} - {actual_state}") # If line protocol status is not provided and interface status is "up", expect both status and proto to be "up" # If interface status is not "up", check only the interface status without considering line protocol status - elif all([interface.status == "up", status != "up" or proto != "up"]): - self.result.is_failure(f"{interface.name} - Status mismatch - Expected: up/up, Actual: {status}/{proto}") + elif interface.status == "up" and (status != "up" or proto != "up"): + self.result.is_failure(f"{interface.name} - Expected: up/up, Actual: {status}/{proto}") elif interface.status != status: - self.result.is_failure(f"{interface.name} - Status mismatch - Expected: {interface.status}, Actual: {status}") + self.result.is_failure(f"{interface.name} - Expected: {interface.status}, Actual: {status}") class VerifyStormControlDrops(AntaTest): @@ -281,15 +278,16 @@ class VerifyStormControlDrops(AntaTest): def test(self) -> None: """Main test function for VerifyStormControlDrops.""" command_output = self.instance_commands[0].json_output - storm_controlled_interfaces = [] - self.result.is_success() - + storm_controlled_interfaces: dict[str, dict[str, Any]] = {} for interface, interface_dict in command_output["interfaces"].items(): for traffic_type, traffic_type_dict in interface_dict["trafficTypes"].items(): if "drop" in traffic_type_dict and traffic_type_dict["drop"] != 0: - storm_controlled_interfaces.append(f"{traffic_type}: {traffic_type_dict['drop']}") - if storm_controlled_interfaces: - self.result.is_failure(f"Interface: {interface} - Non-zero storm-control drop counter(s) - {', '.join(storm_controlled_interfaces)}") + storm_controlled_interface_dict = storm_controlled_interfaces.setdefault(interface, {}) + storm_controlled_interface_dict.update({traffic_type: traffic_type_dict["drop"]}) + if not storm_controlled_interfaces: + self.result.is_success() + else: + self.result.is_failure(f"The following interfaces have none 0 storm-control drop counters {storm_controlled_interfaces}") class VerifyPortChannels(AntaTest): @@ -314,12 +312,15 @@ class VerifyPortChannels(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyPortChannels.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for port_channel, port_channel_details in command_output["portChannels"].items(): - # Verify that the no inactive ports in all port channels. 
- if inactive_ports := port_channel_details["inactivePorts"]: - self.result.is_failure(f"{port_channel} - Inactive port(s) - {', '.join(inactive_ports.keys())}") + po_with_inactive_ports: list[dict[str, str]] = [] + for portchannel, portchannel_dict in command_output["portChannels"].items(): + if len(portchannel_dict["inactivePorts"]) != 0: + po_with_inactive_ports.extend({portchannel: portchannel_dict["inactivePorts"]}) + if not po_with_inactive_ports: + self.result.is_success() + else: + self.result.is_failure(f"The following port-channels have inactive port(s): {po_with_inactive_ports}") class VerifyIllegalLACP(AntaTest): @@ -344,13 +345,16 @@ class VerifyIllegalLACP(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyIllegalLACP.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for port_channel, port_channel_dict in command_output["portChannels"].items(): - for interface, interface_details in port_channel_dict["interfaces"].items(): - # Verify that the no illegal LACP packets in all port channels. - if interface_details["illegalRxCount"] != 0: - self.result.is_failure(f"{port_channel} Interface: {interface} - Illegal LACP packets found") + po_with_illegal_lacp: list[dict[str, dict[str, int]]] = [] + for portchannel, portchannel_dict in command_output["portChannels"].items(): + po_with_illegal_lacp.extend( + {portchannel: interface} for interface, interface_dict in portchannel_dict["interfaces"].items() if interface_dict["illegalRxCount"] != 0 + ) + if not po_with_illegal_lacp: + self.result.is_success() + else: + self.result.is_failure(f"The following port-channels have received illegal LACP packets on the following ports: {po_with_illegal_lacp}") class VerifyLoopbackCount(AntaTest): @@ -383,20 +387,23 @@ class VerifyLoopbackCount(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLoopbackCount.""" - self.result.is_success() command_output = self.instance_commands[0].json_output loopback_count = 0 - for interface, interface_details in command_output["interfaces"].items(): + down_loopback_interfaces = [] + for interface in command_output["interfaces"]: + interface_dict = command_output["interfaces"][interface] if "Loopback" in interface: loopback_count += 1 - if (status := interface_details["lineProtocolStatus"]) != "up": - self.result.is_failure(f"Interface: {interface} - Invalid line protocol status - Expected: up Actual: {status}") - - if (status := interface_details["interfaceStatus"]) != "connected": - self.result.is_failure(f"Interface: {interface} - Invalid interface status - Expected: connected Actual: {status}") - - if loopback_count != self.inputs.number: - self.result.is_failure(f"Loopback interface(s) count mismatch: Expected {self.inputs.number} Actual: {loopback_count}") + if not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"): + down_loopback_interfaces.append(interface) + if loopback_count == self.inputs.number and len(down_loopback_interfaces) == 0: + self.result.is_success() + else: + self.result.is_failure() + if loopback_count != self.inputs.number: + self.result.is_failure(f"Found {loopback_count} Loopbacks when expecting {self.inputs.number}") + elif len(down_loopback_interfaces) != 0: # pragma: no branch + self.result.is_failure(f"The following Loopbacks are not up: {down_loopback_interfaces}") class VerifySVI(AntaTest): @@ -421,13 +428,16 @@ class VerifySVI(AntaTest): @AntaTest.anta_test def test(self) -> 
None: """Main test function for VerifySVI.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - for interface, int_data in command_output["interfaces"].items(): - if "Vlan" in interface and (status := int_data["lineProtocolStatus"]) != "up": - self.result.is_failure(f"SVI: {interface} - Invalid line protocol status - Expected: up Actual: {status}") - if "Vlan" in interface and int_data["interfaceStatus"] != "connected": - self.result.is_failure(f"SVI: {interface} - Invalid interface status - Expected: connected Actual: {int_data['interfaceStatus']}") + down_svis = [] + for interface in command_output["interfaces"]: + interface_dict = command_output["interfaces"][interface] + if "Vlan" in interface and not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"): + down_svis.append(interface) + if len(down_svis) == 0: + self.result.is_success() + else: + self.result.is_failure(f"The following SVIs are not up: {down_svis}") class VerifyL3MTU(AntaTest): @@ -472,7 +482,8 @@ class VerifyL3MTU(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyL3MTU.""" - self.result.is_success() + # Parameter to save incorrect interface settings + wrong_l3mtu_intf: list[dict[str, int]] = [] command_output = self.instance_commands[0].json_output # Set list of interfaces with specific settings specific_interfaces: list[str] = [] @@ -482,18 +493,18 @@ class VerifyL3MTU(AntaTest): for interface, values in command_output["interfaces"].items(): if re.findall(r"[a-z]+", interface, re.IGNORECASE)[0] not in self.inputs.ignored_interfaces and values["forwardingModel"] == "routed": if interface in specific_interfaces: - invalid_mtu = next( - (values["mtu"] for custom_data in self.inputs.specific_mtu if values["mtu"] != (expected_mtu := custom_data[interface])), None - ) - if invalid_mtu: - self.result.is_failure(f"Interface: {interface} - Incorrect MTU - Expected: {expected_mtu} Actual: {invalid_mtu}") + wrong_l3mtu_intf.extend({interface: values["mtu"]} for custom_data in self.inputs.specific_mtu if values["mtu"] != custom_data[interface]) # Comparison with generic setting elif values["mtu"] != self.inputs.mtu: - self.result.is_failure(f"Interface: {interface} - Incorrect MTU - Expected: {self.inputs.mtu} Actual: {values['mtu']}") + wrong_l3mtu_intf.append({interface: values["mtu"]}) + if wrong_l3mtu_intf: + self.result.is_failure(f"Some interfaces do not have correct MTU configured:\n{wrong_l3mtu_intf}") + else: + self.result.is_success() class VerifyIPProxyARP(AntaTest): - """Verifies if Proxy ARP is enabled. + """Verifies if Proxy-ARP is enabled for the provided list of interface(s). Expected Results ---------------- @@ -511,28 +522,32 @@ class VerifyIPProxyARP(AntaTest): ``` """ + description = "Verifies if Proxy ARP is enabled." 
categories: ClassVar[list[str]] = ["interfaces"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip interface", revision=2)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip interface {intf}", revision=2)] class Input(AntaTest.Input): """Input model for the VerifyIPProxyARP test.""" - interfaces: list[Interface] + interfaces: list[str] """List of interfaces to be tested.""" + def render(self, template: AntaTemplate) -> list[AntaCommand]: + """Render the template for each interface in the input list.""" + return [template.render(intf=intf) for intf in self.inputs.interfaces] + @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyIPProxyARP.""" - self.result.is_success() - command_output = self.instance_commands[0].json_output - - for interface in self.inputs.interfaces: - if (interface_detail := get_value(command_output["interfaces"], f"{interface}", separator="..")) is None: - self.result.is_failure(f"Interface: {interface} - Not found") - continue - - if not interface_detail["proxyArp"]: - self.result.is_failure(f"Interface: {interface} - Proxy-ARP disabled") + disabled_intf = [] + for command in self.instance_commands: + intf = command.params.intf + if not command.json_output["interfaces"][intf]["proxyArp"]: + disabled_intf.append(intf) + if disabled_intf: + self.result.is_failure(f"The following interface(s) have Proxy-ARP disabled: {disabled_intf}") + else: + self.result.is_success() class VerifyL2MTU(AntaTest): @@ -571,29 +586,36 @@ class VerifyL2MTU(AntaTest): """Default MTU we should have configured on all non-excluded interfaces. Defaults to 9214.""" ignored_interfaces: list[str] = Field(default=["Management", "Loopback", "Vxlan", "Tunnel"]) """A list of L2 interfaces to ignore. 
Defaults to ["Management", "Loopback", "Vxlan", "Tunnel"]""" - specific_mtu: list[dict[Interface, int]] = Field(default=[]) + specific_mtu: list[dict[str, int]] = Field(default=[]) """A list of dictionary of L2 interfaces with their specific MTU configured""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyL2MTU.""" - self.result.is_success() - interface_output = self.instance_commands[0].json_output["interfaces"] - specific_interfaces = {key: value for details in self.inputs.specific_mtu for key, value in details.items()} - - for interface, details in interface_output.items(): + # Parameter to save incorrect interface settings + wrong_l2mtu_intf: list[dict[str, int]] = [] + command_output = self.instance_commands[0].json_output + # Set list of interfaces with specific settings + specific_interfaces: list[str] = [] + if self.inputs.specific_mtu: + for d in self.inputs.specific_mtu: + specific_interfaces.extend(d) + for interface, values in command_output["interfaces"].items(): catch_interface = re.findall(r"^[e,p][a-zA-Z]+[-,a-zA-Z]*\d+\/*\d*", interface, re.IGNORECASE) - if catch_interface and catch_interface not in self.inputs.ignored_interfaces and details["forwardingModel"] == "bridged": + if len(catch_interface) and catch_interface[0] not in self.inputs.ignored_interfaces and values["forwardingModel"] == "bridged": if interface in specific_interfaces: - if (mtu := specific_interfaces[interface]) != (act_mtu := details["mtu"]): - self.result.is_failure(f"Interface: {interface} - Incorrect MTU configured - Expected: {mtu} Actual: {act_mtu}") - - elif (act_mtu := details["mtu"]) != self.inputs.mtu: - self.result.is_failure(f"Interface: {interface} - Incorrect MTU configured - Expected: {self.inputs.mtu} Actual: {act_mtu}") + wrong_l2mtu_intf.extend({interface: values["mtu"]} for custom_data in self.inputs.specific_mtu if values["mtu"] != custom_data[interface]) + # Comparison with generic setting + elif values["mtu"] != self.inputs.mtu: + wrong_l2mtu_intf.append({interface: values["mtu"]}) + if wrong_l2mtu_intf: + self.result.is_failure(f"Some L2 interfaces do not have correct MTU configured:\n{wrong_l2mtu_intf}") + else: + self.result.is_success() class VerifyInterfaceIPv4(AntaTest): - """Verifies the interface IPv4 addresses. + """Verifies if an interface is configured with a correct primary and list of optional secondary IPv4 addresses. Expected Results ---------------- @@ -614,61 +636,83 @@ class VerifyInterfaceIPv4(AntaTest): ``` """ + description = "Verifies the interface IPv4 addresses." 
categories: ClassVar[list[str]] = ["interfaces"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip interface", revision=2)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip interface {interface}", revision=2)] class Input(AntaTest.Input): """Input model for the VerifyInterfaceIPv4 test.""" - interfaces: list[InterfaceState] + interfaces: list[InterfaceDetail] """List of interfaces with their details.""" - InterfaceDetail: ClassVar[type[InterfaceDetail]] = InterfaceDetail - @field_validator("interfaces") - @classmethod - def validate_interfaces(cls, interfaces: list[T]) -> list[T]: - """Validate that 'primary_ip' field is provided in each interface.""" - for interface in interfaces: - if interface.primary_ip is None: - msg = f"{interface} 'primary_ip' field missing in the input" - raise ValueError(msg) - return interfaces + class InterfaceDetail(BaseModel): + """Model for an interface detail.""" + + name: Interface + """Name of the interface.""" + primary_ip: IPv4Interface + """Primary IPv4 address in CIDR notation.""" + secondary_ips: list[IPv4Interface] | None = None + """Optional list of secondary IPv4 addresses in CIDR notation.""" + + def render(self, template: AntaTemplate) -> list[AntaCommand]: + """Render the template for each interface in the input list.""" + return [template.render(interface=interface.name) for interface in self.inputs.interfaces] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfaceIPv4.""" self.result.is_success() - command_output = self.instance_commands[0].json_output - - for interface in self.inputs.interfaces: - if (interface_detail := get_value(command_output["interfaces"], f"{interface.name}", separator="..")) is None: - self.result.is_failure(f"{interface} - Not found") + for command in self.instance_commands: + intf = command.params.interface + for interface in self.inputs.interfaces: + if interface.name == intf: + input_interface_detail = interface + break + else: + self.result.is_failure(f"Could not find `{intf}` in the input interfaces. 
{GITHUB_SUGGESTION}") continue - if (ip_address := get_value(interface_detail, "interfaceAddress.primaryIp")) is None: - self.result.is_failure(f"{interface} - IP address is not configured") + input_primary_ip = str(input_interface_detail.primary_ip) + failed_messages = [] + + # Check if the interface has an IP address configured + if not (interface_output := get_value(command.json_output, f"interfaces.{intf}.interfaceAddress")): + self.result.is_failure(f"For interface `{intf}`, IP address is not configured.") continue + primary_ip = get_value(interface_output, "primaryIp") + # Combine IP address and subnet for primary IP - actual_primary_ip = f"{ip_address['address']}/{ip_address['maskLen']}" + actual_primary_ip = f"{primary_ip['address']}/{primary_ip['maskLen']}" # Check if the primary IP address matches the input - if actual_primary_ip != str(interface.primary_ip): - self.result.is_failure(f"{interface} - IP address mismatch - Expected: {interface.primary_ip} Actual: {actual_primary_ip}") + if actual_primary_ip != input_primary_ip: + failed_messages.append(f"The expected primary IP address is `{input_primary_ip}`, but the actual primary IP address is `{actual_primary_ip}`.") - if interface.secondary_ips: - if not (secondary_ips := get_value(interface_detail, "interfaceAddress.secondaryIpsOrderedList")): - self.result.is_failure(f"{interface} - Secondary IP address is not configured") - continue + if (param_secondary_ips := input_interface_detail.secondary_ips) is not None: + input_secondary_ips = sorted([str(network) for network in param_secondary_ips]) + secondary_ips = get_value(interface_output, "secondaryIpsOrderedList") + # Combine IP address and subnet for secondary IPs actual_secondary_ips = sorted([f"{secondary_ip['address']}/{secondary_ip['maskLen']}" for secondary_ip in secondary_ips]) - input_secondary_ips = sorted([str(ip) for ip in interface.secondary_ips]) - if actual_secondary_ips != input_secondary_ips: - self.result.is_failure( - f"{interface} - Secondary IP address mismatch - Expected: {', '.join(input_secondary_ips)} Actual: {', '.join(actual_secondary_ips)}" + # Check if the secondary IP address is configured + if not actual_secondary_ips: + failed_messages.append( + f"The expected secondary IP addresses are `{input_secondary_ips}`, but the actual secondary IP address is not configured." ) + # Check if the secondary IP addresses match the input + elif actual_secondary_ips != input_secondary_ips: + failed_messages.append( + f"The expected secondary IP addresses are `{input_secondary_ips}`, but the actual secondary IP addresses are `{actual_secondary_ips}`." + ) + + if failed_messages: + self.result.is_failure(f"For interface `{intf}`, " + " ".join(failed_messages)) + class VerifyIpVirtualRouterMac(AntaTest): """Verifies the IP virtual router MAC address. 
@@ -699,10 +743,13 @@ class VerifyIpVirtualRouterMac(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyIpVirtualRouterMac.""" - self.result.is_success() command_output = self.instance_commands[0].json_output["virtualMacs"] - if get_item(command_output, "macAddress", self.inputs.mac_address) is None: - self.result.is_failure(f"IP virtual router MAC address: {self.inputs.mac_address} - Not configured") + mac_address_found = get_item(command_output, "macAddress", self.inputs.mac_address) + + if mac_address_found is None: + self.result.is_failure(f"IP virtual router MAC address `{self.inputs.mac_address}` is not configured.") + else: + self.result.is_success() class VerifyInterfacesSpeed(AntaTest): @@ -741,19 +788,20 @@ class VerifyInterfacesSpeed(AntaTest): class Input(AntaTest.Input): """Inputs for the VerifyInterfacesSpeed test.""" - interfaces: list[InterfaceState] - """List of interfaces with their expected state.""" - InterfaceDetail: ClassVar[type[InterfaceDetail]] = InterfaceDetail + interfaces: list[InterfaceDetail] + """List of interfaces to be tested""" - @field_validator("interfaces") - @classmethod - def validate_interfaces(cls, interfaces: list[T]) -> list[T]: - """Validate that 'speed' field is provided in each interface.""" - for interface in interfaces: - if interface.speed is None: - msg = f"{interface} 'speed' field missing in the input" - raise ValueError(msg) - return interfaces + class InterfaceDetail(BaseModel): + """Detail of an interface.""" + + name: EthernetInterface + """The name of the interface.""" + auto: bool + """The auto-negotiation status of the interface.""" + speed: float = Field(ge=1, le=1000) + """The speed of the interface in Gigabits per second. Valid range is 1 to 1000.""" + lanes: None | int = Field(None, ge=1, le=8) + """The number of lanes in the interface. Valid range is 1 to 8. 
This field is optional.""" @AntaTest.anta_test def test(self) -> None: @@ -763,27 +811,40 @@ class VerifyInterfacesSpeed(AntaTest): # Iterate over all the interfaces for interface in self.inputs.interfaces: - if (interface_detail := get_value(command_output["interfaces"], f"{interface.name}", separator="..")) is None: - self.result.is_failure(f"{interface} - Not found") + intf = interface.name + + # Check if interface exists + if not (interface_output := get_value(command_output, f"interfaces.{intf}")): + self.result.is_failure(f"Interface `{intf}` is not found.") continue - # Verifies the bandwidth - if (speed := interface_detail.get("bandwidth")) != interface.speed * BPS_GBPS_CONVERSIONS: - self.result.is_failure( - f"{interface} - Bandwidth mismatch - Expected: {interface.speed}Gbps Actual: {custom_division(speed, BPS_GBPS_CONVERSIONS)}Gbps" - ) + auto_negotiation = interface_output.get("autoNegotiate") + actual_lanes = interface_output.get("lanes") - # Verifies the duplex mode - if (duplex := interface_detail.get("duplex")) != "duplexFull": - self.result.is_failure(f"{interface} - Duplex mode mismatch - Expected: duplexFull Actual: {duplex}") + # Collecting actual interface details + actual_interface_output = { + "auto negotiation": auto_negotiation if interface.auto is True else None, + "duplex mode": interface_output.get("duplex"), + "speed": interface_output.get("bandwidth"), + "lanes": actual_lanes if interface.lanes is not None else None, + } - # Verifies the auto-negotiation as success if specified - if interface.auto and (auto_negotiation := interface_detail.get("autoNegotiate")) != "success": - self.result.is_failure(f"{interface} - Auto-negotiation mismatch - Expected: success Actual: {auto_negotiation}") + # Forming expected interface details + expected_interface_output = { + "auto negotiation": "success" if interface.auto is True else None, + "duplex mode": "duplexFull", + "speed": interface.speed * BPS_GBPS_CONVERSIONS, + "lanes": interface.lanes, + } - # Verifies the communication lanes if specified - if interface.lanes and (lanes := interface_detail.get("lanes")) != interface.lanes: - self.result.is_failure(f"{interface} - Data lanes count mismatch - Expected: {interface.lanes} Actual: {lanes}") + # Forming failure message + if actual_interface_output != expected_interface_output: + for output in [actual_interface_output, expected_interface_output]: + # Convert speed to Gbps for readability + if output["speed"] is not None: + output["speed"] = f"{custom_division(output['speed'], BPS_GBPS_CONVERSIONS)}Gbps" + failed_log = get_failed_logs(expected_interface_output, actual_interface_output) + self.result.is_failure(f"For interface {intf}:{failed_log}\n") class VerifyLACPInterfacesStatus(AntaTest): @@ -830,16 +891,6 @@ class VerifyLACPInterfacesStatus(AntaTest): """List of interfaces with their expected state.""" InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState - @field_validator("interfaces") - @classmethod - def validate_interfaces(cls, interfaces: list[T]) -> list[T]: - """Validate that 'portchannel' field is provided in each interface.""" - for interface in interfaces: - if interface.portchannel is None: - msg = f"{interface} 'portchannel' field missing in the input" - raise ValueError(msg) - return interfaces - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLACPInterfacesStatus.""" diff --git a/anta/tests/lanz.py b/anta/tests/lanz.py index 33e5472..0995af7 100644 --- a/anta/tests/lanz.py +++ b/anta/tests/lanz.py @@ -1,4 +1,4 @@ 
-# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to LANZ tests.""" diff --git a/anta/tests/logging.py b/anta/tests/logging.py index f13860e..c391947 100644 --- a/anta/tests/logging.py +++ b/anta/tests/logging.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the EOS various logging tests. @@ -14,13 +14,13 @@ import re from ipaddress import IPv4Address from typing import TYPE_CHECKING, ClassVar -from anta.custom_types import LogSeverityLevel -from anta.input_models.logging import LoggingQuery -from anta.models import AntaCommand, AntaTemplate, AntaTest +from anta.models import AntaCommand, AntaTest if TYPE_CHECKING: import logging + from anta.models import AntaTemplate + def _get_logging_states(logger: logging.Logger, command_output: str) -> str: """Parse `show logging` output and gets operational logging states used in the tests in this module. @@ -43,35 +43,6 @@ def _get_logging_states(logger: logging.Logger, command_output: str) -> str: return log_states -class VerifySyslogLogging(AntaTest): - """Verifies if syslog logging is enabled. - - Expected Results - ---------------- - * Success: The test will pass if syslog logging is enabled. - * Failure: The test will fail if syslog logging is disabled. - - Examples - -------- - ```yaml - anta.tests.logging: - - VerifySyslogLogging: - ``` - """ - - categories: ClassVar[list[str]] = ["logging"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging", ofmt="text")] - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySyslogLogging.""" - self.result.is_success() - log_output = self.instance_commands[0].text_output - - if "Syslog logging: enabled" not in _get_logging_states(self.logger, log_output): - self.result.is_failure("Syslog logging is disabled") - - class VerifyLoggingPersistent(AntaTest): """Verifies if logging persistent is enabled and logs are saved in flash. @@ -146,7 +117,7 @@ class VerifyLoggingSourceIntf(AntaTest): if re.search(pattern, _get_logging_states(self.logger, output)): self.result.is_success() else: - self.result.is_failure(f"Source-interface: {self.inputs.interface} VRF: {self.inputs.vrf} - Not configured") + self.result.is_failure(f"Source-interface '{self.inputs.interface}' is not configured in VRF {self.inputs.vrf}") class VerifyLoggingHosts(AntaTest): @@ -193,7 +164,7 @@ class VerifyLoggingHosts(AntaTest): if not not_configured: self.result.is_success() else: - self.result.is_failure(f"Syslog servers {', '.join(not_configured)} are not configured in VRF {self.inputs.vrf}") + self.result.is_failure(f"Syslog servers {not_configured} are not configured in VRF {self.inputs.vrf}") class VerifyLoggingLogsGeneration(AntaTest): @@ -201,43 +172,35 @@ class VerifyLoggingLogsGeneration(AntaTest): This test performs the following checks: - 1. Sends a test log message at the specified severity log level. - 2. Retrieves the most recent logs (last 30 seconds). - 3. Verifies that the test message was successfully logged. + 1. Sends a test log message at the **informational** level + 2. Retrieves the most recent logs (last 30 seconds) + 3. Verifies that the test message was successfully logged + + !!! 
warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. Expected Results ---------------- * Success: If logs are being generated and the test message is found in recent logs. * Failure: If any of the following occur: - - The test message is not found in recent logs. - - The logging system is not capturing new messages. - - No logs are being generated. + - The test message is not found in recent logs + - The logging system is not capturing new messages + - No logs are being generated Examples -------- ```yaml anta.tests.logging: - VerifyLoggingLogsGeneration: - severity_level: informational ``` """ categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="send log level {severity_level} message ANTA VerifyLoggingLogsGeneration validation", ofmt="text"), - AntaTemplate(template="show logging {severity_level} last 30 seconds | grep ANTA", ofmt="text", use_cache=False), + AntaCommand(command="send log level informational message ANTA VerifyLoggingLogsGeneration validation", ofmt="text"), + AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False), ] - class Input(AntaTest.Input): - """Input model for the VerifyLoggingLogsGeneration test.""" - - severity_level: LogSeverityLevel = "informational" - """Log severity level. Defaults to informational.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for log severity level in the input.""" - return [template.render(severity_level=self.inputs.severity_level)] - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLoggingLogsGeneration.""" @@ -256,45 +219,37 @@ class VerifyLoggingHostname(AntaTest): This test performs the following checks: - 1. Retrieves the device's configured FQDN. - 2. Sends a test log message at the specified severity log level. - 3. Retrieves the most recent logs (last 30 seconds). - 4. Verifies that the test message includes the complete FQDN of the device. + 1. Retrieves the device's configured FQDN + 2. Sends a test log message at the **informational** level + 3. Retrieves the most recent logs (last 30 seconds) + 4. Verifies that the test message includes the complete FQDN of the device + + !!! warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. Expected Results ---------------- * Success: If logs are generated with the device's complete FQDN. * Failure: If any of the following occur: - - The test message is not found in recent logs. - - The log message does not include the device's FQDN. - - The FQDN in the log message doesn't match the configured FQDN. 
+ - The test message is not found in recent logs + - The log message does not include the device's FQDN + - The FQDN in the log message doesn't match the configured FQDN Examples -------- ```yaml anta.tests.logging: - VerifyLoggingHostname: - severity_level: informational ``` """ categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show hostname", revision=1), - AntaTemplate(template="send log level {severity_level} message ANTA VerifyLoggingHostname validation", ofmt="text"), - AntaTemplate(template="show logging {severity_level} last 30 seconds | grep ANTA", ofmt="text", use_cache=False), + AntaCommand(command="send log level informational message ANTA VerifyLoggingHostname validation", ofmt="text"), + AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False), ] - class Input(AntaTest.Input): - """Input model for the VerifyLoggingHostname test.""" - - severity_level: LogSeverityLevel = "informational" - """Log severity level. Defaults to informational.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for log severity level in the input.""" - return [template.render(severity_level=self.inputs.severity_level)] - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLoggingHostname.""" @@ -319,45 +274,37 @@ class VerifyLoggingTimestamp(AntaTest): This test performs the following checks: - 1. Sends a test log message at the specified severity log level. - 2. Retrieves the most recent logs (last 30 seconds). - 3. Verifies that the test message is present with a high-resolution RFC3339 timestamp format. - - Example format: `2024-01-25T15:30:45.123456+00:00`. - - Includes microsecond precision. - - Contains timezone offset. + 1. Sends a test log message at the **informational** level + 2. Retrieves the most recent logs (last 30 seconds) + 3. Verifies that the test message is present with a high-resolution RFC3339 timestamp format + - Example format: `2024-01-25T15:30:45.123456+00:00` + - Includes microsecond precision + - Contains timezone offset + + !!! warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. Expected Results ---------------- * Success: If logs are generated with the correct high-resolution RFC3339 timestamp format. * Failure: If any of the following occur: - - The test message is not found in recent logs. - - The timestamp format does not match the expected RFC3339 format. + - The test message is not found in recent logs + - The timestamp format does not match the expected RFC3339 format Examples -------- ```yaml anta.tests.logging: - VerifyLoggingTimestamp: - severity_level: informational ``` """ categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="send log level {severity_level} message ANTA VerifyLoggingTimestamp validation", ofmt="text"), - AntaTemplate(template="show logging {severity_level} last 30 seconds | grep ANTA", ofmt="text", use_cache=False), + AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation", ofmt="text"), + AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False), ] - class Input(AntaTest.Input): - """Input model for the VerifyLoggingTimestamp test.""" - - severity_level: LogSeverityLevel = "informational" - """Log severity level. 
Defaults to informational.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for log severity level in the input.""" - return [template.render(severity_level=self.inputs.severity_level)] - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLoggingTimestamp.""" @@ -434,53 +381,3 @@ class VerifyLoggingErrors(AntaTest): self.result.is_success() else: self.result.is_failure("Device has reported syslog messages with a severity of ERRORS or higher") - - -class VerifyLoggingEntries(AntaTest): - """Verifies that the expected log string is present in the last specified log messages. - - Expected Results - ---------------- - * Success: The test will pass if the expected log string for the mentioned severity level is present in the last specified log messages. - * Failure: The test will fail if the specified log string is not present in the last specified log messages. - - Examples - -------- - ```yaml - anta.tests.logging: - - VerifyLoggingEntries: - logging_entries: - - regex_match: ".ACCOUNTING-5-EXEC: cvpadmin ssh." - last_number_messages: 30 - severity_level: alerts - - regex_match: ".SPANTREE-6-INTERFACE_ADD:." - last_number_messages: 10 - severity_level: critical - ``` - """ - - categories: ClassVar[list[str]] = ["logging"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show logging {last_number_messages} {severity_level}", ofmt="text", use_cache=False) - ] - - class Input(AntaTest.Input): - """Input model for the VerifyLoggingEntries test.""" - - logging_entries: list[LoggingQuery] - """List of logging entries and regex match.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for last number messages and log severity level in the input.""" - return [template.render(last_number_messages=entry.last_number_messages, severity_level=entry.severity_level) for entry in self.inputs.logging_entries] - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyLoggingEntries.""" - self.result.is_success() - for command_output, logging_entry in zip(self.instance_commands, self.inputs.logging_entries): - output = command_output.text_output - if not re.search(logging_entry.regex_match, output): - self.result.is_failure( - f"Pattern: {logging_entry.regex_match} - Not found in last {logging_entry.last_number_messages} {logging_entry.severity_level} log entries" - ) diff --git a/anta/tests/mlag.py b/anta/tests/mlag.py index 7217d57..e353420 100644 --- a/anta/tests/mlag.py +++ b/anta/tests/mlag.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to Multi-chassis Link Aggregation (MLAG) tests.""" @@ -22,8 +22,10 @@ class VerifyMlagStatus(AntaTest): Expected Results ---------------- - * Success: The test will pass if the MLAG state is 'active', negotiation status is 'connected', peer-link status and local interface status are 'up'. - * Failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected', peer-link status or local interface status are not 'up'. + * Success: The test will pass if the MLAG state is 'active', negotiation status is 'connected', + peer-link status and local interface status are 'up'. 
+ * Failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected', + peer-link status or local interface status are not 'up'. * Skipped: The test will be skipped if MLAG is 'disabled'. Examples @@ -40,25 +42,21 @@ class VerifyMlagStatus(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMlagStatus.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if MLAG is disabled if command_output["state"] == "disabled": self.result.is_skipped("MLAG is disabled") return - - # Verifies the negotiation status - if (neg_status := command_output["negStatus"]) != "connected": - self.result.is_failure(f"MLAG negotiation status mismatch - Expected: connected Actual: {neg_status}") - - # Verifies the local interface interface status - if (intf_state := command_output["localIntfStatus"]) != "up": - self.result.is_failure(f"Operational state of the MLAG local interface is not correct - Expected: up Actual: {intf_state}") - - # Verifies the peerLinkStatus - if (peer_link_state := command_output["peerLinkStatus"]) != "up": - self.result.is_failure(f"Operational state of the MLAG peer link is not correct - Expected: up Actual: {peer_link_state}") + keys_to_verify = ["state", "negStatus", "localIntfStatus", "peerLinkStatus"] + verified_output = {key: get_value(command_output, key) for key in keys_to_verify} + if ( + verified_output["state"] == "active" + and verified_output["negStatus"] == "connected" + and verified_output["localIntfStatus"] == "up" + and verified_output["peerLinkStatus"] == "up" + ): + self.result.is_success() + else: + self.result.is_failure(f"MLAG status is not OK: {verified_output}") class VerifyMlagInterfaces(AntaTest): @@ -84,19 +82,14 @@ class VerifyMlagInterfaces(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMlagInterfaces.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if MLAG is disabled if command_output["state"] == "disabled": self.result.is_skipped("MLAG is disabled") return - - # Verifies the Inactive and Active-partial ports - inactive_ports = command_output["mlagPorts"]["Inactive"] - partial_active_ports = command_output["mlagPorts"]["Active-partial"] - if inactive_ports != 0 or partial_active_ports != 0: - self.result.is_failure(f"MLAG status is not ok - Inactive Ports: {inactive_ports} Partial Active Ports: {partial_active_ports}") + if command_output["mlagPorts"]["Inactive"] == 0 and command_output["mlagPorts"]["Active-partial"] == 0: + self.result.is_success() + else: + self.result.is_failure(f"MLAG status is not OK: {command_output['mlagPorts']}") class VerifyMlagConfigSanity(AntaTest): @@ -123,21 +116,16 @@ class VerifyMlagConfigSanity(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMlagConfigSanity.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if MLAG is disabled if command_output["mlagActive"] is False: self.result.is_skipped("MLAG is disabled") return - - # Verifies the globalConfiguration config-sanity - if get_value(command_output, "globalConfiguration"): - self.result.is_failure("MLAG config-sanity found in global configuration") - - # Verifies the interfaceConfiguration config-sanity - if get_value(command_output, "interfaceConfiguration"): - self.result.is_failure("MLAG config-sanity found in interface configuration") + keys_to_verify = 
["globalConfiguration", "interfaceConfiguration"] + verified_output = {key: get_value(command_output, key) for key in keys_to_verify} + if not any(verified_output.values()): + self.result.is_success() + else: + self.result.is_failure(f"MLAG config-sanity returned inconsistencies: {verified_output}") class VerifyMlagReloadDelay(AntaTest): @@ -173,21 +161,17 @@ class VerifyMlagReloadDelay(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMlagReloadDelay.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if MLAG is disabled if command_output["state"] == "disabled": self.result.is_skipped("MLAG is disabled") return + keys_to_verify = ["reloadDelay", "reloadDelayNonMlag"] + verified_output = {key: get_value(command_output, key) for key in keys_to_verify} + if verified_output["reloadDelay"] == self.inputs.reload_delay and verified_output["reloadDelayNonMlag"] == self.inputs.reload_delay_non_mlag: + self.result.is_success() - # Verifies the reloadDelay - if (reload_delay := get_value(command_output, "reloadDelay")) != self.inputs.reload_delay: - self.result.is_failure(f"MLAG reload-delay mismatch - Expected: {self.inputs.reload_delay}s Actual: {reload_delay}s") - - # Verifies the reloadDelayNonMlag - if (non_mlag_reload_delay := get_value(command_output, "reloadDelayNonMlag")) != self.inputs.reload_delay_non_mlag: - self.result.is_failure(f"Delay for non-MLAG ports mismatch - Expected: {self.inputs.reload_delay_non_mlag}s Actual: {non_mlag_reload_delay}s") + else: + self.result.is_failure(f"The reload-delay parameters are not configured properly: {verified_output}") class VerifyMlagDualPrimary(AntaTest): @@ -230,37 +214,25 @@ class VerifyMlagDualPrimary(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMlagDualPrimary.""" - self.result.is_success() errdisabled_action = "errdisableAllInterfaces" if self.inputs.errdisabled else "none" command_output = self.instance_commands[0].json_output - - # Skipping the test if MLAG is disabled if command_output["state"] == "disabled": self.result.is_skipped("MLAG is disabled") return - - # Verifies the dualPrimaryDetectionState if command_output["dualPrimaryDetectionState"] == "disabled": self.result.is_failure("Dual-primary detection is disabled") return - - # Verifies the dualPrimaryAction - if (primary_action := get_value(command_output, "detail.dualPrimaryAction")) != errdisabled_action: - self.result.is_failure(f"Dual-primary action mismatch - Expected: {errdisabled_action} Actual: {primary_action}") - - # Verifies the dualPrimaryDetectionDelay - if (detection_delay := get_value(command_output, "detail.dualPrimaryDetectionDelay")) != self.inputs.detection_delay: - self.result.is_failure(f"Dual-primary detection delay mismatch - Expected: {self.inputs.detection_delay} Actual: {detection_delay}") - - # Verifies the dualPrimaryMlagRecoveryDelay - if (recovery_delay := get_value(command_output, "dualPrimaryMlagRecoveryDelay")) != self.inputs.recovery_delay: - self.result.is_failure(f"Dual-primary MLAG recovery delay mismatch - Expected: {self.inputs.recovery_delay} Actual: {recovery_delay}") - - # Verifies the dualPrimaryNonMlagRecoveryDelay - if (recovery_delay_non_mlag := get_value(command_output, "dualPrimaryNonMlagRecoveryDelay")) != self.inputs.recovery_delay_non_mlag: - self.result.is_failure( - f"Dual-primary non MLAG recovery delay mismatch - Expected: {self.inputs.recovery_delay_non_mlag} Actual: 
{recovery_delay_non_mlag}" - ) + keys_to_verify = ["detail.dualPrimaryDetectionDelay", "detail.dualPrimaryAction", "dualPrimaryMlagRecoveryDelay", "dualPrimaryNonMlagRecoveryDelay"] + verified_output = {key: get_value(command_output, key) for key in keys_to_verify} + if ( + verified_output["detail.dualPrimaryDetectionDelay"] == self.inputs.detection_delay + and verified_output["detail.dualPrimaryAction"] == errdisabled_action + and verified_output["dualPrimaryMlagRecoveryDelay"] == self.inputs.recovery_delay + and verified_output["dualPrimaryNonMlagRecoveryDelay"] == self.inputs.recovery_delay_non_mlag + ): + self.result.is_success() + else: + self.result.is_failure(f"The dual-primary parameters are not configured properly: {verified_output}") class VerifyMlagPrimaryPriority(AntaTest): @@ -306,8 +278,10 @@ class VerifyMlagPrimaryPriority(AntaTest): # Check MLAG state if mlag_state != "primary": - self.result.is_failure("The device is not set as MLAG primary") + self.result.is_failure("The device is not set as MLAG primary.") # Check primary priority if primary_priority != self.inputs.primary_priority: - self.result.is_failure(f"MLAG primary priority mismatch - Expected: {self.inputs.primary_priority} Actual: {primary_priority}") + self.result.is_failure( + f"The primary priority does not match expected. Expected `{self.inputs.primary_priority}`, but found `{primary_priority}` instead.", + ) diff --git a/anta/tests/multicast.py b/anta/tests/multicast.py index fe09c94..f6e84ba 100644 --- a/anta/tests/multicast.py +++ b/anta/tests/multicast.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to multicast and IGMP tests.""" @@ -51,12 +51,12 @@ class VerifyIGMPSnoopingVlans(AntaTest): self.result.is_success() for vlan, enabled in self.inputs.vlans.items(): if str(vlan) not in command_output["vlans"]: - self.result.is_failure(f"Supplied vlan {vlan} is not present on the device") + self.result.is_failure(f"Supplied vlan {vlan} is not present on the device.") continue - expected_state = "enabled" if enabled else "disabled" + igmp_state = command_output["vlans"][str(vlan)]["igmpSnoopingState"] - if igmp_state != expected_state: - self.result.is_failure(f"VLAN{vlan} - Incorrect IGMP state - Expected: {expected_state} Actual: {igmp_state}") + if igmp_state != "enabled" if enabled else igmp_state != "disabled": + self.result.is_failure(f"IGMP state for vlan {vlan} is {igmp_state}") class VerifyIGMPSnoopingGlobal(AntaTest): @@ -91,6 +91,5 @@ class VerifyIGMPSnoopingGlobal(AntaTest): command_output = self.instance_commands[0].json_output self.result.is_success() igmp_state = command_output["igmpSnoopingState"] - expected_state = "enabled" if self.inputs.enabled else "disabled" - if igmp_state != expected_state: - self.result.is_failure(f"IGMP state is not valid - Expected: {expected_state} Actual: {igmp_state}") + if igmp_state != "enabled" if self.inputs.enabled else igmp_state != "disabled": + self.result.is_failure(f"IGMP state is not valid: {igmp_state}") diff --git a/anta/tests/path_selection.py b/anta/tests/path_selection.py index 0599ecd..15b06ae 100644 --- a/anta/tests/path_selection.py +++ b/anta/tests/path_selection.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. 
# Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Test functions related to various router path-selection settings.""" @@ -7,10 +7,12 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations +from ipaddress import IPv4Address from typing import ClassVar +from pydantic import BaseModel + from anta.decorators import skip_on_platforms -from anta.input_models.path_selection import DpsPath from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.tools import get_value @@ -48,7 +50,7 @@ class VerifyPathsHealth(AntaTest): # If no paths are configured for router path-selection, the test fails if not command_output: - self.result.is_failure("No path configured for router path-selection") + self.result.is_failure("No path configured for router path-selection.") return # Check the state of each path @@ -59,33 +61,25 @@ class VerifyPathsHealth(AntaTest): session = path_data["dpsSessions"]["0"]["active"] # If the path state of any path is not 'ipsecEstablished' or 'routeResolved', the test fails - expected_state = ["ipsecEstablished", "routeResolved"] - if path_state not in expected_state: - self.result.is_failure(f"Peer: {peer} Path Group: {group} - Invalid path state - Expected: {', '.join(expected_state)} Actual: {path_state}") + if path_state not in ["ipsecEstablished", "routeResolved"]: + self.result.is_failure(f"Path state for peer {peer} in path-group {group} is `{path_state}`.") # If the telemetry state of any path is inactive, the test fails elif not session: - self.result.is_failure(f"Peer: {peer} Path Group {group} - Telemetry state inactive") + self.result.is_failure(f"Telemetry state for peer {peer} in path-group {group} is `inactive`.") class VerifySpecificPath(AntaTest): - """Verifies the DPS path and telemetry state of an IPv4 peer. + """Verifies the path and telemetry state of a specific path for an IPv4 peer under router path-selection. - This test performs the following checks: - - 1. Verifies that the specified peer is configured. - 2. Verifies that the specified path group is found. - 3. For each specified DPS path: - - Verifies that the expected source and destination address matches the expected. - - Verifies that the state is `ipsecEstablished` or `routeResolved`. - - Verifies that the telemetry state is `active`. + The expected states are 'IPsec established', 'Resolved' for path and 'active' for telemetry. Expected Results ---------------- - * Success: The test will pass if the path state under router path-selection is either 'IPsecEstablished' or 'Resolved' + * Success: The test will pass if the path state under router path-selection is either 'IPsec established' or 'Resolved' and telemetry state as 'active'. - * Failure: The test will fail if router path selection or the peer is not configured or if the path state is not 'IPsec established' or 'Resolved', - or the telemetry state is 'inactive'. + * Failure: The test will fail if router path-selection is not configured or if the path state is not 'IPsec established' or 'Resolved', + or if the telemetry state is 'inactive'. 
Examples -------- @@ -101,15 +95,36 @@ class VerifySpecificPath(AntaTest): """ categories: ClassVar[list[str]] = ["path-selection"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show path-selection paths", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ + AntaTemplate(template="show path-selection paths peer {peer} path-group {group} source {source} destination {destination}", revision=1) + ] class Input(AntaTest.Input): """Input model for the VerifySpecificPath test.""" - paths: list[DpsPath] + paths: list[RouterPath] """List of router paths to verify.""" - RouterPath: ClassVar[type[DpsPath]] = DpsPath - """To maintain backward compatibility.""" + + class RouterPath(BaseModel): + """Detail of a router path.""" + + peer: IPv4Address + """Static peer IPv4 address.""" + + path_group: str + """Router path group name.""" + + source_address: IPv4Address + """Source IPv4 address of path.""" + + destination_address: IPv4Address + """Destination IPv4 address of path.""" + + def render(self, template: AntaTemplate) -> list[AntaCommand]: + """Render the template for each router path.""" + return [ + template.render(peer=path.peer, group=path.path_group, source=path.source_address, destination=path.destination_address) for path in self.inputs.paths + ] @skip_on_platforms(["cEOSLab", "vEOS-lab"]) @AntaTest.anta_test @@ -117,42 +132,28 @@ class VerifySpecificPath(AntaTest): """Main test function for VerifySpecificPath.""" self.result.is_success() - command_output = self.instance_commands[0].json_output + # Check the state of each path + for command in self.instance_commands: + peer = command.params.peer + path_group = command.params.group + source = command.params.source + destination = command.params.destination + command_output = command.json_output.get("dpsPeers", []) - # If the dpsPeers details are not found in the command output, the test fails. - if not (dps_peers_details := get_value(command_output, "dpsPeers")): - self.result.is_failure("Router path-selection not configured") - return - - # Iterating on each DPS peer mentioned in the inputs. - for dps_path in self.inputs.paths: - peer = str(dps_path.peer) - peer_details = dps_peers_details.get(peer, {}) # If the peer is not configured for the path group, the test fails - if not peer_details: - self.result.is_failure(f"{dps_path} - Peer not found") + if not command_output: + self.result.is_failure(f"Path `peer: {peer} source: {source} destination: {destination}` is not configured for path-group `{path_group}`.") continue - path_group = dps_path.path_group - source = str(dps_path.source_address) - destination = str(dps_path.destination_address) - path_group_details = get_value(peer_details, f"dpsGroups..{path_group}..dpsPaths", separator="..") - # If the expected path group is not found for the peer, the test fails. - if not path_group_details: - self.result.is_failure(f"{dps_path} - No DPS path found for this peer and path group") - continue - - path_data = next((path for path in path_group_details.values() if (path.get("source") == source and path.get("destination") == destination)), None) - # Source and destination address do not match, the test fails. 
- if not path_data: - self.result.is_failure(f"{dps_path} - No path matching the source and destination found") - continue - - path_state = path_data.get("state") - session = get_value(path_data, "dpsSessions.0.active") + # Extract the state of the path + path_output = get_value(command_output, f"{peer}..dpsGroups..{path_group}..dpsPaths", separator="..") + path_state = next(iter(path_output.values())).get("state") + session = get_value(next(iter(path_output.values())), "dpsSessions.0.active") # If the state of the path is not 'ipsecEstablished' or 'routeResolved', or the telemetry state is 'inactive', the test fails if path_state not in ["ipsecEstablished", "routeResolved"]: - self.result.is_failure(f"{dps_path} - Invalid state path - Expected: ipsecEstablished, routeResolved Actual: {path_state}") + self.result.is_failure(f"Path state for `peer: {peer} source: {source} destination: {destination}` in path-group {path_group} is `{path_state}`.") elif not session: - self.result.is_failure(f"{dps_path} - Telemetry state inactive for this path") + self.result.is_failure( + f"Telemetry state for path `peer: {peer} source: {source} destination: {destination}` in path-group {path_group} is `inactive`." + ) diff --git a/anta/tests/profiles.py b/anta/tests/profiles.py index 1279948..93edacd 100644 --- a/anta/tests/profiles.py +++ b/anta/tests/profiles.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to ASIC profile tests.""" @@ -51,7 +51,7 @@ class VerifyUnifiedForwardingTableMode(AntaTest): if command_output["uftMode"] == str(self.inputs.mode): self.result.is_success() else: - self.result.is_failure(f"Not running the correct UFT mode - Expected: {self.inputs.mode} Actual: {command_output['uftMode']}") + self.result.is_failure(f"Device is not running correct UFT mode (expected: {self.inputs.mode} / running: {command_output['uftMode']})") class VerifyTcamProfile(AntaTest): diff --git a/anta/tests/ptp.py b/anta/tests/ptp.py index 309871b..687f175 100644 --- a/anta/tests/ptp.py +++ b/anta/tests/ptp.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to PTP tests.""" @@ -17,7 +17,7 @@ if TYPE_CHECKING: class VerifyPtpModeStatus(AntaTest): - """Verifies that the device is configured as a PTP Boundary Clock. + """Verifies that the device is configured as a Precision Time Protocol (PTP) Boundary Clock (BC). Expected Results ---------------- @@ -33,6 +33,7 @@ class VerifyPtpModeStatus(AntaTest): ``` """ + description = "Verifies that the device is configured as a PTP Boundary Clock." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -47,13 +48,13 @@ class VerifyPtpModeStatus(AntaTest): return if ptp_mode != "ptpBoundaryClock": - self.result.is_failure(f"Not configured as a PTP Boundary Clock - Actual: {ptp_mode}") + self.result.is_failure(f"The device is not configured as a PTP Boundary Clock: '{ptp_mode}'") else: self.result.is_success() class VerifyPtpGMStatus(AntaTest): - """Verifies that the device is locked to a valid PTP Grandmaster. + """Verifies that the device is locked to a valid Precision Time Protocol (PTP) Grandmaster (GM). 
To test PTP failover, re-run the test with a secondary GMID configured. @@ -78,6 +79,7 @@ class VerifyPtpGMStatus(AntaTest): gmid: str """Identifier of the Grandmaster to which the device should be locked.""" + description = "Verifies that the device is locked to a valid PTP Grandmaster." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -85,19 +87,22 @@ class VerifyPtpGMStatus(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyPtpGMStatus.""" - self.result.is_success() command_output = self.instance_commands[0].json_output if (ptp_clock_summary := command_output.get("ptpClockSummary")) is None: self.result.is_skipped("PTP is not configured") return - if (act_gmid := ptp_clock_summary["gmClockIdentity"]) != self.inputs.gmid: - self.result.is_failure(f"The device is locked to the incorrect Grandmaster - Expected: {self.inputs.gmid} Actual: {act_gmid}") + if ptp_clock_summary["gmClockIdentity"] != self.inputs.gmid: + self.result.is_failure( + f"The device is locked to the following Grandmaster: '{ptp_clock_summary['gmClockIdentity']}', which differ from the expected one.", + ) + else: + self.result.is_success() class VerifyPtpLockStatus(AntaTest): - """Verifies that the device was locked to the upstream PTP GM in the last minute. + """Verifies that the device was locked to the upstream Precision Time Protocol (PTP) Grandmaster (GM) in the last minute. Expected Results ---------------- @@ -113,6 +118,7 @@ class VerifyPtpLockStatus(AntaTest): ``` """ + description = "Verifies that the device was locked to the upstream PTP GM in the last minute." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -130,13 +136,13 @@ class VerifyPtpLockStatus(AntaTest): time_difference = ptp_clock_summary["currentPtpSystemTime"] - ptp_clock_summary["lastSyncTime"] if time_difference >= threshold: - self.result.is_failure(f"Lock is more than {threshold}s old - Actual: {time_difference}s") + self.result.is_failure(f"The device lock is more than {threshold}s old: {time_difference}s") else: self.result.is_success() class VerifyPtpOffset(AntaTest): - """Verifies that the PTP timing offset is within +/- 1000ns from the master clock. + """Verifies that the Precision Time Protocol (PTP) timing offset is within +/- 1000ns from the master clock. Expected Results ---------------- @@ -152,6 +158,7 @@ class VerifyPtpOffset(AntaTest): ``` """ + description = "Verifies that the PTP timing offset is within +/- 1000ns from the master clock." 
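Editor's note: the VerifyPtpLockStatus hunk above subtracts `lastSyncTime` from `currentPtpSystemTime` and fails once the difference reaches the threshold. The following is a minimal standalone sketch of that comparison only; the sample `ptpClockSummary` values are hypothetical and the 60-second threshold is an assumption based on the "in the last minute" wording (the actual constant is defined outside this hunk).

```python
# Illustrative sketch only: the dict mimics the "ptpClockSummary" section of `show ptp`.
ptp_clock_summary = {
    "currentPtpSystemTime": 1735689600,  # hypothetical epoch seconds
    "lastSyncTime": 1735689590,
}

threshold = 60  # seconds; assumed from "locked ... in the last minute"

# Age of the lock is the gap between the current PTP system time and the last sync.
time_difference = ptp_clock_summary["currentPtpSystemTime"] - ptp_clock_summary["lastSyncTime"]

if time_difference >= threshold:
    print(f"FAIL: lock is more than {threshold}s old: {time_difference}s")
else:
    print("PASS: device locked to the upstream GM within the last minute")
```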
categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp monitor", revision=1)] @@ -160,9 +167,9 @@ class VerifyPtpOffset(AntaTest): def test(self) -> None: """Main test function for VerifyPtpOffset.""" threshold = 1000 - self.result.is_success() - command_output = self.instance_commands[0].json_output offset_interfaces: dict[str, list[int]] = {} + command_output = self.instance_commands[0].json_output + if not command_output["ptpMonitorData"]: self.result.is_skipped("PTP is not configured") return @@ -171,12 +178,14 @@ class VerifyPtpOffset(AntaTest): if abs(interface["offsetFromMaster"]) > threshold: offset_interfaces.setdefault(interface["intf"], []).append(interface["offsetFromMaster"]) - for interface, data in offset_interfaces.items(): - self.result.is_failure(f"Interface: {interface} - Timing offset from master is greater than +/- {threshold}ns: Actual: {', '.join(map(str, data))}") + if offset_interfaces: + self.result.is_failure(f"The device timing offset from master is greater than +/- {threshold}ns: {offset_interfaces}") + else: + self.result.is_success() class VerifyPtpPortModeStatus(AntaTest): - """Verifies the PTP interfaces state. + """Verifies that all interfaces are in a valid Precision Time Protocol (PTP) state. The interfaces can be in one of the following state: Master, Slave, Passive, or Disabled. @@ -193,6 +202,7 @@ class VerifyPtpPortModeStatus(AntaTest): ``` """ + description = "Verifies the PTP interfaces state." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -217,4 +227,4 @@ class VerifyPtpPortModeStatus(AntaTest): if not invalid_interfaces: self.result.is_success() else: - self.result.is_failure(f"The following interface(s) are not in a valid PTP state: {', '.join(invalid_interfaces)}") + self.result.is_failure(f"The following interface(s) are not in a valid PTP state: '{invalid_interfaces}'") diff --git a/anta/tests/routing/__init__.py b/anta/tests/routing/__init__.py index 85ca1ab..d4b3786 100644 --- a/anta/tests/routing/__init__.py +++ b/anta/tests/routing/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Package related to routing tests.""" diff --git a/anta/tests/routing/bgp.py b/anta/tests/routing/bgp.py index bfbcb7f..2a140dd 100644 --- a/anta/tests/routing/bgp.py +++ b/anta/tests/routing/bgp.py @@ -1,25 +1,23 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
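Editor's note: VerifyPtpOffset (above) collects, per interface, every `offsetFromMaster` sample whose absolute value exceeds the 1000 ns budget. A minimal sketch of that grouping, assuming hand-made samples shaped like the `show ptp monitor` output:

```python
# Illustrative sketch: hypothetical "ptpMonitorData" samples.
ptp_monitor_data = [
    {"intf": "Ethernet1", "offsetFromMaster": 120},
    {"intf": "Ethernet1", "offsetFromMaster": -1500},
    {"intf": "Ethernet2", "offsetFromMaster": 900},
]

threshold = 1000  # nanoseconds
offset_interfaces: dict[str, list[int]] = {}

for sample in ptp_monitor_data:
    # Keep only samples whose absolute offset exceeds the +/- 1000 ns budget.
    if abs(sample["offsetFromMaster"]) > threshold:
        offset_interfaces.setdefault(sample["intf"], []).append(sample["offsetFromMaster"])

print(offset_interfaces)  # {'Ethernet1': [-1500]} -> would be reported as a failure
```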
"""Module related to BGP tests.""" -# pylint: disable=too-many-lines +# Mypy does not understand AntaTest.Input typing # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import Any, ClassVar, TypeVar +from typing import ClassVar, TypeVar -from pydantic import PositiveInt, field_validator +from pydantic import field_validator -from anta.input_models.routing.bgp import BgpAddressFamily, BgpAfi, BgpNeighbor, BgpPeer, BgpRoute, BgpVrf, VxlanEndpoint +from anta.input_models.routing.bgp import BgpAddressFamily, BgpAfi, BgpNeighbor, BgpPeer, VxlanEndpoint from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.tools import format_data, get_item, get_value # Using a TypeVar for the BgpPeer model since mypy thinks it's a ClassVar and not a valid type when used in field validators T = TypeVar("T", bound=BgpPeer) -# TODO: Refactor to reduce the number of lines in this module later - def _check_bgp_neighbor_capability(capability_status: dict[str, bool]) -> bool: """Check if a BGP neighbor capability is advertised, received, and enabled. @@ -131,7 +129,7 @@ class VerifyBGPPeerCount(AntaTest): # Check if the count matches the expected count if address_family.num_peers != peer_count: - self.result.is_failure(f"{address_family} - Peer count mismatch - Expected: {address_family.num_peers} Actual: {peer_count}") + self.result.is_failure(f"{address_family} - Expected: {address_family.num_peers}, Actual: {peer_count}") class VerifyBGPPeersHealth(AntaTest): @@ -142,7 +140,7 @@ class VerifyBGPPeersHealth(AntaTest): 1. Validates that the VRF is configured. 2. Checks if there are any peers for the given AFI/SAFI. 3. For each relevant peer: - - Verifies that the BGP session is `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. + - Verifies that the BGP session is in the `Established` state. - Confirms that the AFI/SAFI state is `negotiated`. - Checks that both input and output TCP message queues are empty. Can be disabled by setting `check_tcp_queues` to `False`. @@ -153,8 +151,7 @@ class VerifyBGPPeersHealth(AntaTest): * Failure: If any of the following occur: - The specified VRF is not configured. - No peers are found for a given AFI/SAFI. - - A peer's session state is not `Established` or if specified, has not remained established for at least the duration specified by - the `minimum_established_time`. + - Any BGP session is not in the `Established` state. - The AFI/SAFI state is not 'negotiated' for any peer. - Any TCP message queue (input or output) is not empty when `check_tcp_queues` is `True` (default). 
@@ -164,7 +161,6 @@ class VerifyBGPPeersHealth(AntaTest): anta.tests.routing: bgp: - VerifyBGPPeersHealth: - minimum_established_time: 10000 address_families: - afi: "evpn" - afi: "ipv4" @@ -183,8 +179,6 @@ class VerifyBGPPeersHealth(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyBGPPeersHealth test.""" - minimum_established_time: PositiveInt | None = None - """Minimum established time (seconds) for all the BGP sessions.""" address_families: list[BgpAddressFamily] """List of BGP address families.""" BgpAfi: ClassVar[type[BgpAfi]] = BgpAfi @@ -214,13 +208,9 @@ class VerifyBGPPeersHealth(AntaTest): for peer in relevant_peers: # Check if the BGP session is established if peer["state"] != "Established": - self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Incorrect session state - Expected: Established Actual: {peer['state']}") + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session state is not established - State: {peer['state']}") continue - if self.inputs.minimum_established_time and (act_time := peer["establishedTime"]) < self.inputs.minimum_established_time: - msg = f"BGP session not established for the minimum required duration - Expected: {self.inputs.minimum_established_time}s Actual: {act_time}s" - self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - {msg}") - # Check if the AFI/SAFI state is negotiated capability_status = get_value(peer, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") if not _check_bgp_neighbor_capability(capability_status): @@ -231,7 +221,7 @@ class VerifyBGPPeersHealth(AntaTest): inq = peer["peerTcpInfo"]["inputQueueLength"] outq = peer["peerTcpInfo"]["outputQueueLength"] if inq != 0 or outq != 0: - self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session has non-empty message queues - InQ: {inq} OutQ: {outq}") + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPSpecificPeers(AntaTest): @@ -242,7 +232,7 @@ class VerifyBGPSpecificPeers(AntaTest): 1. Confirms that the specified VRF is configured. 2. For each specified peer: - Verifies that the peer is found in the BGP configuration. - - Verifies that the BGP session is `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. + - Checks that the BGP session is in the `Established` state. - Confirms that the AFI/SAFI state is `negotiated`. - Ensures that both input and output TCP message queues are empty. Can be disabled by setting `check_tcp_queues` to `False`. @@ -253,8 +243,7 @@ class VerifyBGPSpecificPeers(AntaTest): * Failure: If any of the following occur: - The specified VRF is not configured. - A specified peer is not found in the BGP configuration. - - A peer's session state is not `Established` or if specified, has not remained established for at least the duration specified by - the `minimum_established_time`. + - The BGP session for a peer is not in the `Established` state. - The AFI/SAFI state is not `negotiated` for a peer. - Any TCP message queue (input or output) is not empty for a peer when `check_tcp_queues` is `True` (default). 
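Editor's note: both health tests above fail a peer whose session is not `Established` or whose TCP input/output queues are non-empty (unless `check_tcp_queues` is disabled). A minimal sketch of those two checks over a hypothetical `peerList` entry; the values are made up for illustration:

```python
# Illustrative sketch: peer dict mimics one entry of vrfs.<vrf>.peerList.
peer = {
    "peerAddress": "10.1.0.1",
    "state": "Established",
    "peerTcpInfo": {"inputQueueLength": 0, "outputQueueLength": 3},
}

check_tcp_queues = True  # input flag; defaults to True in the tests above

if peer["state"] != "Established":
    print(f"FAIL: {peer['peerAddress']} session state is {peer['state']}")
elif check_tcp_queues:
    inq = peer["peerTcpInfo"]["inputQueueLength"]
    outq = peer["peerTcpInfo"]["outputQueueLength"]
    if inq != 0 or outq != 0:
        print(f"FAIL: {peer['peerAddress']} has non-empty message queues - InQ: {inq}, OutQ: {outq}")
```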
@@ -264,7 +253,6 @@ class VerifyBGPSpecificPeers(AntaTest): anta.tests.routing: bgp: - VerifyBGPSpecificPeers: - minimum_established_time: 10000 address_families: - afi: "evpn" peers: @@ -286,8 +274,6 @@ class VerifyBGPSpecificPeers(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyBGPSpecificPeers test.""" - minimum_established_time: PositiveInt | None = None - """Minimum established time (seconds) for all the BGP sessions.""" address_families: list[BgpAddressFamily] """List of BGP address families.""" BgpAfi: ClassVar[type[BgpAfi]] = BgpAfi @@ -325,13 +311,9 @@ class VerifyBGPSpecificPeers(AntaTest): # Check if the BGP session is established if peer_data["state"] != "Established": - self.result.is_failure(f"{address_family} Peer: {peer_ip} - Incorrect session state - Expected: Established Actual: {peer_data['state']}") + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session state is not established - State: {peer_data['state']}") continue - if self.inputs.minimum_established_time and (act_time := peer_data["establishedTime"]) < self.inputs.minimum_established_time: - msg = f"BGP session not established for the minimum required duration - Expected: {self.inputs.minimum_established_time}s Actual: {act_time}s" - self.result.is_failure(f"{address_family} Peer: {peer_ip} - {msg}") - # Check if the AFI/SAFI state is negotiated capability_status = get_value(peer_data, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") if not capability_status: @@ -341,104 +323,15 @@ class VerifyBGPSpecificPeers(AntaTest): self.result.is_failure(f"{address_family} Peer: {peer_ip} - AFI/SAFI state is not negotiated - {format_data(capability_status)}") # Check the TCP session message queues - inq = peer_data["peerTcpInfo"]["inputQueueLength"] - outq = peer_data["peerTcpInfo"]["outputQueueLength"] - if address_family.check_tcp_queues and (inq != 0 or outq != 0): - self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session has non-empty message queues - InQ: {inq} OutQ: {outq}") - - -class VerifyBGPPeerSession(AntaTest): - """Verifies the session state of BGP IPv4 peer(s). - - This test performs the following checks for each specified peer: - - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Verifies that the BGP session is `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. - 3. Ensures that both input and output TCP message queues are empty. - Can be disabled by setting `check_tcp_queues` global flag to `False`. - - Expected Results - ---------------- - * Success: If all of the following conditions are met: - - All specified peers are found in the BGP configuration. - - All peers sessions state are `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. - - All peers have empty TCP message queues if `check_tcp_queues` is `True` (default). - - All peers are established for specified minimum duration. - * Failure: If any of the following occur: - - A specified peer is not found in the BGP configuration. - - A peer's session state is not `Established` or if specified, has not remained established for at least the duration specified by - the `minimum_established_time`. - - A peer has non-empty TCP message queues (input or output) when `check_tcp_queues` is `True`. 
- - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPPeerSession: - minimum_established_time: 10000 - check_tcp_queues: false - bgp_peers: - - peer_address: 10.1.0.1 - vrf: default - - peer_address: 10.1.0.2 - vrf: default - - peer_address: 10.1.255.2 - vrf: DEV - - peer_address: 10.1.255.4 - vrf: DEV - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPPeerSession test.""" - - minimum_established_time: PositiveInt | None = None - """Minimum established time (seconds) for all the BGP sessions.""" - check_tcp_queues: bool = True - """Flag to check if the TCP session queues are empty for all BGP peers. Defaults to `True`.""" - bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPPeerSession.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for peer in self.inputs.bgp_peers: - peer_ip = str(peer.peer_address) - peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - - # Check if the peer is found - if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: - self.result.is_failure(f"{peer} - Not found") - continue - - # Check if the BGP session is established - if peer_data["state"] != "Established": - self.result.is_failure(f"{peer} - Incorrect session state - Expected: Established Actual: {peer_data['state']}") - continue - - if self.inputs.minimum_established_time and (act_time := peer_data["establishedTime"]) < self.inputs.minimum_established_time: - self.result.is_failure( - f"{peer} - BGP session not established for the minimum required duration - Expected: {self.inputs.minimum_established_time}s Actual: {act_time}s" - ) - - # Check the TCP session message queues - if self.inputs.check_tcp_queues: - inq = peer_data["peerTcpInfo"]["inputQueueLength"] - outq = peer_data["peerTcpInfo"]["outputQueueLength"] - if inq != 0 or outq != 0: - self.result.is_failure(f"{peer} - Session has non-empty message queues - InQ: {inq} OutQ: {outq}") + if address_family.check_tcp_queues: + inq = peer_data["peerTcpInfo"]["inputQueueLength"] + outq = peer_data["peerTcpInfo"]["outputQueueLength"] + if inq != 0 or outq != 0: + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPExchangedRoutes(AntaTest): - """Verifies the advertised and received routes of BGP IPv4 peer(s). + """Verifies the advertised and received routes of BGP peers. 
This test performs the following checks for each specified peer: @@ -473,6 +366,8 @@ class VerifyBGPExchangedRoutes(AntaTest): advertised_routes: - 192.0.255.1/32 - 192.0.254.5/32 + received_routes: + - 192.0.254.3/32 ``` """ @@ -486,15 +381,15 @@ class VerifyBGPExchangedRoutes(AntaTest): """Input model for the VerifyBGPExchangedRoutes test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers.""" BgpNeighbor: ClassVar[type[BgpNeighbor]] = BgpNeighbor @field_validator("bgp_peers") @classmethod def validate_bgp_peers(cls, bgp_peers: list[BgpPeer]) -> list[BgpPeer]: - """Validate that 'advertised_routes' or 'received_routes' field is provided in each BGP peer.""" + """Validate that 'advertised_routes' or 'received_routes' field is provided in each address family.""" for peer in bgp_peers: - if peer.advertised_routes is None and peer.received_routes is None: + if peer.advertised_routes is None or peer.received_routes is None: msg = f"{peer} 'advertised_routes' or 'received_routes' field missing in the input" raise ValueError(msg) return bgp_peers @@ -503,20 +398,6 @@ class VerifyBGPExchangedRoutes(AntaTest): """Render the template for each BGP peer in the input list.""" return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] - def _validate_bgp_route_paths(self, peer: str, route_type: str, route: str, entries: dict[str, Any]) -> str | None: - """Validate the BGP route paths.""" - # Check if the route is found - if route in entries: - # Check if the route is active and valid - route_paths = entries[route]["bgpRoutePaths"][0]["routeType"] - is_active = route_paths["active"] - is_valid = route_paths["valid"] - if not is_active or not is_valid: - return f"{peer} {route_type} route: {route} - Valid: {is_valid} Active: {is_active}" - return None - - return f"{peer} {route_type} route: {route} - Not found" - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPExchangedRoutes.""" @@ -538,38 +419,44 @@ class VerifyBGPExchangedRoutes(AntaTest): # Validate both advertised and received routes for route_type, routes in zip(["Advertised", "Received"], [peer.advertised_routes, peer.received_routes]): - # Skipping the validation for routes if user input is None - if not routes: - continue - entries = command_output[route_type] for route in routes: - # Check if the route is found. If yes then checks the route is active and valid - failure_msg = self._validate_bgp_route_paths(str(peer), route_type, str(route), entries) - if failure_msg: - self.result.is_failure(failure_msg) + # Check if the route is found + if str(route) not in entries: + self.result.is_failure(f"{peer} {route_type} route: {route} - Not found") + continue + + # Check if the route is active and valid + route_paths = entries[str(route)]["bgpRoutePaths"][0]["routeType"] + is_active = route_paths["active"] + is_valid = route_paths["valid"] + if not is_active or not is_valid: + self.result.is_failure(f"{peer} {route_type} route: {route} - Valid: {is_valid}, Active: {is_active}") class VerifyBGPPeerMPCaps(AntaTest): - """Verifies the multiprotocol capabilities of BGP IPv4 peer(s). + """Verifies the multiprotocol capabilities of BGP peers. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. For each specified capability: + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. 
For each specified capability: - Validates that the capability is present in the peer configuration. - Confirms that the capability is advertised, received, and enabled. - 3. When strict mode is enabled (`strict: true`): + 4. When strict mode is enabled (`strict: true`): - Verifies that only the specified capabilities are configured. - Ensures an exact match between configured and expected capabilities. Expected Results ---------------- * Success: If all of the following conditions are met: + - The specified VRF is configured. - All specified peers are found in the BGP configuration. - All specified capabilities are present and properly negotiated. - In strict mode, only the specified capabilities are configured. * Failure: If any of the following occur: + - The specified VRF is not configured. - A specified peer is not found in the BGP configuration. - A specified capability is not found. - A capability is not properly negotiated (not advertised, received, or enabled). @@ -586,8 +473,7 @@ class VerifyBGPPeerMPCaps(AntaTest): vrf: default strict: False capabilities: - - ipv4 labeled-Unicast - - ipv4MplsVpn + - ipv4Unicast ``` """ @@ -598,13 +484,13 @@ class VerifyBGPPeerMPCaps(AntaTest): """Input model for the VerifyBGPPeerMPCaps test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @field_validator("bgp_peers") @classmethod def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: - """Validate that 'capabilities' field is provided in each BGP peer.""" + """Validate that 'capabilities' field is provided in each address family.""" for peer in bgp_peers: if peer.capabilities is None: msg = f"{peer} 'capabilities' field missing in the input" @@ -620,10 +506,14 @@ class VerifyBGPPeerMPCaps(AntaTest): for peer in self.inputs.bgp_peers: peer_ip = str(peer.peer_address) - peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the VRF is configured + if (vrf_output := get_value(output, f"vrfs.{peer.vrf}")) is None: + self.result.is_failure(f"{peer} - VRF not configured") + continue # Check if the peer is found - if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + if (peer_data := get_item(vrf_output["peerList"], "peerAddress", peer_ip)) is None: self.result.is_failure(f"{peer} - Not found") continue @@ -647,13 +537,14 @@ class VerifyBGPPeerMPCaps(AntaTest): class VerifyBGPPeerASNCap(AntaTest): - """Verifies the four octet ASN capability of BGP IPv4 peer(s). + """Verifies the four octet ASN capability of BGP peers. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates that the capability is present in the peer configuration. - 3. Confirms that the capability is advertised, received, and enabled. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the capability is present in the peer configuration. + 4. Confirms that the capability is advertised, received, and enabled. 
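Editor's note: in strict mode, VerifyBGPPeerMPCaps requires an exact match between the capabilities configured on the peer and the ones listed in the inputs. The strict comparison itself is not shown in this hunk, so the following is only a rough sketch of such an exact-match check under assumed sample sets:

```python
# Hypothetical capability sets for one peer (keys of multiprotocolCaps vs. test inputs).
expected_capabilities = {"ipv4Unicast"}
configured_capabilities = {"ipv4Unicast", "l2VpnEvpn"}

strict = True

if strict and configured_capabilities != expected_capabilities:
    extra = configured_capabilities - expected_capabilities
    missing = expected_capabilities - configured_capabilities
    print(f"FAIL: capability mismatch - extra: {sorted(extra)}, missing: {sorted(missing)}")
```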
Expected Results ---------------- @@ -685,7 +576,7 @@ class VerifyBGPPeerASNCap(AntaTest): """Input model for the VerifyBGPPeerASNCap test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers.""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test @@ -715,13 +606,14 @@ class VerifyBGPPeerASNCap(AntaTest): class VerifyBGPPeerRouteRefreshCap(AntaTest): - """Verifies the route refresh capabilities of IPv4 BGP peer(s) in a specified VRF. + """Verifies the route refresh capabilities of a BGP peer in a specified VRF. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates that the route refresh capability is present in the peer configuration. - 3. Confirms that the capability is advertised, received, and enabled. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the route refresh capability is present in the peer configuration. + 4. Confirms that the capability is advertised, received, and enabled. Expected Results ---------------- @@ -753,7 +645,7 @@ class VerifyBGPPeerRouteRefreshCap(AntaTest): """Input model for the VerifyBGPPeerRouteRefreshCap test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test @@ -783,13 +675,14 @@ class VerifyBGPPeerRouteRefreshCap(AntaTest): class VerifyBGPPeerMD5Auth(AntaTest): - """Verifies the MD5 authentication and state of IPv4 BGP peer(s) in a specified VRF. + """Verifies the MD5 authentication and state of IPv4 BGP peers in a specified VRF. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates that the BGP session is in `Established` state. - 3. Confirms that MD5 authentication is enabled for the peer. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the BGP session is in `Established` state. + 4. Confirms that MD5 authentication is enabled for the peer. Expected Results ---------------- @@ -846,7 +739,7 @@ class VerifyBGPPeerMD5Auth(AntaTest): state = peer_data.get("state") md5_auth_enabled = peer_data.get("md5AuthEnabled") if state != "Established": - self.result.is_failure(f"{peer} - Incorrect session state - Expected: Established Actual: {state}") + self.result.is_failure(f"{peer} - Session state is not established - State: {state}") if not md5_auth_enabled: self.result.is_failure(f"{peer} - Session does not have MD5 authentication enabled") @@ -921,12 +814,13 @@ class VerifyEVPNType2Route(AntaTest): class VerifyBGPAdvCommunities(AntaTest): - """Verifies that advertised communities are standard, extended and large for BGP IPv4 peer(s). + """Verifies that advertised communities are standard, extended and large for BGP peers. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates that all required community types are advertised: + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. 
Validates that all required community types are advertised: - Standard communities - Extended communities - Large communities @@ -961,7 +855,7 @@ class VerifyBGPAdvCommunities(AntaTest): """Input model for the VerifyBGPAdvCommunities test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers.""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test @@ -986,12 +880,13 @@ class VerifyBGPAdvCommunities(AntaTest): class VerifyBGPTimers(AntaTest): - """Verifies the timers of BGP IPv4 peer(s). + """Verifies the timers of BGP peers. This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Confirms the BGP session hold time/keepalive timers match the expected value. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Confirms the BGP session hold time/keepalive timers match the expected value. Expected Results ---------------- @@ -1027,13 +922,13 @@ class VerifyBGPTimers(AntaTest): """Input model for the VerifyBGPTimers test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @field_validator("bgp_peers") @classmethod def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: - """Validate that 'hold_time' or 'keep_alive_time' field is provided in each BGP peer.""" + """Validate that 'hold_time' or 'keep_alive_time' field is provided in each address family.""" for peer in bgp_peers: if peer.hold_time is None or peer.keep_alive_time is None: msg = f"{peer} 'hold_time' or 'keep_alive_time' field missing in the input" @@ -1058,9 +953,9 @@ class VerifyBGPTimers(AntaTest): # Check BGP peer timers if peer_data["holdTime"] != peer.hold_time: - self.result.is_failure(f"{peer} - Hold time mismatch - Expected: {peer.hold_time} Actual: {peer_data['holdTime']}") + self.result.is_failure(f"{peer} - Hold time mismatch - Expected: {peer.hold_time}, Actual: {peer_data['holdTime']}") if peer_data["keepaliveTime"] != peer.keep_alive_time: - self.result.is_failure(f"{peer} - Keepalive time mismatch - Expected: {peer.keep_alive_time} Actual: {peer_data['keepaliveTime']}") + self.result.is_failure(f"{peer} - Keepalive time mismatch - Expected: {peer.keep_alive_time}, Actual: {peer_data['keepaliveTime']}") class VerifyBGPPeerDropStats(AntaTest): @@ -1068,8 +963,9 @@ class VerifyBGPPeerDropStats(AntaTest): This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates the BGP drop statistics: + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates the BGP drop statistics: - If specific drop statistics are provided, checks only those counters. - If no specific drop statistics are provided, checks all available counters. - Confirms that all checked counters have a value of zero. @@ -1106,7 +1002,7 @@ class VerifyBGPPeerDropStats(AntaTest): """Input model for the VerifyBGPPeerDropStats test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test @@ -1144,8 +1040,9 @@ class VerifyBGPPeerUpdateErrors(AntaTest): This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates the BGP update error counters: + 1. 
Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates the BGP update error counters: - If specific update error counters are provided, checks only those counters. - If no update error counters are provided, checks all available counters. - Confirms that all checked counters have a value of zero. @@ -1183,7 +1080,7 @@ class VerifyBGPPeerUpdateErrors(AntaTest): """Input model for the VerifyBGPPeerUpdateErrors test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test @@ -1221,8 +1118,9 @@ class VerifyBgpRouteMaps(AntaTest): This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Validates the correct BGP route maps are applied in the correct direction (inbound or outbound). + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates the correct BGP route maps are applied in the correct direction (inbound or outbound). Expected Results ---------------- @@ -1254,16 +1152,19 @@ class VerifyBgpRouteMaps(AntaTest): """Input model for the VerifyBgpRouteMaps test.""" bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" + """List of BGP peers""" BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @field_validator("bgp_peers") @classmethod def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: - """Validate that 'inbound_route_map' or 'outbound_route_map' field is provided in each BGP peer.""" + """Validate that 'peers' field is provided in each address family. + + At least one of 'inbound' or 'outbound' route-map must be provided. + """ for peer in bgp_peers: if not (peer.inbound_route_map or peer.outbound_route_map): - msg = f"{peer} 'inbound_route_map' or 'outbound_route_map' field missing in the input" + msg = f"{peer}; At least one of 'inbound_route_map' or 'outbound_route_map' must be provided." raise ValueError(msg) return bgp_peers @@ -1287,29 +1188,30 @@ class VerifyBgpRouteMaps(AntaTest): # Verify Inbound route-map if inbound_route_map and (inbound_map := peer_data.get("routeMapInbound", "Not Configured")) != inbound_route_map: - self.result.is_failure(f"{peer} - Inbound route-map mismatch - Expected: {inbound_route_map} Actual: {inbound_map}") + self.result.is_failure(f"{peer} - Inbound route-map mismatch - Expected: {inbound_route_map}, Actual: {inbound_map}") # Verify Outbound route-map if outbound_route_map and (outbound_map := peer_data.get("routeMapOutbound", "Not Configured")) != outbound_route_map: - self.result.is_failure(f"{peer} - Outbound route-map mismatch - Expected: {outbound_route_map} Actual: {outbound_map}") + self.result.is_failure(f"{peer} - Outbound route-map mismatch - Expected: {outbound_route_map}, Actual: {outbound_map}") class VerifyBGPPeerRouteLimit(AntaTest): - """Verifies maximum routes and warning limit for BGP IPv4 peer(s). + """Verifies maximum routes and outbound route-maps of BGP IPv4 peer(s). This test performs the following checks for each specified peer: - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Confirms the maximum routes and maximum routes warning limit, if provided, match the expected value. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. 
Confirms the maximum routes and maximum routes warning limit, if provided, match the expected value.
 
     Expected Results
     ----------------
     * Success: If all of the following conditions are met:
         - All specified peers are found in the BGP configuration.
-        - The maximum routes/maximum routes warning limit match the expected value for a peer.
+        - The maximum routes/maximum routes warning limit match the expected value for a peer.
     * Failure: If any of the following occur:
         - A specified peer is not found in the BGP configuration.
-        - The maximum routes/maximum routes warning limit do not match the expected value for a peer.
+        - The maximum routes/maximum routes warning limit do not match the expected value for a peer.
 
     Examples
     --------
@@ -1332,16 +1234,16 @@ class VerifyBGPPeerRouteLimit(AntaTest):
         """Input model for the VerifyBGPPeerRouteLimit test."""
 
         bgp_peers: list[BgpPeer]
-        """List of BGP IPv4 peers."""
+        """List of BGP peers"""
         BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer
 
         @field_validator("bgp_peers")
         @classmethod
         def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]:
-            """Validate that 'maximum_routes' field is provided in each BGP peer."""
+            """Validate that the 'maximum_routes' field is provided for each BGP peer."""
             for peer in bgp_peers:
                 if peer.maximum_routes is None:
-                    msg = f"{peer} 'maximum_routes' field missing in the input"
+                    msg = f"{peer}; 'maximum_routes' field missing in the input"
                     raise ValueError(msg)
             return bgp_peers
 
@@ -1363,648 +1265,10 @@ class VerifyBGPPeerRouteLimit(AntaTest):
                 self.result.is_failure(f"{peer} - Not found")
                 continue
 
-            # Verify maximum routes
-            if (actual_maximum_routes := peer_data.get("maxTotalRoutes", "Not Found")) != maximum_routes:
-                self.result.is_failure(f"{peer} - Maximum routes mismatch - Expected: {maximum_routes} Actual: {actual_maximum_routes}")
+            # Verify maximum routes configured.
+            if (actual_routes := peer_data.get("maxTotalRoutes", "Not Found")) != maximum_routes:
+                self.result.is_failure(f"{peer} - Maximum routes mismatch - Expected: {maximum_routes}, Actual: {actual_routes}")
 
-            # Verify warning limit if provided. By default, EOS does not have a warning limit and `totalRoutesWarnLimit` is not present in the output.
-            if warning_limit is not None and (actual_warning_limit := peer_data.get("totalRoutesWarnLimit", 0)) != warning_limit:
-                self.result.is_failure(f"{peer} - Maximum routes warning limit mismatch - Expected: {warning_limit} Actual: {actual_warning_limit}")
-
-
-class VerifyBGPPeerGroup(AntaTest):
-    """Verifies BGP peer group of BGP IPv4 peer(s).
-
-    This test performs the following checks for each specified peer:
-
-    1. Verifies that the peer is found in its VRF in the BGP configuration.
-    2. Confirms the peer group is correctly assigned to the specified BGP peer.
-
-    Expected Results
-    ----------------
-    * Success: If all of the following conditions are met:
-        - All specified peers are found in the BGP configuration.
-        - The peer group is correctly assigned to the specified BGP peer.
-    * Failure: If any of the following occur:
-        - A specified peer is not found in the BGP configuration.
-        - The peer group is not correctly assigned to the specified BGP peer.
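Editor's note: the VerifyBGPPeerRouteLimit hunk above compares `maxTotalRoutes` against the expected maximum and, when a warning limit is supplied, `totalRoutesWarnLimit` against the expected warning limit; per the removed comment, EOS omits `totalRoutesWarnLimit` entirely when no warning limit is configured. A minimal sketch with hypothetical peer data:

```python
# Illustrative sketch: peer_data mimics one entry of vrfs.<vrf>.peerList.
peer_data = {"maxTotalRoutes": 12000, "totalRoutesWarnLimit": 10000}

maximum_routes = 12000   # expected value from the test inputs
warning_limit = 9000     # expected value from the test inputs; None means "do not check"

if (actual_routes := peer_data.get("maxTotalRoutes", "Not Found")) != maximum_routes:
    print(f"FAIL: maximum routes mismatch - Expected: {maximum_routes}, Actual: {actual_routes}")

# EOS leaves totalRoutesWarnLimit out of the output when no warning limit is set,
# hence the fallback value when the key is missing.
if warning_limit is not None and (actual_warn := peer_data.get("totalRoutesWarnLimit", 0)) != warning_limit:
    print(f"FAIL: warning limit mismatch - Expected: {warning_limit}, Actual: {actual_warn}")
```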
- - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPPeerGroup: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - peer_group: IPv4-UNDERLAY-PEERS - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPPeerGroup test.""" - - bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" - - @field_validator("bgp_peers") - @classmethod - def validate_bgp_peers(cls, bgp_peers: list[BgpPeer]) -> list[BgpPeer]: - """Validate that 'peer_group' field is provided in each BGP peer.""" - for peer in bgp_peers: - if peer.peer_group is None: - msg = f"{peer} 'peer_group' field missing in the input" - raise ValueError(msg) - return bgp_peers - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPPeerGroup.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for peer in self.inputs.bgp_peers: - peer_ip = str(peer.peer_address) - peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - - # Check if the peer is found - if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: - self.result.is_failure(f"{peer} - Not found") - continue - - if (actual_peer_group := peer_data.get("peerGroupName", "Not Found")) != peer.peer_group: - self.result.is_failure(f"{peer} - Incorrect peer group configured - Expected: {peer.peer_group} Actual: {actual_peer_group}") - - -class VerifyBGPPeerSessionRibd(AntaTest): - """Verifies the session state of BGP IPv4 peer(s). - - Compatible with EOS operating in `ribd` routing protocol model. - - This test performs the following checks for each specified peer: - - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Verifies that the BGP session is `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. - 3. Ensures that both input and output TCP message queues are empty. - Can be disabled by setting `check_tcp_queues` global flag to `False`. - - Expected Results - ---------------- - * Success: If all of the following conditions are met: - - All specified peers are found in the BGP configuration. - - All peers sessions state are `Established` and, if specified, has remained established for at least the duration given by `minimum_established_time`. - - All peers have empty TCP message queues if `check_tcp_queues` is `True` (default). - - All peers are established for specified minimum duration. - * Failure: If any of the following occur: - - A specified peer is not found in the BGP configuration. - - A peer's session state is not `Established` or if specified, has not remained established for at least the duration specified by - the `minimum_established_time`. - - A peer has non-empty TCP message queues (input or output) when `check_tcp_queues` is `True`. 
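Editor's note: the removed session tests above also enforce `minimum_established_time` by comparing a peer's `establishedTime` against the configured minimum. A minimal sketch of that check; the sample values are hypothetical and `establishedTime` is treated as seconds, as in the removed code:

```python
# Illustrative sketch: peer_data mimics one entry of vrfs.<vrf>.peerList.
peer_data = {"state": "Established", "establishedTime": 4200}
minimum_established_time = 10000  # seconds, from the test inputs

if peer_data["state"] != "Established":
    print("FAIL: session state is not Established")
elif minimum_established_time and (act_time := peer_data["establishedTime"]) < minimum_established_time:
    print(f"FAIL: session established for only {act_time}s, expected at least {minimum_established_time}s")
```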
- - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPPeerSessionRibd: - minimum_established_time: 10000 - check_tcp_queues: false - bgp_peers: - - peer_address: 10.1.0.1 - vrf: default - - peer_address: 10.1.0.2 - vrf: default - - peer_address: 10.1.255.2 - vrf: DEV - - peer_address: 10.1.255.4 - vrf: DEV - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip bgp neighbors vrf all", revision=2)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPPeerSessionRibd test.""" - - minimum_established_time: PositiveInt | None = None - """Minimum established time (seconds) for all the BGP sessions.""" - check_tcp_queues: bool = True - """Flag to check if the TCP session queues are empty for all BGP peers. Defaults to `True`.""" - bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPPeerSessionRibd.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for peer in self.inputs.bgp_peers: - peer_address = str(peer.peer_address) - peers = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - - # Check if the peer is found - if (peer_data := get_item(peers, "peerAddress", peer_address)) is None: - self.result.is_failure(f"{peer} - Not found") - continue - - # Check if the BGP session is established - if peer_data["state"] != "Established": - self.result.is_failure(f"{peer} - Incorrect session state - Expected: Established Actual: {peer_data['state']}") - continue - - if self.inputs.minimum_established_time and (act_time := peer_data["establishedTime"]) < self.inputs.minimum_established_time: - self.result.is_failure( - f"{peer} - BGP session not established for the minimum required duration - Expected: {self.inputs.minimum_established_time}s Actual: {act_time}s" - ) - - # Check the TCP session message queues - if self.inputs.check_tcp_queues: - inq_stat = peer_data["peerTcpInfo"]["inputQueueLength"] - outq_stat = peer_data["peerTcpInfo"]["outputQueueLength"] - if inq_stat != 0 or outq_stat != 0: - self.result.is_failure(f"{peer} - Session has non-empty message queues - InQ: {inq_stat} OutQ: {outq_stat}") - - -class VerifyBGPPeersHealthRibd(AntaTest): - """Verifies the health of all the BGP IPv4 peer(s). - - Compatible with EOS operating in `ribd` routing protocol model. - - This test performs the following checks for all BGP IPv4 peers: - - 1. Verifies that the BGP session is in the `Established` state. - 2. Checks that both input and output TCP message queues are empty. - Can be disabled by setting `check_tcp_queues` global flag to `False`. - - Expected Results - ---------------- - * Success: If all checks pass for all BGP IPv4 peers. - * Failure: If any of the following occur: - - Any BGP session is not in the `Established` state. - - Any TCP message queue (input or output) is not empty when `check_tcp_queues` is `True` (default). - - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPPeersHealthRibd: - check_tcp_queues: True - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip bgp neighbors vrf all", revision=2)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPPeersHealthRibd test.""" - - check_tcp_queues: bool = True - """Flag to check if the TCP session queues are empty for all BGP peers. 
Defaults to `True`.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPPeersHealthRibd.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for vrf, vrf_data in output["vrfs"].items(): - peer_list = vrf_data.get("peerList", []) - - for peer in peer_list: - # Check if the BGP session is established - if peer["state"] != "Established": - self.result.is_failure(f"Peer: {peer['peerAddress']} VRF: {vrf} - Incorrect session state - Expected: Established Actual: {peer['state']}") - continue - - # Check the TCP session message queues - inq = peer["peerTcpInfo"]["inputQueueLength"] - outq = peer["peerTcpInfo"]["outputQueueLength"] - if self.inputs.check_tcp_queues and (inq != 0 or outq != 0): - self.result.is_failure(f"Peer: {peer['peerAddress']} VRF: {vrf} - Session has non-empty message queues - InQ: {inq} OutQ: {outq}") - - -class VerifyBGPNlriAcceptance(AntaTest): - """Verifies that all received NLRI are accepted for all AFI/SAFI configured for BGP IPv4 peer(s). - - This test performs the following checks for each specified peer: - - 1. Verifies that the peer is found in its VRF in the BGP configuration. - 2. Verifies that all received NLRI were accepted by comparing `nlrisReceived` with `nlrisAccepted`. - - Expected Results - ---------------- - * Success: If `nlrisReceived` equals `nlrisAccepted`, indicating all NLRI were accepted. - * Failure: If any of the following occur: - - The specified VRF is not configured. - - `nlrisReceived` does not equal `nlrisAccepted`, indicating some NLRI were rejected or filtered. - - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPNlriAcceptance: - bgp_peers: - - peer_address: 10.100.0.128 - vrf: default - capabilities: - - ipv4Unicast - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp summary vrf all", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPNlriAcceptance test.""" - - bgp_peers: list[BgpPeer] - """List of BGP IPv4 peers.""" - - @field_validator("bgp_peers") - @classmethod - def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: - """Validate that 'capabilities' field is provided in each BGP peer.""" - for peer in bgp_peers: - if peer.capabilities is None: - msg = f"{peer} 'capabilities' field missing in the input" - raise ValueError(msg) - return bgp_peers - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPNlriAcceptance.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for peer in self.inputs.bgp_peers: - # Check if the peer is found - if not (peer_data := get_value(output, f"vrfs..{peer.vrf}..peers..{peer.peer_address}", separator="..")): - self.result.is_failure(f"{peer} - Not found") - continue - - # Fetching the multiprotocol capabilities - for capability in peer.capabilities: - # Check if the capability is found - if (capability_status := get_value(peer_data, capability)) is None: - self.result.is_failure(f"{peer} - {capability} not found") - continue - - if capability_status["afiSafiState"] != "negotiated": - self.result.is_failure(f"{peer} - {capability} not negotiated") - - if (received := capability_status.get("nlrisReceived")) != (accepted := capability_status.get("nlrisAccepted")): - self.result.is_failure(f"{peer} AFI/SAFI: {capability} - Some NLRI were filtered or rejected - Accepted: {accepted} Received: {received}") - - -class 
VerifyBGPRoutePaths(AntaTest): - """Verifies BGP IPv4 route paths. - - This test performs the following checks for each specified BGP route entry: - - 1. Verifies the specified BGP route exists in the routing table. - 2. For each expected paths: - - Verifies a path with matching next-hop exists. - - Verifies the path's origin attribute matches the expected value. - - Expected Results - ---------------- - * Success: The test will pass if all specified routes exist with paths matching the expected next-hops and origin attributes. - * Failure: The test will fail if: - - A specified BGP route is not found. - - A path with specified next-hop is not found. - - A path's origin attribute doesn't match the expected value. - - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPRoutePaths: - route_entries: - - prefix: 10.100.0.128/31 - vrf: default - paths: - - nexthop: 10.100.0.10 - origin: Igp - - nexthop: 10.100.4.5 - origin: Incomplete - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip bgp vrf all", revision=3)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPRoutePaths test.""" - - route_entries: list[BgpRoute] - """List of BGP IPv4 route(s).""" - - @field_validator("route_entries") - @classmethod - def validate_route_entries(cls, route_entries: list[BgpRoute]) -> list[BgpRoute]: - """Validate that 'paths' field is provided in each BGP route.""" - for route in route_entries: - if route.paths is None: - msg = f"{route} 'paths' field missing in the input" - raise ValueError(msg) - return route_entries - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPRoutePaths.""" - self.result.is_success() - - for route in self.inputs.route_entries: - # Verify if the prefix exists in BGP table - if not (bgp_routes := get_value(self.instance_commands[0].json_output, f"vrfs..{route.vrf}..bgpRouteEntries..{route.prefix}", separator="..")): - self.result.is_failure(f"{route} - Prefix not found") - continue - - # Iterating over each path. - for path in route.paths: - nexthop = str(path.nexthop) - origin = path.origin - if not (route_path := get_item(bgp_routes["bgpRoutePaths"], "nextHop", nexthop)): - self.result.is_failure(f"{route} {path} - Path not found") - continue - - if (actual_origin := get_value(route_path, "routeType.origin")) != origin: - self.result.is_failure(f"{route} {path} - Origin mismatch - Actual: {actual_origin}") - - -class VerifyBGPRouteECMP(AntaTest): - """Verifies BGP IPv4 route ECMP paths. - - This test performs the following checks for each specified BGP route entry: - - 1. Route exists in BGP table. - 2. First path is a valid and active ECMP head. - 3. Correct number of valid ECMP contributors follow the head path. - 4. Route is installed in RIB with same amount of next-hops. - - Expected Results - ---------------- - * Success: The test will pass if all specified routes exist in both BGP and RIB tables with correct amount of ECMP paths. - * Failure: The test will fail if: - - A specified route is not found in BGP table. - - A valid and active ECMP head is not found. - - ECMP contributors count does not match the expected value. - - Route is not installed in RIB table. - - BGP and RIB nexthops count do not match. 
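Editor's note: the removed VerifyBGPRoutePaths test above looks up the path whose next-hop matches the input and then compares its `routeType.origin` attribute. A minimal standalone sketch of that lookup and comparison over hypothetical `bgpRoutePaths` data:

```python
# Illustrative sketch: mimics bgpRouteEntries.<prefix>.bgpRoutePaths.
bgp_route_paths = [
    {"nextHop": "10.100.0.10", "routeType": {"origin": "Igp"}},
    {"nextHop": "10.100.4.5", "routeType": {"origin": "Incomplete"}},
]

expected = {"nexthop": "10.100.0.10", "origin": "Igp"}

# Find the path whose next-hop matches, then compare its origin attribute.
path = next((p for p in bgp_route_paths if p["nextHop"] == expected["nexthop"]), None)
if path is None:
    print("FAIL: path not found")
elif (actual_origin := path["routeType"]["origin"]) != expected["origin"]:
    print(f"FAIL: origin mismatch - Actual: {actual_origin}")
else:
    print("PASS")
```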
- - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPRouteECMP: - route_entries: - - prefix: 10.100.0.128/31 - vrf: default - ecmp_count: 2 - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaCommand(command="show ip bgp vrf all", revision=3), - AntaCommand(command="show ip route vrf all bgp", revision=4), - ] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPRouteECMP test.""" - - route_entries: list[BgpRoute] - """List of BGP IPv4 route(s).""" - - @field_validator("route_entries") - @classmethod - def validate_route_entries(cls, route_entries: list[BgpRoute]) -> list[BgpRoute]: - """Validate that 'ecmp_count' field is provided in each BGP route.""" - for route in route_entries: - if route.ecmp_count is None: - msg = f"{route} 'ecmp_count' field missing in the input" - raise ValueError(msg) - return route_entries - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPRouteECMP.""" - self.result.is_success() - - for route in self.inputs.route_entries: - # Verify if the prefix exists in BGP table. - if not (bgp_route_entry := get_value(self.instance_commands[0].json_output, f"vrfs..{route.vrf}..bgpRouteEntries..{route.prefix}", separator="..")): - self.result.is_failure(f"{route} - Prefix not found in BGP table") - continue - - route_paths = iter(bgp_route_entry["bgpRoutePaths"]) - head = next(route_paths, None) - # Verify if the active ECMP head exists. - if head is None or not all(head["routeType"][key] for key in ["valid", "active", "ecmpHead"]): - self.result.is_failure(f"{route} - Valid and active ECMP head not found") - continue - - bgp_nexthops = {head["nextHop"]} - bgp_nexthops.update([path["nextHop"] for path in route_paths if all(path["routeType"][key] for key in ["valid", "ecmp", "ecmpContributor"])]) - - # Verify ECMP count is correct. - if len(bgp_nexthops) != route.ecmp_count: - self.result.is_failure(f"{route} - ECMP count mismatch - Expected: {route.ecmp_count} Actual: {len(bgp_nexthops)}") - continue - - # Verify if the prefix exists in routing table. - if not (route_entry := get_value(self.instance_commands[1].json_output, f"vrfs..{route.vrf}..routes..{route.prefix}", separator="..")): - self.result.is_failure(f"{route} - Prefix not found in routing table") - continue - - # Verify BGP and RIB nexthops are same. - if len(bgp_nexthops) != len(route_entry["vias"]): - self.result.is_failure(f"{route} - Nexthops count mismatch - BGP: {len(bgp_nexthops)} RIB: {len(route_entry['vias'])}") - - -class VerifyBGPRedistribution(AntaTest): - """Verifies BGP redistribution. - - This test performs the following checks for each specified VRF in the BGP instance: - - 1. Ensures that the expected address-family is configured on the device. - 2. Confirms that the redistributed route protocol, include leaked and route map match the expected values. - - - Expected Results - ---------------- - * Success: If all of the following conditions are met: - - The expected address-family is configured on the device. - - The redistributed route protocol, include leaked and route map align with the expected values for the route. - * Failure: If any of the following occur: - - The expected address-family is not configured on device. - - The redistributed route protocol, include leaked or route map does not match the expected values. 
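Editor's note: the removed VerifyBGPRouteECMP test above builds a set of next-hops from the ECMP head and its valid contributors, then compares that count against the expected `ecmp_count` and against the RIB `vias`. A minimal sketch of that set-building logic; all sample data is hypothetical:

```python
# Illustrative sketch: bgpRoutePaths entries with the flags used by the ECMP check.
bgp_route_paths = [
    {"nextHop": "10.100.0.10", "routeType": {"valid": True, "active": True, "ecmpHead": True}},
    {"nextHop": "10.100.4.5", "routeType": {"valid": True, "ecmp": True, "ecmpContributor": True}},
]
rib_vias = [{"nexthopAddr": "10.100.0.10"}, {"nexthopAddr": "10.100.4.5"}]  # hypothetical RIB entry
ecmp_count = 2  # expected value from the test inputs

paths = iter(bgp_route_paths)
head = next(paths, None)
# The first path must be a valid, active ECMP head.
if head is None or not all(head["routeType"].get(k) for k in ("valid", "active", "ecmpHead")):
    print("FAIL: valid and active ECMP head not found")
else:
    nexthops = {head["nextHop"]}
    # Remaining valid ECMP contributors add their next-hops to the set.
    nexthops.update(p["nextHop"] for p in paths if all(p["routeType"].get(k) for k in ("valid", "ecmp", "ecmpContributor")))
    if len(nexthops) != ecmp_count:
        print(f"FAIL: ECMP count mismatch - Expected: {ecmp_count}, Actual: {len(nexthops)}")
    elif len(nexthops) != len(rib_vias):
        print(f"FAIL: BGP/RIB next-hop count mismatch - BGP: {len(nexthops)}, RIB: {len(rib_vias)}")
    else:
        print("PASS")
```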
- - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPRedistribution: - vrfs: - - vrf: default - address_families: - - afi_safi: ipv4multicast - redistributed_routes: - - proto: Connected - include_leaked: True - route_map: RM-CONN-2-BGP - - proto: IS-IS - include_leaked: True - route_map: RM-CONN-2-BGP - - afi_safi: IPv6 Unicast - redistributed_routes: - - proto: User # Converted to EOS SDK - route_map: RM-CONN-2-BGP - - proto: Static - include_leaked: True - route_map: RM-CONN-2-BGP - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp instance vrf all", revision=4)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPRedistribution test.""" - - vrfs: list[BgpVrf] - """List of VRFs in the BGP instance.""" - - def _validate_redistribute_route(self, vrf_data: str, addr_family: str, afi_safi_configs: list[dict[str, Any]], route_info: dict[str, Any]) -> list[Any]: - """Validate the redstributed route details for a given address family.""" - failure_msg = [] - # If the redistributed route protocol does not match the expected value, test fails. - if not (actual_route := get_item(afi_safi_configs.get("redistributedRoutes"), "proto", route_info.proto)): - failure_msg.append(f"{vrf_data}, {addr_family}, Proto: {route_info.proto} - Not configured") - return failure_msg - - # If includes leaked field applicable, and it does not matches the expected value, test fails. - if (act_include_leaked := actual_route.get("includeLeaked", False)) != route_info.include_leaked: - failure_msg.append(f"{vrf_data}, {addr_family}, {route_info} - Include leaked mismatch - Actual: {act_include_leaked}") - - # If route map is required and it is not matching the expected value, test fails. - if all([route_info.route_map, (act_route_map := actual_route.get("routeMap", "Not Found")) != route_info.route_map]): - failure_msg.append(f"{vrf_data}, {addr_family}, {route_info} - Route map mismatch - Actual: {act_route_map}") - return failure_msg - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPRedistribution.""" - self.result.is_success() - command_output = self.instance_commands[0].json_output - - for vrf_data in self.inputs.vrfs: - # If the specified VRF details are not found, test fails. - if not (instance_details := get_value(command_output, f"vrfs.{vrf_data.vrf}")): - self.result.is_failure(f"{vrf_data} - Not configured") - continue - for address_family in vrf_data.address_families: - # If the AFI-SAFI configuration details are not found, test fails. - if not (afi_safi_configs := get_value(instance_details, f"afiSafiConfig.{address_family.afi_safi}")): - self.result.is_failure(f"{vrf_data}, {address_family} - Not redistributed") - continue - - for route_info in address_family.redistributed_routes: - failure_msg = self._validate_redistribute_route(str(vrf_data), str(address_family), afi_safi_configs, route_info) - for msg in failure_msg: - self.result.is_failure(msg) - - -class VerifyBGPPeerTtlMultiHops(AntaTest): - """Verifies BGP TTL and max-ttl-hops count for BGP IPv4 peer(s). - - This test performs the following checks for each specified BGP peer: - - 1. Verifies the specified BGP peer exists in the BGP configuration. - 2. Verifies the TTL and max-ttl-hops attribute matches the expected value. 
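Editor's note: the removed `_validate_redistribute_route` helper above checks that the expected protocol appears under `redistributedRoutes`, and that its `includeLeaked` and `routeMap` attributes match the inputs. A minimal sketch of those three checks over a hypothetical `afiSafiConfig` entry:

```python
# Illustrative sketch: mimics vrfs.<vrf>.afiSafiConfig.<afi_safi>.
afi_safi_config = {
    "redistributedRoutes": [
        {"proto": "Connected", "includeLeaked": True, "routeMap": "RM-CONN-2-BGP"},
        {"proto": "Static"},
    ]
}
expected = {"proto": "Connected", "include_leaked": True, "route_map": "RM-CONN-2-BGP"}

# Look up the redistributed entry for the expected protocol.
entry = next((r for r in afi_safi_config["redistributedRoutes"] if r["proto"] == expected["proto"]), None)
if entry is None:
    print(f"FAIL: proto {expected['proto']} not configured")
else:
    if entry.get("includeLeaked", False) != expected["include_leaked"]:
        print("FAIL: include leaked mismatch")
    if expected["route_map"] and entry.get("routeMap", "Not Found") != expected["route_map"]:
        print("FAIL: route map mismatch")
```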
- - Expected Results - ---------------- - * Success: The test will pass if all specified peers exist with TTL and max-ttl-hops attributes matching the expected values. - * Failure: If any of the following occur: - - A specified BGP peer is not found. - - A TTL or max-ttl-hops attribute doesn't match the expected value for any peer. - - Examples - -------- - ```yaml - anta.tests.routing: - bgp: - - VerifyBGPPeerTtlMultiHops: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - ttl: 3 - max_ttl_hops: 3 - - peer_address: 172.30.11.2 - vrf: test - ttl: 30 - max_ttl_hops: 30 - ``` - """ - - categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip bgp neighbors vrf all", revision=2)] - - class Input(AntaTest.Input): - """Input model for the VerifyBGPPeerTtlMultiHops test.""" - - bgp_peers: list[BgpPeer] - """List of IPv4 peer(s).""" - - @field_validator("bgp_peers") - @classmethod - def validate_bgp_peers(cls, bgp_peers: list[BgpPeer]) -> list[BgpPeer]: - """Validate that 'ttl' and 'max_ttl_hops' field is provided in each BGP peer.""" - for peer in bgp_peers: - if peer.ttl is None: - msg = f"{peer} 'ttl' field missing in the input" - raise ValueError(msg) - if peer.max_ttl_hops is None: - msg = f"{peer} 'max_ttl_hops' field missing in the input" - raise ValueError(msg) - - return bgp_peers - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyBGPPeerTtlMultiHops.""" - self.result.is_success() - command_output = self.instance_commands[0].json_output - - for peer in self.inputs.bgp_peers: - peer_ip = str(peer.peer_address) - peer_list = get_value(command_output, f"vrfs.{peer.vrf}.peerList", default=[]) - - # Check if the peer is found - if (peer_details := get_item(peer_list, "peerAddress", peer_ip)) is None: - self.result.is_failure(f"{peer} - Not found") - continue - - # Verify if the TTL duration matches the expected value. - if peer_details.get("ttl") != peer.ttl: - self.result.is_failure(f"{peer} - TTL mismatch - Expected: {peer.ttl} Actual: {peer_details.get('ttl')}") - - # Verify if the max-ttl-hops time matches the expected value. - if peer_details.get("maxTtlHops") != peer.max_ttl_hops: - self.result.is_failure(f"{peer} - Max TTL Hops mismatch - Expected: {peer.max_ttl_hops} Actual: {peer_details.get('maxTtlHops')}") + # Verify warning limit if given. + if warning_limit and (actual_warning_limit := peer_data.get("totalRoutesWarnLimit", "Not Found")) != warning_limit: + self.result.is_failure(f"{peer} - Maximum route warning limit mismatch - Expected: {warning_limit}, Actual: {actual_warning_limit}") diff --git a/anta/tests/routing/generic.py b/anta/tests/routing/generic.py index 066d39c..7b916a3 100644 --- a/anta/tests/routing/generic.py +++ b/anta/tests/routing/generic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
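Many of the checks touched by this patch resolve nested keys from EOS JSON output through `anta.tools.get_value`, frequently with a custom `..` separator so that route prefixes such as `10.100.0.128/31` (which themselves contain dots) can sit inside the lookup path. The helper's real implementation is not part of this diff; the snippet below is only a minimal sketch inferred from the call sites visible here (`dictionary`, `key`, `default`, `separator`), not the authoritative ANTA code.

```python
from __future__ import annotations

from typing import Any


def get_value(dictionary: dict[str, Any], key: str, default: Any = None, separator: str = ".") -> Any:
    """Walk a nested dictionary along a separator-delimited key path (illustrative sketch).

    Returns `default` as soon as any path element is missing.
    """
    current: Any = dictionary
    for part in key.split(separator):
        if not isinstance(current, dict) or part not in current:
            return default
        current = current[part]
    return current


# Example mirroring the calls in this diff: the ".." separator keeps "10.100.0.128/31" intact.
output = {"vrfs": {"default": {"bgpRouteEntries": {"10.100.0.128/31": {"bgpRoutePaths": []}}}}}
entry = get_value(output, "vrfs..default..bgpRouteEntries..10.100.0.128/31", separator="..")
```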
"""Module related to generic routing tests.""" @@ -11,12 +11,12 @@ from functools import cache from ipaddress import IPv4Address, IPv4Interface from typing import TYPE_CHECKING, ClassVar, Literal -from pydantic import field_validator, model_validator +from pydantic import model_validator from anta.custom_types import PositiveInteger from anta.input_models.routing.generic import IPv4Routes from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_item, get_value +from anta.tools import get_value if TYPE_CHECKING: import sys @@ -63,7 +63,7 @@ class VerifyRoutingProtocolModel(AntaTest): if configured_model == operating_model == self.inputs.model: self.result.is_success() else: - self.result.is_failure(f"Routing model is misconfigured - Expected: {self.inputs.model} Actual: {operating_model}") + self.result.is_failure(f"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {self.inputs.model}") class VerifyRoutingTableSize(AntaTest): @@ -112,9 +112,7 @@ class VerifyRoutingTableSize(AntaTest): if self.inputs.minimum <= total_routes <= self.inputs.maximum: self.result.is_success() else: - self.result.is_failure( - f"Routing table routes are outside the routes range - Expected: {self.inputs.minimum} <= to >= {self.inputs.maximum} Actual: {total_routes}" - ) + self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({self.inputs.minimum}) and maximum ({self.inputs.maximum})") class VerifyRoutingTableEntry(AntaTest): @@ -184,17 +182,16 @@ class VerifyRoutingTableEntry(AntaTest): if not missing_routes: self.result.is_success() else: - self.result.is_failure(f"The following route(s) are missing from the routing table of VRF {self.inputs.vrf}: {', '.join(missing_routes)}") + self.result.is_failure(f"The following route(s) are missing from the routing table of VRF {self.inputs.vrf}: {missing_routes}") class VerifyIPv4RouteType(AntaTest): """Verifies the route-type of the IPv4 prefixes. This test performs the following checks for each IPv4 route: - - 1. Verifies that the specified VRF is configured. - 2. Verifies that the specified IPv4 route is exists in the configuration. - 3. Verifies that the the specified IPv4 route is of the expected type. + 1. Verifies that the specified VRF is configured. + 2. Verifies that the specified IPv4 route is exists in the configuration. + 3. Verifies that the the specified IPv4 route is of the expected type. Expected Results ---------------- @@ -233,17 +230,6 @@ class VerifyIPv4RouteType(AntaTest): """Input model for the VerifyIPv4RouteType test.""" routes_entries: list[IPv4Routes] - """List of IPv4 route(s).""" - - @field_validator("routes_entries") - @classmethod - def validate_routes_entries(cls, routes_entries: list[IPv4Routes]) -> list[IPv4Routes]: - """Validate that 'route_type' field is provided in each BGP route entry.""" - for entry in routes_entries: - if entry.route_type is None: - msg = f"{entry} 'route_type' field missing in the input" - raise ValueError(msg) - return routes_entries @AntaTest.anta_test def test(self) -> None: @@ -270,82 +256,3 @@ class VerifyIPv4RouteType(AntaTest): # Verifying that the specified IPv4 routes are of the expected type. if expected_route_type != (actual_route_type := route_data.get("routeType")): self.result.is_failure(f"{entry} - Incorrect route type - Expected: {expected_route_type} Actual: {actual_route_type}") - - -class VerifyIPv4RouteNextHops(AntaTest): - """Verifies the next-hops of the IPv4 prefixes. 
- - This test performs the following checks for each IPv4 prefix: - - 1. Verifies the specified IPv4 route exists in the routing table. - 2. For each specified next-hop: - - Verifies a path with matching next-hop exists. - - Supports `strict: True` to verify that routes must be learned exclusively via the exact next-hops specified. - - Expected Results - ---------------- - * Success: The test will pass if routes exist with paths matching the expected next-hops. - * Failure: The test will fail if: - - A route entry is not found for given IPv4 prefixes. - - A path with specified next-hop is not found. - - Examples - -------- - ```yaml - anta.tests.routing: - generic: - - VerifyIPv4RouteNextHops: - route_entries: - - prefix: 10.10.0.1/32 - vrf: default - strict: false - nexthops: - - 10.100.0.8 - - 10.100.0.10 - ``` - """ - - categories: ClassVar[list[str]] = ["routing"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip route vrf all", revision=4)] - - class Input(AntaTest.Input): - """Input model for the VerifyIPv4RouteNextHops test.""" - - route_entries: list[IPv4Routes] - """List of IPv4 route(s).""" - - @field_validator("route_entries") - @classmethod - def validate_route_entries(cls, route_entries: list[IPv4Routes]) -> list[IPv4Routes]: - """Validate that 'nexthops' field is provided in each route entry.""" - for entry in route_entries: - if entry.nexthops is None: - msg = f"{entry} 'nexthops' field missing in the input" - raise ValueError(msg) - return route_entries - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyIPv4RouteNextHops.""" - self.result.is_success() - - output = self.instance_commands[0].json_output - - for entry in self.inputs.route_entries: - # Verify if the prefix exists in route table - if (route_data := get_value(output, f"vrfs..{entry.vrf}..routes..{entry.prefix}", separator="..")) is None: - self.result.is_failure(f"{entry} - prefix not found") - continue - - # Verify the nexthop addresses - actual_nexthops = sorted(["Directly connected" if (next_hop := route.get("nexthopAddr")) == "" else next_hop for route in route_data["vias"]]) - expected_nexthops = sorted([str(nexthop) for nexthop in entry.nexthops]) - - if entry.strict and expected_nexthops != actual_nexthops: - exp_nexthops = ", ".join(expected_nexthops) - self.result.is_failure(f"{entry} - List of next-hops not matching - Expected: {exp_nexthops} Actual: {', '.join(actual_nexthops)}") - continue - - for nexthop in entry.nexthops: - if not get_item(route_data["vias"], "nexthopAddr", str(nexthop)): - self.result.is_failure(f"{entry} Nexthop: {nexthop} - Route not found") diff --git a/anta/tests/routing/isis.py b/anta/tests/routing/isis.py index 6a73e40..54a4f14 100644 --- a/anta/tests/routing/isis.py +++ b/anta/tests/routing/isis.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
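Alongside `get_value`, several tests on both sides of this diff (for example `VerifyBGPPeerTtlMultiHops`, `VerifyBGPRedistribution` and the rewritten IS-IS segment checks) pick a single entry out of a list of dictionaries with `anta.tools.get_item(list, key, value)`. Its implementation is likewise outside this patch; the sketch below only illustrates the behaviour implied by those call sites and is not the library's actual code.

```python
from __future__ import annotations

from typing import Any


def get_item(items: list[dict[str, Any]] | None, key: str, value: Any) -> dict[str, Any] | None:
    """Return the first dictionary in `items` whose `key` equals `value`, else None (illustrative sketch)."""
    for item in items or []:
        if item.get(key) == value:
            return item
    return None


# Example mirroring the peer lookup in VerifyBGPPeerTtlMultiHops.
peer_list = [{"peerAddress": "172.30.11.1", "ttl": 3}, {"peerAddress": "172.30.11.2", "ttl": 30}]
peer_details = get_item(peer_list, "peerAddress", "172.30.11.1")  # -> {"peerAddress": "172.30.11.1", "ttl": 3}
```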
"""Module related to IS-IS tests.""" @@ -7,23 +7,147 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import Any, ClassVar +from ipaddress import IPv4Address, IPv4Network +from typing import Any, ClassVar, Literal -from pydantic import field_validator +from pydantic import BaseModel -from anta.input_models.routing.isis import Entry, InterfaceCount, InterfaceState, ISISInstance, IsisInstance, ISISInterface, Tunnel, TunnelPath +from anta.custom_types import Interface from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_item, get_value +from anta.tools import get_value + + +def _count_isis_neighbor(isis_neighbor_json: dict[str, Any]) -> int: + """Count the number of isis neighbors. + + Parameters + ---------- + isis_neighbor_json + The JSON output of the `show isis neighbors` command. + + Returns + ------- + int + The number of isis neighbors. + + """ + count = 0 + for vrf_data in isis_neighbor_json["vrfs"].values(): + for instance_data in vrf_data["isisInstances"].values(): + count += len(instance_data.get("neighbors", {})) + return count + + +def _get_not_full_isis_neighbors(isis_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]: + """Return the isis neighbors whose adjacency state is not `up`. + + Parameters + ---------- + isis_neighbor_json + The JSON output of the `show isis neighbors` command. + + Returns + ------- + list[dict[str, Any]] + A list of isis neighbors whose adjacency state is not `UP`. + + """ + return [ + { + "vrf": vrf, + "instance": instance, + "neighbor": adjacency["hostname"], + "state": state, + } + for vrf, vrf_data in isis_neighbor_json["vrfs"].items() + for instance, instance_data in vrf_data.get("isisInstances").items() + for neighbor, neighbor_data in instance_data.get("neighbors").items() + for adjacency in neighbor_data.get("adjacencies") + if (state := adjacency["state"]) != "up" + ] + + +def _get_full_isis_neighbors(isis_neighbor_json: dict[str, Any], neighbor_state: Literal["up", "down"] = "up") -> list[dict[str, Any]]: + """Return the isis neighbors whose adjacency state is `up`. + + Parameters + ---------- + isis_neighbor_json + The JSON output of the `show isis neighbors` command. + neighbor_state + Value of the neihbor state we are looking for. Defaults to `up`. + + Returns + ------- + list[dict[str, Any]] + A list of isis neighbors whose adjacency state is not `UP`. 
+ + """ + return [ + { + "vrf": vrf, + "instance": instance, + "neighbor": adjacency["hostname"], + "neighbor_address": adjacency["routerIdV4"], + "interface": adjacency["interfaceName"], + "state": state, + } + for vrf, vrf_data in isis_neighbor_json["vrfs"].items() + for instance, instance_data in vrf_data.get("isisInstances").items() + for neighbor, neighbor_data in instance_data.get("neighbors").items() + for adjacency in neighbor_data.get("adjacencies") + if (state := adjacency["state"]) == neighbor_state + ] + + +def _get_isis_neighbors_count(isis_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]: + """Count number of IS-IS neighbor of the device.""" + return [ + {"vrf": vrf, "interface": interface, "mode": mode, "count": int(level_data["numAdjacencies"]), "level": int(level)} + for vrf, vrf_data in isis_neighbor_json["vrfs"].items() + for instance, instance_data in vrf_data.get("isisInstances").items() + for interface, interface_data in instance_data.get("interfaces").items() + for level, level_data in interface_data.get("intfLevels").items() + if (mode := level_data["passive"]) is not True + ] + + +def _get_interface_data(interface: str, vrf: str, command_output: dict[str, Any]) -> dict[str, Any] | None: + """Extract data related to an IS-IS interface for testing.""" + if (vrf_data := get_value(command_output, f"vrfs.{vrf}")) is None: + return None + + for instance_data in vrf_data.get("isisInstances").values(): + if (intf_dict := get_value(dictionary=instance_data, key="interfaces")) is not None: + try: + return next(ifl_data for ifl, ifl_data in intf_dict.items() if ifl == interface) + except StopIteration: + return None + return None + + +def _get_adjacency_segment_data_by_neighbor(neighbor: str, instance: str, vrf: str, command_output: dict[str, Any]) -> dict[str, Any] | None: + """Extract data related to an IS-IS interface for testing.""" + search_path = f"vrfs.{vrf}.isisInstances.{instance}.adjacencySegments" + if get_value(dictionary=command_output, key=search_path, default=None) is None: + return None + + isis_instance = get_value(dictionary=command_output, key=search_path, default=None) + + return next( + (segment_data for segment_data in isis_instance if neighbor == segment_data["ipAddress"]), + None, + ) class VerifyISISNeighborState(AntaTest): - """Verifies the health of IS-IS neighbors. + """Verifies all IS-IS neighbors are in UP state. Expected Results ---------------- - * Success: The test will pass if all IS-IS neighbors are in the `up` state. - * Failure: The test will fail if any IS-IS neighbor adjacency is down. - * Skipped: The test will be skipped if IS-IS is not configured or no IS-IS neighbor is found. + * Success: The test will pass if all IS-IS neighbors are in UP state. + * Failure: The test will fail if some IS-IS neighbors are not in UP state. + * Skipped: The test will be skipped if no IS-IS neighbor is found. 
Examples -------- @@ -31,58 +155,33 @@ class VerifyISISNeighborState(AntaTest): anta.tests.routing: isis: - VerifyISISNeighborState: - check_all_vrfs: true ``` """ categories: ClassVar[list[str]] = ["isis"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis neighbors vrf all", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifyISISNeighborState test.""" - - check_all_vrfs: bool = False - """If enabled, verifies IS-IS instances of all VRFs.""" + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis neighbors", revision=1)] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyISISNeighborState.""" - self.result.is_success() - - # Verify if IS-IS is configured - if not (command_output := self.instance_commands[0].json_output["vrfs"]): - self.result.is_skipped("IS-IS not configured") - return - - vrfs_to_check = command_output - if not self.inputs.check_all_vrfs: - vrfs_to_check = {"default": command_output["default"]} - - no_neighbor = True - for vrf, vrf_data in vrfs_to_check.items(): - for isis_instance, instance_data in vrf_data["isisInstances"].items(): - neighbors = instance_data["neighbors"] - if not neighbors: - continue - no_neighbor = False - interfaces = [(adj["interfaceName"], adj["state"]) for neighbor in neighbors.values() for adj in neighbor["adjacencies"] if adj["state"] != "up"] - for interface in interfaces: - self.result.is_failure( - f"Instance: {isis_instance} VRF: {vrf} Interface: {interface[0]} - Incorrect adjacency state - Expected: up Actual: {interface[1]}" - ) - - if no_neighbor: + command_output = self.instance_commands[0].json_output + if _count_isis_neighbor(command_output) == 0: self.result.is_skipped("No IS-IS neighbor detected") + return + self.result.is_success() + not_full_neighbors = _get_not_full_isis_neighbors(command_output) + if not_full_neighbors: + self.result.is_failure(f"Some neighbors are not in the correct state (UP): {not_full_neighbors}.") class VerifyISISNeighborCount(AntaTest): - """Verifies the number of IS-IS neighbors per interface and level. + """Verifies number of IS-IS neighbors per level and per interface. Expected Results ---------------- - * Success: The test will pass if all provided IS-IS interfaces have the expected number of neighbors. - * Failure: The test will fail if any of the provided IS-IS interfaces are not configured or have an incorrect number of neighbors. - * Skipped: The test will be skipped if IS-IS is not configured. + * Success: The test will pass if the number of neighbors is correct. + * Failure: The test will fail if the number of neighbors is incorrect. + * Skipped: The test will be skipped if no IS-IS neighbor is found. 
Examples -------- @@ -99,54 +198,59 @@ class VerifyISISNeighborCount(AntaTest): count: 1 - name: Ethernet3 count: 2 + # level is set to 2 by default ``` """ categories: ClassVar[list[str]] = ["isis"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief vrf all", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyISISNeighborCount test.""" - interfaces: list[ISISInterface] - """List of IS-IS interfaces with their information.""" - InterfaceCount: ClassVar[type[InterfaceCount]] = InterfaceCount + interfaces: list[InterfaceCount] + """list of interfaces with their information.""" + + class InterfaceCount(BaseModel): + """Input model for the VerifyISISNeighborCount test.""" + + name: Interface + """Interface name to check.""" + level: int = 2 + """IS-IS level to check.""" + count: int + """Number of IS-IS neighbors.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyISISNeighborCount.""" + command_output = self.instance_commands[0].json_output self.result.is_success() - - # Verify if IS-IS is configured - if not (command_output := self.instance_commands[0].json_output["vrfs"]): - self.result.is_skipped("IS-IS not configured") + isis_neighbor_count = _get_isis_neighbors_count(command_output) + if len(isis_neighbor_count) == 0: + self.result.is_skipped("No IS-IS neighbor detected") return - for interface in self.inputs.interfaces: - interface_detail = {} - vrf_instances = get_value(command_output, f"{interface.vrf}..isisInstances", default={}, separator="..") - for instance_data in vrf_instances.values(): - if interface_data := get_value(instance_data, f"interfaces..{interface.name}..intfLevels..{interface.level}", separator=".."): - interface_detail = interface_data - # An interface can only be configured in one IS-IS instance at a time - break - - if not interface_detail: - self.result.is_failure(f"{interface} - Not configured") + eos_data = [ifl_data for ifl_data in isis_neighbor_count if ifl_data["interface"] == interface.name and ifl_data["level"] == interface.level] + if not eos_data: + self.result.is_failure(f"No neighbor detected for interface {interface.name}") continue - - if interface_detail["passive"] is False and (act_count := interface_detail["numAdjacencies"]) != interface.count: - self.result.is_failure(f"{interface} - Neighbor count mismatch - Expected: {interface.count} Actual: {act_count}") + if eos_data[0]["count"] != interface.count: + self.result.is_failure( + f"Interface {interface.name}: " + f"expected Level {interface.level}: count {interface.count}, " + f"got Level {eos_data[0]['level']}: count {eos_data[0]['count']}" + ) class VerifyISISInterfaceMode(AntaTest): - """Verifies IS-IS interfaces are running in the correct mode. + """Verifies ISIS Interfaces are running in correct mode. Expected Results ---------------- - * Success: The test will pass if all provided IS-IS interfaces are running in the correct mode. - * Failure: The test will fail if any of the provided IS-IS interfaces are not configured or running in the incorrect mode. - * Skipped: The test will be skipped if IS-IS is not configured. + * Success: The test will pass if all listed interfaces are running in correct mode. + * Failure: The test will fail if any of the listed interfaces is not running in correct mode. + * Skipped: The test will be skipped if no ISIS neighbor is found. 
Examples -------- @@ -157,71 +261,80 @@ class VerifyISISInterfaceMode(AntaTest): interfaces: - name: Loopback0 mode: passive + # vrf is set to default by default - name: Ethernet2 mode: passive level: 2 + # vrf is set to default by default - name: Ethernet1 mode: point-to-point - vrf: PROD + vrf: default + # level is set to 2 by default ``` """ + description = "Verifies interface mode for IS-IS" categories: ClassVar[list[str]] = ["isis"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief vrf all", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief", revision=1)] class Input(AntaTest.Input): - """Input model for the VerifyISISInterfaceMode test.""" + """Input model for the VerifyISISNeighborCount test.""" - interfaces: list[ISISInterface] - """List of IS-IS interfaces with their information.""" - InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState + interfaces: list[InterfaceState] + """list of interfaces with their information.""" + + class InterfaceState(BaseModel): + """Input model for the VerifyISISNeighborCount test.""" + + name: Interface + """Interface name to check.""" + level: Literal[1, 2] = 2 + """ISIS level configured for interface. Default is 2.""" + mode: Literal["point-to-point", "broadcast", "passive"] + """Number of IS-IS neighbors.""" + vrf: str = "default" + """VRF where the interface should be configured""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyISISInterfaceMode.""" + command_output = self.instance_commands[0].json_output self.result.is_success() - # Verify if IS-IS is configured - if not (command_output := self.instance_commands[0].json_output["vrfs"]): - self.result.is_skipped("IS-IS not configured") + if len(command_output["vrfs"]) == 0: + self.result.is_skipped("IS-IS is not configured on device") return + # Check for p2p interfaces for interface in self.inputs.interfaces: - interface_detail = {} - vrf_instances = get_value(command_output, f"{interface.vrf}..isisInstances", default={}, separator="..") - for instance_data in vrf_instances.values(): - if interface_data := get_value(instance_data, f"interfaces..{interface.name}", separator=".."): - interface_detail = interface_data - # An interface can only be configured in one IS-IS instance at a time - break - - if not interface_detail: - self.result.is_failure(f"{interface} - Not configured") - continue - - # Check for passive - if interface.mode == "passive": - if get_value(interface_detail, f"intfLevels.{interface.level}.passive", default=False) is False: - self.result.is_failure(f"{interface} - Not running in passive mode") - - # Check for point-to-point or broadcast - elif interface.mode != (interface_type := get_value(interface_detail, "interfaceType", default="unset")): - self.result.is_failure(f"{interface} - Incorrect interface mode - Expected: {interface.mode} Actual: {interface_type}") + interface_data = _get_interface_data( + interface=interface.name, + vrf=interface.vrf, + command_output=command_output, + ) + # Check for correct VRF + if interface_data is not None: + interface_type = get_value(dictionary=interface_data, key="interfaceType", default="unset") + # Check for interfaceType + if interface.mode == "point-to-point" and interface.mode != interface_type: + self.result.is_failure(f"Interface {interface.name} in VRF {interface.vrf} is not running in {interface.mode} reporting {interface_type}") + # Check for passive + elif 
interface.mode == "passive": + json_path = f"intfLevels.{interface.level}.passive" + if interface_data is None or get_value(dictionary=interface_data, key=json_path, default=False) is False: + self.result.is_failure(f"Interface {interface.name} in VRF {interface.vrf} is not running in passive mode") + else: + self.result.is_failure(f"Interface {interface.name} not found in VRF {interface.vrf}") class VerifyISISSegmentRoutingAdjacencySegments(AntaTest): - """Verifies IS-IS segment routing adjacency segments. - - !!! warning "IS-IS SR Limitation" - As of EOS 4.33.1F, IS-IS SR is supported only in the default VRF. - Please refer to the IS-IS Segment Routing [documentation](https://www.arista.com/en/support/toi/eos-4-17-0f/13789-isis-segment-routing) - for more information. + """Verify that all expected Adjacency segments are correctly visible for each interface. Expected Results ---------------- - * Success: The test will pass if all provided IS-IS instances have the correct adjacency segments. - * Failure: The test will fail if any of the provided IS-IS instances have no adjacency segments or incorrect segments. - * Skipped: The test will be skipped if IS-IS is not configured. + * Success: The test will pass if all listed interfaces have correct adjacencies. + * Failure: The test will fail if any of the listed interfaces has not expected list of adjacencies. + * Skipped: The test will be skipped if no ISIS SR Adjacency is found. Examples -------- @@ -245,62 +358,91 @@ class VerifyISISSegmentRoutingAdjacencySegments(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyISISSegmentRoutingAdjacencySegments test.""" - instances: list[ISISInstance] - """List of IS-IS instances with their information.""" - IsisInstance: ClassVar[type[IsisInstance]] = IsisInstance + instances: list[IsisInstance] - @field_validator("instances") - @classmethod - def validate_instances(cls, instances: list[ISISInstance]) -> list[ISISInstance]: - """Validate that 'vrf' field is 'default' in each IS-IS instance.""" - for instance in instances: - if instance.vrf != "default": - msg = f"{instance} 'vrf' field must be 'default'" - raise ValueError(msg) - return instances + class IsisInstance(BaseModel): + """ISIS Instance model definition.""" + + name: str + """ISIS instance name.""" + vrf: str = "default" + """VRF name where ISIS instance is configured.""" + segments: list[Segment] + """List of Adjacency segments configured in this instance.""" + + class Segment(BaseModel): + """Segment model definition.""" + + interface: Interface + """Interface name to check.""" + level: Literal[1, 2] = 2 + """ISIS level configured for interface. Default is 2.""" + sid_origin: Literal["dynamic"] = "dynamic" + """Adjacency type""" + address: IPv4Address + """IP address of remote end of segment.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyISISSegmentRoutingAdjacencySegments.""" + command_output = self.instance_commands[0].json_output self.result.is_success() - # Verify if IS-IS is configured - if not (command_output := self.instance_commands[0].json_output["vrfs"]): - self.result.is_skipped("IS-IS not configured") + if len(command_output["vrfs"]) == 0: + self.result.is_skipped("IS-IS is not configured on device") return + # initiate defaults + failure_message = [] + skip_vrfs = [] + skip_instances = [] + + # Check if VRFs and instances are present in output. 
for instance in self.inputs.instances: - if not (act_segments := get_value(command_output, f"{instance.vrf}..isisInstances..{instance.name}..adjacencySegments", default=[], separator="..")): - self.result.is_failure(f"{instance} - No adjacency segments found") - continue + vrf_data = get_value( + dictionary=command_output, + key=f"vrfs.{instance.vrf}", + default=None, + ) + if vrf_data is None: + skip_vrfs.append(instance.vrf) + failure_message.append(f"VRF {instance.vrf} is not configured to run segment routging.") - for segment in instance.segments: - if (act_segment := get_item(act_segments, "ipAddress", str(segment.address))) is None: - self.result.is_failure(f"{instance} {segment} - Adjacency segment not found") - continue + elif get_value(dictionary=vrf_data, key=f"isisInstances.{instance.name}", default=None) is None: + skip_instances.append(instance.name) + failure_message.append(f"Instance {instance.name} is not found in vrf {instance.vrf}.") - # Check SID origin - if (act_origin := act_segment["sidOrigin"]) != segment.sid_origin: - self.result.is_failure(f"{instance} {segment} - Incorrect SID origin - Expected: {segment.sid_origin} Actual: {act_origin}") + # Check Adjacency segments + for instance in self.inputs.instances: + if instance.vrf not in skip_vrfs and instance.name not in skip_instances: + for input_segment in instance.segments: + eos_segment = _get_adjacency_segment_data_by_neighbor( + neighbor=str(input_segment.address), + instance=instance.name, + vrf=instance.vrf, + command_output=command_output, + ) + if eos_segment is None: + failure_message.append(f"Your segment has not been found: {input_segment}.") - # Check IS-IS level - if (actual_level := act_segment["level"]) != segment.level: - self.result.is_failure(f"{instance} {segment} - Incorrect IS-IS level - Expected: {segment.level} Actual: {actual_level}") + elif ( + eos_segment["localIntf"] != input_segment.interface + or eos_segment["level"] != input_segment.level + or eos_segment["sidOrigin"] != input_segment.sid_origin + ): + failure_message.append(f"Your segment is not correct: Expected: {input_segment} - Found: {eos_segment}.") + if failure_message: + self.result.is_failure("\n".join(failure_message)) class VerifyISISSegmentRoutingDataplane(AntaTest): - """Verifies IS-IS segment routing data-plane configuration. - - !!! warning "IS-IS SR Limitation" - As of EOS 4.33.1F, IS-IS SR is supported only in the default VRF. - Please refer to the IS-IS Segment Routing [documentation](https://www.arista.com/en/support/toi/eos-4-17-0f/13789-isis-segment-routing) - for more information. + """Verify dataplane of a list of ISIS-SR instances. Expected Results ---------------- - * Success: The test will pass if all provided IS-IS instances have the correct data-plane configured. - * Failure: The test will fail if any of the provided IS-IS instances have an incorrect data-plane configured. - * Skipped: The test will be skipped if IS-IS is not configured. 
+ * Success: The test will pass if all instances have correct dataplane configured + * Failure: The test will fail if one of the instances has incorrect dataplane configured + * Skipped: The test will be skipped if ISIS is not running Examples -------- @@ -321,37 +463,57 @@ class VerifyISISSegmentRoutingDataplane(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyISISSegmentRoutingDataplane test.""" - instances: list[ISISInstance] - """List of IS-IS instances with their information.""" - IsisInstance: ClassVar[type[IsisInstance]] = IsisInstance + instances: list[IsisInstance] - @field_validator("instances") - @classmethod - def validate_instances(cls, instances: list[ISISInstance]) -> list[ISISInstance]: - """Validate that 'vrf' field is 'default' in each IS-IS instance.""" - for instance in instances: - if instance.vrf != "default": - msg = f"{instance} 'vrf' field must be 'default'" - raise ValueError(msg) - return instances + class IsisInstance(BaseModel): + """ISIS Instance model definition.""" + + name: str + """ISIS instance name.""" + vrf: str = "default" + """VRF name where ISIS instance is configured.""" + dataplane: Literal["MPLS", "mpls", "unset"] = "MPLS" + """Configured dataplane for the instance.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyISISSegmentRoutingDataplane.""" + command_output = self.instance_commands[0].json_output self.result.is_success() - # Verify if IS-IS is configured - if not (command_output := self.instance_commands[0].json_output["vrfs"]): - self.result.is_skipped("IS-IS not configured") + if len(command_output["vrfs"]) == 0: + self.result.is_skipped("IS-IS-SR is not running on device.") return - for instance in self.inputs.instances: - if not (instance_data := get_value(command_output, f"{instance.vrf}..isisInstances..{instance.name}", separator="..")): - self.result.is_failure(f"{instance} - Not configured") - continue + # initiate defaults + failure_message = [] + skip_vrfs = [] + skip_instances = [] - if instance.dataplane.upper() != (dataplane := instance_data["dataPlane"]): - self.result.is_failure(f"{instance} - Data-plane not correctly configured - Expected: {instance.dataplane.upper()} Actual: {dataplane}") + # Check if VRFs and instances are present in output. 
+ for instance in self.inputs.instances: + vrf_data = get_value( + dictionary=command_output, + key=f"vrfs.{instance.vrf}", + default=None, + ) + if vrf_data is None: + skip_vrfs.append(instance.vrf) + failure_message.append(f"VRF {instance.vrf} is not configured to run segment routing.") + + elif get_value(dictionary=vrf_data, key=f"isisInstances.{instance.name}", default=None) is None: + skip_instances.append(instance.name) + failure_message.append(f"Instance {instance.name} is not found in vrf {instance.vrf}.") + + # Check Adjacency segments + for instance in self.inputs.instances: + if instance.vrf not in skip_vrfs and instance.name not in skip_instances: + eos_dataplane = get_value(dictionary=command_output, key=f"vrfs.{instance.vrf}.isisInstances.{instance.name}.dataPlane", default=None) + if instance.dataplane.upper() != eos_dataplane: + failure_message.append(f"ISIS instance {instance.name} is not running dataplane {instance.dataplane} ({eos_dataplane})") + + if failure_message: + self.result.is_failure("\n".join(failure_message)) class VerifyISISSegmentRoutingTunnels(AntaTest): @@ -391,9 +553,34 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyISISSegmentRoutingTunnels test.""" - entries: list[Tunnel] + entries: list[Entry] """List of tunnels to check on device.""" - Entry: ClassVar[type[Entry]] = Entry + + class Entry(BaseModel): + """Definition of a tunnel entry.""" + + endpoint: IPv4Network + """Endpoint IP of the tunnel.""" + vias: list[Vias] | None = None + """Optional list of path to reach endpoint.""" + + class Vias(BaseModel): + """Definition of a tunnel path.""" + + nexthop: IPv4Address | None = None + """Nexthop of the tunnel. If None, then it is not tested. Default: None""" + type: Literal["ip", "tunnel"] | None = None + """Type of the tunnel. If None, then it is not tested. Default: None""" + interface: Interface | None = None + """Interface of the tunnel. If None, then it is not tested. Default: None""" + tunnel_id: Literal["TI-LFA", "ti-lfa", "unset"] | None = None + """Computation method of the tunnel. If None, then it is not tested. Default: None""" + + def _eos_entry_lookup(self, search_value: IPv4Network, entries: dict[str, Any], search_key: str = "endpoint") -> dict[str, Any] | None: + return next( + (entry_value for entry_id, entry_value in entries.items() if str(entry_value[search_key]) == str(search_value)), + None, + ) @AntaTest.anta_test def test(self) -> None: @@ -402,43 +589,142 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): This method performs the main test logic for verifying ISIS Segment Routing tunnels. It checks the command output, initiates defaults, and performs various checks on the tunnels. 
""" + command_output = self.instance_commands[0].json_output self.result.is_success() - command_output = self.instance_commands[0].json_output + # initiate defaults + failure_message = [] + if len(command_output["entries"]) == 0: - self.result.is_skipped("IS-IS-SR not configured") + self.result.is_skipped("IS-IS-SR is not running on device.") return for input_entry in self.inputs.entries: - entries = list(command_output["entries"].values()) - if (eos_entry := get_item(entries, "endpoint", str(input_entry.endpoint))) is None: - self.result.is_failure(f"{input_entry} - Tunnel not found") - continue - - if input_entry.vias is not None: + eos_entry = self._eos_entry_lookup(search_value=input_entry.endpoint, entries=command_output["entries"]) + if eos_entry is None: + failure_message.append(f"Tunnel to {input_entry} is not found.") + elif input_entry.vias is not None: + failure_src = [] for via_input in input_entry.vias: - via_search_result = any(self._via_matches(via_input, eos_via) for eos_via in eos_entry["vias"]) - if not via_search_result: - self.result.is_failure(f"{input_entry} {via_input} - Tunnel is incorrect") + if not self._check_tunnel_type(via_input, eos_entry): + failure_src.append("incorrect tunnel type") + if not self._check_tunnel_nexthop(via_input, eos_entry): + failure_src.append("incorrect nexthop") + if not self._check_tunnel_interface(via_input, eos_entry): + failure_src.append("incorrect interface") + if not self._check_tunnel_id(via_input, eos_entry): + failure_src.append("incorrect tunnel ID") - def _via_matches(self, via_input: TunnelPath, eos_via: dict[str, Any]) -> bool: - """Check if the via input matches the eos via. + if failure_src: + failure_message.append(f"Tunnel to {input_entry.endpoint!s} is incorrect: {', '.join(failure_src)}") + + if failure_message: + self.result.is_failure("\n".join(failure_message)) + + def _check_tunnel_type(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: + """Check if the tunnel type specified in `via_input` matches any of the tunnel types in `eos_entry`. Parameters ---------- - via_input : TunnelPath - The input via to check. - eos_via : dict[str, Any] - The EOS via to compare against. + via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias + The input tunnel type to check. + eos_entry : dict[str, Any] + The EOS entry containing the tunnel types. Returns ------- bool - True if the via input matches the eos via, False otherwise. + True if the tunnel type matches any of the tunnel types in `eos_entry`, False otherwise. """ - return ( - (via_input.type is None or via_input.type == eos_via.get("type")) - and (via_input.nexthop is None or str(via_input.nexthop) == eos_via.get("nexthop")) - and (via_input.interface is None or via_input.interface == eos_via.get("interface")) - and (via_input.tunnel_id is None or via_input.tunnel_id.upper() == get_value(eos_via, "tunnelId.type", default="").upper()) - ) + if via_input.type is not None: + return any( + via_input.type + == get_value( + dictionary=eos_via, + key="type", + default="undefined", + ) + for eos_via in eos_entry["vias"] + ) + return True + + def _check_tunnel_nexthop(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: + """Check if the tunnel nexthop matches the given input. + + Parameters + ---------- + via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias + The input via object. + eos_entry : dict[str, Any] + The EOS entry dictionary. 
+ + Returns + ------- + bool + True if the tunnel nexthop matches, False otherwise. + """ + if via_input.nexthop is not None: + return any( + str(via_input.nexthop) + == get_value( + dictionary=eos_via, + key="nexthop", + default="undefined", + ) + for eos_via in eos_entry["vias"] + ) + return True + + def _check_tunnel_interface(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: + """Check if the tunnel interface exists in the given EOS entry. + + Parameters + ---------- + via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias + The input via object. + eos_entry : dict[str, Any] + The EOS entry dictionary. + + Returns + ------- + bool + True if the tunnel interface exists, False otherwise. + """ + if via_input.interface is not None: + return any( + via_input.interface + == get_value( + dictionary=eos_via, + key="interface", + default="undefined", + ) + for eos_via in eos_entry["vias"] + ) + return True + + def _check_tunnel_id(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: + """Check if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias. + + Parameters + ---------- + via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias + The input vias to check. + eos_entry : dict[str, Any]) + The EOS entry to compare against. + + Returns + ------- + bool + True if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias, False otherwise. + """ + if via_input.tunnel_id is not None: + return any( + via_input.tunnel_id.upper() + == get_value( + dictionary=eos_via, + key="tunnelId.type", + default="undefined", + ).upper() + for eos_via in eos_entry["vias"] + ) + return True diff --git a/anta/tests/routing/ospf.py b/anta/tests/routing/ospf.py index a99ac18..d5d12e2 100644 --- a/anta/tests/routing/ospf.py +++ b/anta/tests/routing/ospf.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to OSPF tests.""" @@ -7,15 +7,90 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar +from typing import TYPE_CHECKING, Any, ClassVar from anta.models import AntaCommand, AntaTest -from anta.tools import get_value if TYPE_CHECKING: from anta.models import AntaTemplate +def _count_ospf_neighbor(ospf_neighbor_json: dict[str, Any]) -> int: + """Count the number of OSPF neighbors. + + Parameters + ---------- + ospf_neighbor_json + The JSON output of the `show ip ospf neighbor` command. + + Returns + ------- + int + The number of OSPF neighbors. + + """ + count = 0 + for vrf_data in ospf_neighbor_json["vrfs"].values(): + for instance_data in vrf_data["instList"].values(): + count += len(instance_data.get("ospfNeighborEntries", [])) + return count + + +def _get_not_full_ospf_neighbors(ospf_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]: + """Return the OSPF neighbors whose adjacency state is not `full`. + + Parameters + ---------- + ospf_neighbor_json + The JSON output of the `show ip ospf neighbor` command. + + Returns + ------- + list[dict[str, Any]] + A list of OSPF neighbors whose adjacency state is not `full`. 
+ + """ + return [ + { + "vrf": vrf, + "instance": instance, + "neighbor": neighbor_data["routerId"], + "state": state, + } + for vrf, vrf_data in ospf_neighbor_json["vrfs"].items() + for instance, instance_data in vrf_data["instList"].items() + for neighbor_data in instance_data.get("ospfNeighborEntries", []) + if (state := neighbor_data["adjacencyState"]) != "full" + ] + + +def _get_ospf_max_lsa_info(ospf_process_json: dict[str, Any]) -> list[dict[str, Any]]: + """Return information about OSPF instances and their LSAs. + + Parameters + ---------- + ospf_process_json + OSPF process information in JSON format. + + Returns + ------- + list[dict[str, Any]] + A list of dictionaries containing OSPF LSAs information. + + """ + return [ + { + "vrf": vrf, + "instance": instance, + "maxLsa": instance_data.get("maxLsaInformation", {}).get("maxLsa"), + "maxLsaThreshold": instance_data.get("maxLsaInformation", {}).get("maxLsaThreshold"), + "numLsa": instance_data.get("lsaInformation", {}).get("numLsa"), + } + for vrf, vrf_data in ospf_process_json.get("vrfs", {}).items() + for instance, instance_data in vrf_data.get("instList", {}).items() + ] + + class VerifyOSPFNeighborState(AntaTest): """Verifies all OSPF neighbors are in FULL state. @@ -40,29 +115,14 @@ class VerifyOSPFNeighborState(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyOSPFNeighborState.""" - self.result.is_success() - - # If OSPF is not configured on device, test skipped. - if not (command_output := get_value(self.instance_commands[0].json_output, "vrfs")): - self.result.is_skipped("OSPF not configured") + command_output = self.instance_commands[0].json_output + if _count_ospf_neighbor(command_output) == 0: + self.result.is_skipped("no OSPF neighbor found") return - - no_neighbor = True - for vrf, vrf_data in command_output.items(): - for instance, instance_data in vrf_data["instList"].items(): - neighbors = instance_data["ospfNeighborEntries"] - if not neighbors: - continue - no_neighbor = False - interfaces = [(neighbor["routerId"], state) for neighbor in neighbors if (state := neighbor["adjacencyState"]) != "full"] - for interface in interfaces: - self.result.is_failure( - f"Instance: {instance} VRF: {vrf} Interface: {interface[0]} - Incorrect adjacency state - Expected: Full Actual: {interface[1]}" - ) - - # If OSPF neighbors are not configured on device, test skipped. - if no_neighbor: - self.result.is_skipped("No OSPF neighbor detected") + self.result.is_success() + not_full_neighbors = _get_not_full_ospf_neighbors(command_output) + if not_full_neighbors: + self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.") class VerifyOSPFNeighborCount(AntaTest): @@ -96,34 +156,20 @@ class VerifyOSPFNeighborCount(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyOSPFNeighborCount.""" + command_output = self.instance_commands[0].json_output + if (neighbor_count := _count_ospf_neighbor(command_output)) == 0: + self.result.is_skipped("no OSPF neighbor found") + return self.result.is_success() - # If OSPF is not configured on device, test skipped. 
- if not (command_output := get_value(self.instance_commands[0].json_output, "vrfs")): - self.result.is_skipped("OSPF not configured") - return - - no_neighbor = True - interfaces = [] - for vrf_data in command_output.values(): - for instance_data in vrf_data["instList"].values(): - neighbors = instance_data["ospfNeighborEntries"] - if not neighbors: - continue - no_neighbor = False - interfaces.extend([neighbor["routerId"] for neighbor in neighbors if neighbor["adjacencyState"] == "full"]) - - # If OSPF neighbors are not configured on device, test skipped. - if no_neighbor: - self.result.is_skipped("No OSPF neighbor detected") - return - - # If the number of OSPF neighbors expected to be in the FULL state does not match with actual one, test fails. - if len(interfaces) != self.inputs.number: - self.result.is_failure(f"Neighbor count mismatch - Expected: {self.inputs.number} Actual: {len(interfaces)}") + if neighbor_count != self.inputs.number: + self.result.is_failure(f"device has {neighbor_count} neighbors (expected {self.inputs.number})") + not_full_neighbors = _get_not_full_ospf_neighbors(command_output) + if not_full_neighbors: + self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.") class VerifyOSPFMaxLSA(AntaTest): - """Verifies all OSPF instances did not cross the maximum LSA threshold. + """Verifies LSAs present in the OSPF link state database did not cross the maximum LSA Threshold. Expected Results ---------------- @@ -140,23 +186,23 @@ class VerifyOSPFMaxLSA(AntaTest): ``` """ + description = "Verifies all OSPF instances did not cross the maximum LSA threshold." categories: ClassVar[list[str]] = ["ospf"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip ospf", revision=1)] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyOSPFMaxLSA.""" - self.result.is_success() - - # If OSPF is not configured on device, test skipped. - if not (command_output := get_value(self.instance_commands[0].json_output, "vrfs")): - self.result.is_skipped("OSPF not configured") + command_output = self.instance_commands[0].json_output + ospf_instance_info = _get_ospf_max_lsa_info(command_output) + if not ospf_instance_info: + self.result.is_skipped("No OSPF instance found.") return - - for vrf_data in command_output.values(): - for instance, instance_data in vrf_data.get("instList", {}).items(): - max_lsa = instance_data["maxLsaInformation"]["maxLsa"] - max_lsa_threshold = instance_data["maxLsaInformation"]["maxLsaThreshold"] - num_lsa = get_value(instance_data, "lsaInformation.numLsa") - if num_lsa > (max_lsa_threshold := round(max_lsa * (max_lsa_threshold / 100))): - self.result.is_failure(f"Instance: {instance} - Crossed the maximum LSA threshold - Expected: < {max_lsa_threshold} Actual: {num_lsa}") + all_instances_within_threshold = all(instance["numLsa"] <= instance["maxLsa"] * (instance["maxLsaThreshold"] / 100) for instance in ospf_instance_info) + if all_instances_within_threshold: + self.result.is_success() + else: + exceeded_instances = [ + instance["instance"] for instance in ospf_instance_info if instance["numLsa"] > instance["maxLsa"] * (instance["maxLsaThreshold"] / 100) + ] + self.result.is_failure(f"OSPF Instances {exceeded_instances} crossed the maximum LSA threshold.") diff --git a/anta/tests/security.py b/anta/tests/security.py index d026692..38bf240 100644 --- a/anta/tests/security.py +++ b/anta/tests/security.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. 
+# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the EOS various security tests.""" @@ -8,12 +8,22 @@ from __future__ import annotations # Mypy does not understand AntaTest.Input typing # mypy: disable-error-code=attr-defined from datetime import datetime, timezone -from typing import ClassVar +from typing import TYPE_CHECKING, ClassVar, get_args -from anta.custom_types import PositiveInteger -from anta.input_models.security import ACL, APISSLCertificate, IPSecPeer, IPSecPeers +from pydantic import BaseModel, Field, model_validator + +from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, PositiveInteger, RsaKeySize +from anta.input_models.security import IPSecPeer, IPSecPeers from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_item, get_value +from anta.tools import get_failed_logs, get_item, get_value + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 11): + from typing import Self + else: + from typing_extensions import Self class VerifySSHStatus(AntaTest): @@ -43,7 +53,7 @@ class VerifySSHStatus(AntaTest): try: line = next(line for line in command_output.split("\n") if line.startswith("SSHD status")) except StopIteration: - self.result.is_failure("Could not find SSH status in returned output") + self.result.is_failure("Could not find SSH status in returned output.") return status = line.split()[-1] @@ -86,18 +96,19 @@ class VerifySSHIPv4Acl(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySSHIPv4Acl.""" - self.result.is_success() command_output = self.instance_commands[0].json_output ipv4_acl_list = command_output["ipAclList"]["aclList"] ipv4_acl_number = len(ipv4_acl_list) if ipv4_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - SSH IPv4 ACL(s) count mismatch - Expected: {self.inputs.number} Actual: {ipv4_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} SSH IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}") return not_configured_acl = [acl["name"] for acl in ipv4_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if not_configured_acl: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following SSH IPv4 ACL(s) not configured or active: {', '.join(not_configured_acl)}") + self.result.is_failure(f"SSH IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl}") + else: + self.result.is_success() class VerifySSHIPv6Acl(AntaTest): @@ -133,18 +144,19 @@ class VerifySSHIPv6Acl(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySSHIPv6Acl.""" - self.result.is_success() command_output = self.instance_commands[0].json_output ipv6_acl_list = command_output["ipv6AclList"]["aclList"] ipv6_acl_number = len(ipv6_acl_list) if ipv6_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - SSH IPv6 ACL(s) count mismatch - Expected: {self.inputs.number} Actual: {ipv6_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} SSH IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}") return not_configured_acl = [acl["name"] for acl in ipv6_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if not_configured_acl: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following SSH IPv6 ACL(s) not 
configured or active: {', '.join(not_configured_acl)}") + self.result.is_failure(f"SSH IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl}") + else: + self.result.is_success() class VerifyTelnetStatus(AntaTest): @@ -206,7 +218,7 @@ class VerifyAPIHttpStatus(AntaTest): class VerifyAPIHttpsSSL(AntaTest): - """Verifies if the eAPI has a valid SSL profile. + """Verifies if eAPI HTTPS server SSL profile is configured and valid. Expected Results ---------------- @@ -222,6 +234,7 @@ class VerifyAPIHttpsSSL(AntaTest): ``` """ + description = "Verifies if the eAPI has a valid SSL profile." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management api http-commands", revision=1)] @@ -239,10 +252,10 @@ class VerifyAPIHttpsSSL(AntaTest): if command_output["sslProfile"]["name"] == self.inputs.profile and command_output["sslProfile"]["state"] == "valid": self.result.is_success() else: - self.result.is_failure(f"eAPI HTTPS server SSL profile {self.inputs.profile} is misconfigured or invalid") + self.result.is_failure(f"eAPI HTTPS server SSL profile ({self.inputs.profile}) is misconfigured or invalid") except KeyError: - self.result.is_failure(f"eAPI HTTPS server SSL profile {self.inputs.profile} is not configured") + self.result.is_failure(f"eAPI HTTPS server SSL profile ({self.inputs.profile}) is not configured") class VerifyAPIIPv4Acl(AntaTest): @@ -281,13 +294,13 @@ class VerifyAPIIPv4Acl(AntaTest): ipv4_acl_list = command_output["ipAclList"]["aclList"] ipv4_acl_number = len(ipv4_acl_list) if ipv4_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - eAPI IPv4 ACL(s) count mismatch - Expected: {self.inputs.number} Actual: {ipv4_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} eAPI IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}") return not_configured_acl = [acl["name"] for acl in ipv4_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if not_configured_acl: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following eAPI IPv4 ACL(s) not configured or active: {', '.join(not_configured_acl)}") + self.result.is_failure(f"eAPI IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl}") else: self.result.is_success() @@ -329,13 +342,13 @@ class VerifyAPIIPv6Acl(AntaTest): ipv6_acl_list = command_output["ipv6AclList"]["aclList"] ipv6_acl_number = len(ipv6_acl_list) if ipv6_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - eAPI IPv6 ACL(s) count mismatch - Expected: {self.inputs.number} Actual: {ipv6_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} eAPI IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}") return not_configured_acl = [acl["name"] for acl in ipv6_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if not_configured_acl: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following eAPI IPv6 ACL(s) not configured or active: {', '.join(not_configured_acl)}") + self.result.is_failure(f"eAPI IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl}") else: self.result.is_success() @@ -343,25 +356,12 @@ class VerifyAPIIPv6Acl(AntaTest): class VerifyAPISSLCertificate(AntaTest): """Verifies the eAPI SSL certificate expiry, common subject name, encryption algorithm and key size. 
- This test performs the following checks for each certificate: - - 1. Validates that the certificate is not expired and meets the configured expiry threshold. - 2. Validates that the certificate Common Name matches the expected one. - 3. Ensures the certificate uses the specified encryption algorithm. - 4. Verifies the certificate key matches the expected key size. - Expected Results ---------------- - * Success: If all of the following occur: - - The certificate's expiry date exceeds the configured threshold. - - The certificate's Common Name matches the input configuration. - - The encryption algorithm used by the certificate is as expected. - - The key size of the certificate matches the input configuration. - * Failure: If any of the following occur: - - The certificate is expired or set to expire within the defined threshold. - - The certificate's common name does not match the expected input. - - The encryption algorithm is incorrect. - - The key size does not match the expected input. + * Success: The test will pass if the certificate's expiry date is greater than the threshold, + and the certificate has the correct name, encryption algorithm, and key size. + * Failure: The test will fail if the certificate is expired or is going to expire, + or if the certificate has an incorrect name, encryption algorithm, or key size. Examples -------- @@ -393,7 +393,38 @@ class VerifyAPISSLCertificate(AntaTest): certificates: list[APISSLCertificate] """List of API SSL certificates.""" - APISSLCertificate: ClassVar[type[APISSLCertificate]] = APISSLCertificate + + class APISSLCertificate(BaseModel): + """Model for an API SSL certificate.""" + + certificate_name: str + """The name of the certificate to be verified.""" + expiry_threshold: int + """The expiry threshold of the certificate in days.""" + common_name: str + """The common subject name of the certificate.""" + encryption_algorithm: EncryptionAlgorithm + """The encryption algorithm of the certificate.""" + key_size: RsaKeySize | EcdsaKeySize + """The encryption algorithm key size of the certificate.""" + + @model_validator(mode="after") + def validate_inputs(self) -> Self: + """Validate the key size provided to the APISSLCertificates class. + + If encryption_algorithm is RSA then key_size should be in {2048, 3072, 4096}. + + If encryption_algorithm is ECDSA then key_size should be in {256, 384, 521}. + """ + if self.encryption_algorithm == "RSA" and self.key_size not in get_args(RsaKeySize): + msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for RSA encryption. Allowed sizes are {get_args(RsaKeySize)}." + raise ValueError(msg) + + if self.encryption_algorithm == "ECDSA" and self.key_size not in get_args(EcdsaKeySize): + msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for ECDSA encryption. Allowed sizes are {get_args(EcdsaKeySize)}." + raise ValueError(msg) + + return self @AntaTest.anta_test def test(self) -> None: @@ -411,7 +442,7 @@ class VerifyAPISSLCertificate(AntaTest): # Collecting certificate expiry time and current EOS time. # These times are used to calculate the number of days until the certificate expires. 
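The APISSLCertificate input model restored in this hunk couples the allowed key size to the selected encryption algorithm with a pydantic `model_validator(mode="after")`. A minimal standalone sketch of that cross-field validation pattern (hypothetical names and sizes, not the ANTA model itself):

```python
from typing import Literal

from pydantic import BaseModel, ValidationError, model_validator

# Hypothetical allowed sizes, mirroring the RsaKeySize/EcdsaKeySize idea.
RSA_SIZES = (2048, 3072, 4096)
ECDSA_SIZES = (256, 384, 521)


class CertificateInput(BaseModel):
    """Minimal stand-in for an API SSL certificate input model."""

    certificate_name: str
    encryption_algorithm: Literal["RSA", "ECDSA"]
    key_size: int

    @model_validator(mode="after")
    def validate_key_size(self) -> "CertificateInput":
        """Reject key sizes that do not match the chosen algorithm."""
        allowed = RSA_SIZES if self.encryption_algorithm == "RSA" else ECDSA_SIZES
        if self.key_size not in allowed:
            msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for {self.encryption_algorithm}. Allowed sizes are {allowed}."
            raise ValueError(msg)
        return self


try:
    CertificateInput(certificate_name="example.crt", encryption_algorithm="ECDSA", key_size=2048)
except ValidationError as exc:
    print(exc)  # 2048 is rejected because ECDSA only allows 256/384/521 here
```

Validating at input-parsing time means an impossible algorithm/key-size combination is rejected before any command is sent to the device.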
if not (certificate_data := get_value(certificate_output, f"certificates..{certificate.certificate_name}", separator="..")): - self.result.is_failure(f"{certificate} - Not found") + self.result.is_failure(f"SSL certificate '{certificate.certificate_name}', is not configured.\n") continue expiry_time = certificate_data["notAfter"] @@ -419,25 +450,24 @@ class VerifyAPISSLCertificate(AntaTest): # Verify certificate expiry if 0 < day_difference < certificate.expiry_threshold: - self.result.is_failure( - f"{certificate} - set to expire within the threshold - Threshold: {certificate.expiry_threshold} days Actual: {day_difference} days" - ) + self.result.is_failure(f"SSL certificate `{certificate.certificate_name}` is about to expire in {day_difference} days.\n") elif day_difference < 0: - self.result.is_failure(f"{certificate} - certificate expired") + self.result.is_failure(f"SSL certificate `{certificate.certificate_name}` is expired.\n") # Verify certificate common subject name, encryption algorithm and key size - common_name = get_value(certificate_data, "subject.commonName", default="Not found") - encryp_algo = get_value(certificate_data, "publicKey.encryptionAlgorithm", default="Not found") - key_size = get_value(certificate_data, "publicKey.size", default="Not found") + keys_to_verify = ["subject.commonName", "publicKey.encryptionAlgorithm", "publicKey.size"] + actual_certificate_details = {key: get_value(certificate_data, key) for key in keys_to_verify} - if common_name != certificate.common_name: - self.result.is_failure(f"{certificate} - incorrect common name - Expected: {certificate.common_name} Actual: {common_name}") + expected_certificate_details = { + "subject.commonName": certificate.common_name, + "publicKey.encryptionAlgorithm": certificate.encryption_algorithm, + "publicKey.size": certificate.key_size, + } - if encryp_algo != certificate.encryption_algorithm: - self.result.is_failure(f"{certificate} - incorrect encryption algorithm - Expected: {certificate.encryption_algorithm} Actual: {encryp_algo}") - - if key_size != certificate.key_size: - self.result.is_failure(f"{certificate} - incorrect public key - Expected: {certificate.key_size} Actual: {key_size}") + if actual_certificate_details != expected_certificate_details: + failed_log = f"SSL certificate `{certificate.certificate_name}` is not configured properly:" + failed_log += get_failed_logs(expected_certificate_details, actual_certificate_details) + self.result.is_failure(f"{failed_log}\n") class VerifyBannerLogin(AntaTest): @@ -472,15 +502,14 @@ class VerifyBannerLogin(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBannerLogin.""" - self.result.is_success() - if not (login_banner := self.instance_commands[0].json_output["loginBanner"]): - self.result.is_failure("Login banner is not configured") - return + login_banner = self.instance_commands[0].json_output["loginBanner"] # Remove leading and trailing whitespaces from each line cleaned_banner = "\n".join(line.strip() for line in self.inputs.login_banner.split("\n")) if login_banner != cleaned_banner: - self.result.is_failure(f"Incorrect login banner configured - Expected: {cleaned_banner} Actual: {login_banner}") + self.result.is_failure(f"Expected `{cleaned_banner}` as the login banner, but found `{login_banner}` instead.") + else: + self.result.is_success() class VerifyBannerMotd(AntaTest): @@ -515,34 +544,23 @@ class VerifyBannerMotd(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for 
VerifyBannerMotd.""" - self.result.is_success() - if not (motd_banner := self.instance_commands[0].json_output["motd"]): - self.result.is_failure("MOTD banner is not configured") - return + motd_banner = self.instance_commands[0].json_output["motd"] # Remove leading and trailing whitespaces from each line cleaned_banner = "\n".join(line.strip() for line in self.inputs.motd_banner.split("\n")) if motd_banner != cleaned_banner: - self.result.is_failure(f"Incorrect MOTD banner configured - Expected: {cleaned_banner} Actual: {motd_banner}") + self.result.is_failure(f"Expected `{cleaned_banner}` as the motd banner, but found `{motd_banner}` instead.") + else: + self.result.is_success() class VerifyIPv4ACL(AntaTest): """Verifies the configuration of IPv4 ACLs. - This test performs the following checks for each IPv4 ACL: - - 1. Validates that the IPv4 ACL is properly configured. - 2. Validates that the sequence entries in the ACL are correctly ordered. - Expected Results ---------------- - * Success: If all of the following occur: - - Any IPv4 ACL entry is not configured. - - The sequency entries are correctly configured. - * Failure: If any of the following occur: - - The IPv4 ACL is not configured. - - The any IPv4 ACL entry is not configured. - - The action for any entry does not match the expected input. + * Success: The test will pass if an IPv4 ACL is configured with the correct sequence entries. + * Failure: The test will fail if an IPv4 ACL is not configured or entries are not in sequence. Examples -------- @@ -568,37 +586,65 @@ class VerifyIPv4ACL(AntaTest): """ categories: ClassVar[list[str]] = ["security"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip access-lists", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip access-lists {acl}", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyIPv4ACL test.""" - ipv4_access_lists: list[ACL] + ipv4_access_lists: list[IPv4ACL] """List of IPv4 ACLs to verify.""" - IPv4ACL: ClassVar[type[ACL]] = ACL - """To maintain backward compatibility.""" + + class IPv4ACL(BaseModel): + """Model for an IPv4 ACL.""" + + name: str + """Name of IPv4 ACL.""" + + entries: list[IPv4ACLEntry] + """List of IPv4 ACL entries.""" + + class IPv4ACLEntry(BaseModel): + """Model for an IPv4 ACL entry.""" + + sequence: int = Field(ge=1, le=4294967295) + """Sequence number of an ACL entry.""" + action: str + """Action of an ACL entry.""" + + def render(self, template: AntaTemplate) -> list[AntaCommand]: + """Render the template for each input ACL.""" + return [template.render(acl=acl.name) for acl in self.inputs.ipv4_access_lists] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyIPv4ACL.""" self.result.is_success() + for command_output, acl in zip(self.instance_commands, self.inputs.ipv4_access_lists): + # Collecting input ACL details + acl_name = command_output.params.acl + # Retrieve the expected entries from the inputs + acl_entries = acl.entries - if not (command_output := self.instance_commands[0].json_output["aclList"]): - self.result.is_failure("No Access Control List (ACL) configured") - return - - for access_list in self.inputs.ipv4_access_lists: - if not (access_list_output := get_item(command_output, "name", access_list.name)): - self.result.is_failure(f"{access_list} - Not configured") + # Check if ACL is configured + ipv4_acl_list = command_output.json_output["aclList"] + if not ipv4_acl_list: + 
self.result.is_failure(f"{acl_name}: Not found") continue - for entry in access_list.entries: - if not (actual_entry := get_item(access_list_output["sequence"], "sequenceNumber", entry.sequence)): - self.result.is_failure(f"{access_list} {entry} - Not configured") + # Check if the sequence number is configured and has the correct action applied + failed_log = f"{acl_name}:\n" + for acl_entry in acl_entries: + acl_seq = acl_entry.sequence + acl_action = acl_entry.action + if (actual_entry := get_item(ipv4_acl_list[0]["sequence"], "sequenceNumber", acl_seq)) is None: + failed_log += f"Sequence number `{acl_seq}` is not found.\n" continue - if (act_action := actual_entry["text"]) != entry.action: - self.result.is_failure(f"{access_list} {entry} - action mismatch - Expected: {entry.action} Actual: {act_action}") + if actual_entry["text"] != acl_action: + failed_log += f"Expected `{acl_action}` as sequence number {acl_seq} action but found `{actual_entry['text']}` instead.\n" + + if failed_log != f"{acl_name}:\n": + self.result.is_failure(f"{failed_log}") class VerifyIPSecConnHealth(AntaTest): @@ -624,11 +670,12 @@ class VerifyIPSecConnHealth(AntaTest): def test(self) -> None: """Main test function for VerifyIPSecConnHealth.""" self.result.is_success() + failure_conn = [] command_output = self.instance_commands[0].json_output["connections"] # Check if IP security connection is configured if not command_output: - self.result.is_failure("No IPv4 security connection configured") + self.result.is_failure("No IPv4 security connection configured.") return # Iterate over all ipsec connections @@ -638,7 +685,10 @@ class VerifyIPSecConnHealth(AntaTest): source = conn_data.get("saddr") destination = conn_data.get("daddr") vrf = conn_data.get("tunnelNs") - self.result.is_failure(f"Source: {source} Destination: {destination} VRF: {vrf} - IPv4 security connection not established") + failure_conn.append(f"source:{source} destination:{destination} vrf:{vrf}") + if failure_conn: + failure_msg = "\n".join(failure_conn) + self.result.is_failure(f"The following IPv4 security connections are not established:\n{failure_msg}.") class VerifySpecificIPSecConn(AntaTest): @@ -713,7 +763,9 @@ class VerifySpecificIPSecConn(AntaTest): if state != "Established": source = conn_data.get("saddr") destination = conn_data.get("daddr") - self.result.is_failure(f"{input_peer} Source: {source} Destination: {destination} - Connection down - Expected: Established Actual: {state}") + self.result.is_failure( + f"{input_peer} Source: {source} Destination: {destination} - Connection down - Expected: Established, Actual: {state}" + ) continue # Create a dictionary of existing connections for faster lookup @@ -728,7 +780,7 @@ class VerifySpecificIPSecConn(AntaTest): if (source_input, destination_input, vrf) in existing_connections: existing_state = existing_connections[(source_input, destination_input, vrf)] if existing_state != "Established": - failure = f"Expected: Established Actual: {existing_state}" + failure = f"Expected: Established, Actual: {existing_state}" self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection down - {failure}") else: self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection not found.") @@ -760,6 +812,6 @@ class VerifyHardwareEntropy(AntaTest): # Check if hardware entropy generation is enabled. 
if not command_output.get("hardwareEntropyEnabled"): - self.result.is_failure("Hardware entropy generation is disabled") + self.result.is_failure("Hardware entropy generation is disabled.") else: self.result.is_success() diff --git a/anta/tests/services.py b/anta/tests/services.py index a2b09da..dab1b3a 100644 --- a/anta/tests/services.py +++ b/anta/tests/services.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the EOS various services tests.""" @@ -9,9 +9,12 @@ from __future__ import annotations # mypy: disable-error-code=attr-defined from typing import ClassVar -from anta.input_models.services import DnsServer, ErrDisableReason, ErrdisableRecovery +from pydantic import BaseModel + +from anta.custom_types import ErrDisableInterval, ErrDisableReasons +from anta.input_models.services import DnsServer from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_dict_superset, get_item +from anta.tools import get_dict_superset, get_failed_logs class VerifyHostname(AntaTest): @@ -46,7 +49,7 @@ class VerifyHostname(AntaTest): hostname = self.instance_commands[0].json_output["hostname"] if hostname != self.inputs.hostname: - self.result.is_failure(f"Incorrect Hostname - Expected: {self.inputs.hostname} Actual: {hostname}") + self.result.is_failure(f"Expected `{self.inputs.hostname}` as the hostname, but found `{hostname}` instead.") else: self.result.is_success() @@ -163,24 +166,12 @@ class VerifyDNSServers(AntaTest): class VerifyErrdisableRecovery(AntaTest): - """Verifies the error disable recovery functionality. - - This test performs the following checks for each specified error disable reason: - - 1. Verifying if the specified error disable reason exists. - 2. Checking if the recovery timer status matches the expected enabled/disabled state. - 3. Validating that the timer interval matches the configured value. + """Verifies the errdisable recovery reason, status, and interval. Expected Results ---------------- - * Success: The test will pass if: - - The specified error disable reason exists. - - The recovery timer status matches the expected state. - - The timer interval matches the configured value. - * Failure: The test will fail if: - - The specified error disable reason does not exist. - - The recovery timer status does not match the expected state. - - The timer interval does not match the configured value. + * Success: The test will pass if the errdisable recovery reason status is enabled and the interval matches the input. + * Failure: The test will fail if the errdisable recovery reason is not found, the status is not enabled, or the interval does not match the input. 
Examples -------- @@ -190,10 +181,8 @@ class VerifyErrdisableRecovery(AntaTest): reasons: - reason: acl interval: 30 - status: Enabled - reason: bpduguard interval: 30 - status: Enabled ``` """ @@ -204,35 +193,44 @@ class VerifyErrdisableRecovery(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyErrdisableRecovery test.""" - reasons: list[ErrdisableRecovery] + reasons: list[ErrDisableReason] """List of errdisable reasons.""" - ErrDisableReason: ClassVar[type[ErrdisableRecovery]] = ErrDisableReason + + class ErrDisableReason(BaseModel): + """Model for an errdisable reason.""" + + reason: ErrDisableReasons + """Type or name of the errdisable reason.""" + interval: ErrDisableInterval + """Interval of the reason in seconds.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyErrdisableRecovery.""" + command_output = self.instance_commands[0].text_output self.result.is_success() - - # Skip header and last empty line - command_output = self.instance_commands[0].text_output.split("\n")[2:-1] - - # Collecting the actual errdisable reasons for faster lookup - errdisable_reasons = [ - {"reason": reason, "status": status, "interval": interval} - for line in command_output - if line.strip() # Skip empty lines - for reason, status, interval in [line.split(None, 2)] # Unpack split result - ] - for error_reason in self.inputs.reasons: - if not (reason_output := get_item(errdisable_reasons, "reason", error_reason.reason)): - self.result.is_failure(f"{error_reason} - Not found") - continue + input_reason = error_reason.reason + input_interval = error_reason.interval + reason_found = False - if not all( - [ - error_reason.status == (act_status := reason_output["status"]), - error_reason.interval == (act_interval := int(reason_output["interval"])), - ] - ): - self.result.is_failure(f"{error_reason} - Incorrect configuration - Status: {act_status} Interval: {act_interval}") + # Skip header and last empty line + lines = command_output.split("\n")[2:-1] + for line in lines: + # Skip empty lines + if not line.strip(): + continue + # Split by first two whitespaces + reason, status, interval = line.split(None, 2) + if reason != input_reason: + continue + reason_found = True + actual_reason_data = {"interval": interval, "status": status} + expected_reason_data = {"interval": str(input_interval), "status": "Enabled"} + if actual_reason_data != expected_reason_data: + failed_log = get_failed_logs(expected_reason_data, actual_reason_data) + self.result.is_failure(f"`{input_reason}`:{failed_log}\n") + break + + if not reason_found: + self.result.is_failure(f"`{input_reason}`: Not found.\n") diff --git a/anta/tests/snmp.py b/anta/tests/snmp.py index 1d02252..b8bd73d 100644 --- a/anta/tests/snmp.py +++ b/anta/tests/snmp.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
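The VerifyErrdisableRecovery rewrite above parses the command's text output directly: it drops the two header lines and the trailing empty line, then splits each remaining row on whitespace into reason, status, and interval. A standalone sketch of that parsing, driven by made-up sample output (real `show errdisable recovery` output may differ):

```python
# Sample text output invented purely to exercise the parsing logic.
sample_output = """Reason              Timer Status     Timer Interval
------------------- ---------------- --------------
acl                 Enabled                      30
bpduguard           Enabled                      30
"""

# Skip the two header lines and the trailing empty line, then split each row
# on whitespace into (reason, status, interval).
rows = sample_output.split("\n")[2:-1]
parsed = {}
for line in rows:
    if not line.strip():
        continue
    reason, status, interval = line.split(None, 2)
    parsed[reason] = {"status": status, "interval": int(interval)}

print(parsed)  # {'acl': {'status': 'Enabled', 'interval': 30}, 'bpduguard': {...}}
```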
"""Module related to the EOS various SNMP tests.""" @@ -9,10 +9,7 @@ from __future__ import annotations from typing import TYPE_CHECKING, ClassVar, get_args -from pydantic import field_validator - from anta.custom_types import PositiveInteger, SnmpErrorCounter, SnmpPdu -from anta.input_models.snmp import SnmpGroup, SnmpHost, SnmpSourceInterface, SnmpUser from anta.models import AntaCommand, AntaTest from anta.tools import get_value @@ -21,7 +18,7 @@ if TYPE_CHECKING: class VerifySnmpStatus(AntaTest): - """Verifies if the SNMP agent is enabled. + """Verifies whether the SNMP agent is enabled in a specified VRF. Expected Results ---------------- @@ -37,6 +34,7 @@ class VerifySnmpStatus(AntaTest): ``` """ + description = "Verifies if the SNMP agent is enabled." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] @@ -49,14 +47,15 @@ class VerifySnmpStatus(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpStatus.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - if not (command_output["enabled"] and self.inputs.vrf in command_output["vrfs"]["snmpVrfs"]): - self.result.is_failure(f"VRF: {self.inputs.vrf} - SNMP agent disabled") + if command_output["enabled"] and self.inputs.vrf in command_output["vrfs"]["snmpVrfs"]: + self.result.is_success() + else: + self.result.is_failure(f"SNMP agent disabled in vrf {self.inputs.vrf}") class VerifySnmpIPv4Acl(AntaTest): - """Verifies if the SNMP agent has IPv4 ACL(s) configured. + """Verifies if the SNMP agent has the right number IPv4 ACL(s) configured for a specified VRF. Expected Results ---------------- @@ -73,6 +72,7 @@ class VerifySnmpIPv4Acl(AntaTest): ``` """ + description = "Verifies if the SNMP agent has IPv4 ACL(s) configured." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp ipv4 access-list summary", revision=1)] @@ -87,22 +87,23 @@ class VerifySnmpIPv4Acl(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpIPv4Acl.""" - self.result.is_success() command_output = self.instance_commands[0].json_output ipv4_acl_list = command_output["ipAclList"]["aclList"] ipv4_acl_number = len(ipv4_acl_list) if ipv4_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Incorrect SNMP IPv4 ACL(s) - Expected: {self.inputs.number} Actual: {ipv4_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} SNMP IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}") return not_configured_acl = [acl["name"] for acl in ipv4_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if not_configured_acl: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following SNMP IPv4 ACL(s) not configured or active: {', '.join(not_configured_acl)}") + self.result.is_failure(f"SNMP IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl}") + else: + self.result.is_success() class VerifySnmpIPv6Acl(AntaTest): - """Verifies if the SNMP agent has IPv6 ACL(s) configured. + """Verifies if the SNMP agent has the right number IPv6 ACL(s) configured for a specified VRF. Expected Results ---------------- @@ -119,6 +120,7 @@ class VerifySnmpIPv6Acl(AntaTest): ``` """ + description = "Verifies if the SNMP agent has IPv6 ACL(s) configured." 
categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp ipv6 access-list summary", revision=1)] @@ -134,17 +136,18 @@ class VerifySnmpIPv6Acl(AntaTest): def test(self) -> None: """Main test function for VerifySnmpIPv6Acl.""" command_output = self.instance_commands[0].json_output - self.result.is_success() ipv6_acl_list = command_output["ipv6AclList"]["aclList"] ipv6_acl_number = len(ipv6_acl_list) if ipv6_acl_number != self.inputs.number: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Incorrect SNMP IPv6 ACL(s) - Expected: {self.inputs.number} Actual: {ipv6_acl_number}") + self.result.is_failure(f"Expected {self.inputs.number} SNMP IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}") return acl_not_configured = [acl["name"] for acl in ipv6_acl_list if self.inputs.vrf not in acl["configuredVrfs"] or self.inputs.vrf not in acl["activeVrfs"]] if acl_not_configured: - self.result.is_failure(f"VRF: {self.inputs.vrf} - Following SNMP IPv6 ACL(s) not configured or active: {', '.join(acl_not_configured)}") + self.result.is_failure(f"SNMP IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {acl_not_configured}") + else: + self.result.is_success() class VerifySnmpLocation(AntaTest): @@ -176,15 +179,16 @@ class VerifySnmpLocation(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpLocation.""" - self.result.is_success() # Verifies the SNMP location is configured. if not (location := get_value(self.instance_commands[0].json_output, "location.location")): - self.result.is_failure("SNMP location is not configured") + self.result.is_failure("SNMP location is not configured.") return # Verifies the expected SNMP location. if location != self.inputs.location: - self.result.is_failure(f"Incorrect SNMP location - Expected: {self.inputs.location} Actual: {location}") + self.result.is_failure(f"Expected `{self.inputs.location}` as the location, but found `{location}` instead.") + else: + self.result.is_success() class VerifySnmpContact(AntaTest): @@ -216,15 +220,16 @@ class VerifySnmpContact(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpContact.""" - self.result.is_success() # Verifies the SNMP contact is configured. if not (contact := get_value(self.instance_commands[0].json_output, "contact.contact")): - self.result.is_failure("SNMP contact is not configured") + self.result.is_failure("SNMP contact is not configured.") return # Verifies the expected SNMP contact. if contact != self.inputs.contact: - self.result.is_failure(f"Incorrect SNMP contact - Expected: {self.inputs.contact} Actual: {contact}") + self.result.is_failure(f"Expected `{self.inputs.contact}` as the contact, but found `{contact}` instead.") + else: + self.result.is_success() class VerifySnmpPDUCounters(AntaTest): @@ -261,24 +266,25 @@ class VerifySnmpPDUCounters(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpPDUCounters.""" - self.result.is_success() snmp_pdus = self.inputs.pdus command_output = self.instance_commands[0].json_output # Verify SNMP PDU counters. if not (pdu_counters := get_value(command_output, "counters")): - self.result.is_failure("SNMP counters not found") + self.result.is_failure("SNMP counters not found.") return # In case SNMP PDUs not provided, It will check all the update error counters. 
if not snmp_pdus: snmp_pdus = list(get_args(SnmpPdu)) - failures = {pdu for pdu in snmp_pdus if (value := pdu_counters.get(pdu, "Not Found")) == "Not Found" or value == 0} + failures = {pdu: value for pdu in snmp_pdus if (value := pdu_counters.get(pdu, "Not Found")) == "Not Found" or value == 0} # Check if any failures - if failures: - self.result.is_failure(f"The following SNMP PDU counters are not found or have zero PDU counters: {', '.join(sorted(failures))}") + if not failures: + self.result.is_success() + else: + self.result.is_failure(f"The following SNMP PDU counters are not found or have zero PDU counters:\n{failures}") class VerifySnmpErrorCounters(AntaTest): @@ -314,7 +320,6 @@ class VerifySnmpErrorCounters(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySnmpErrorCounters.""" - self.result.is_success() error_counters = self.inputs.error_counters command_output = self.instance_commands[0].json_output @@ -327,400 +332,10 @@ class VerifySnmpErrorCounters(AntaTest): if not error_counters: error_counters = list(get_args(SnmpErrorCounter)) - error_counters_not_ok = {counter for counter in error_counters if snmp_counters.get(counter)} + error_counters_not_ok = {counter: value for counter in error_counters if (value := snmp_counters.get(counter))} # Check if any failures - if error_counters_not_ok: - self.result.is_failure(f"The following SNMP error counters are not found or have non-zero error counters: {', '.join(sorted(error_counters_not_ok))}") - - -class VerifySnmpHostLogging(AntaTest): - """Verifies SNMP logging configurations. - - This test performs the following checks: - - 1. SNMP logging is enabled globally. - 2. For each specified SNMP host: - - Host exists in configuration. - - Host's VRF assignment matches expected value. - - Expected Results - ---------------- - * Success: The test will pass if all of the following conditions are met: - - SNMP logging is enabled on the device. - - All specified hosts are configured with correct VRF assignments. - * Failure: The test will fail if any of the following conditions is met: - - SNMP logging is disabled on the device. - - SNMP host not found in configuration. - - Host's VRF assignment doesn't match expected value. - - Examples - -------- - ```yaml - anta.tests.snmp: - - VerifySnmpHostLogging: - hosts: - - hostname: 192.168.1.100 - vrf: default - - hostname: 192.168.1.103 - vrf: MGMT - ``` - """ - - categories: ClassVar[list[str]] = ["snmp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySnmpHostLogging test.""" - - hosts: list[SnmpHost] - """List of SNMP hosts.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySnmpHostLogging.""" - self.result.is_success() - - command_output = self.instance_commands[0].json_output.get("logging", {}) - # If SNMP logging is disabled, test fails. - if not command_output.get("loggingEnabled"): - self.result.is_failure("SNMP logging is disabled") - return - - host_details = command_output.get("hosts", {}) - - for host in self.inputs.hosts: - hostname = str(host.hostname) - vrf = host.vrf - actual_snmp_host = host_details.get(hostname, {}) - - # If SNMP host is not configured on the device, test fails. - if not actual_snmp_host: - self.result.is_failure(f"{host} - Not configured") - continue - - # If VRF is not matches the expected value, test fails. 
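Both counter tests above (VerifySnmpPDUCounters and VerifySnmpErrorCounters) switch from a set to a dict comprehension so the failure message can show the offending values, while an assignment expression keeps the `.get()` lookup to one call per counter. A tiny sketch of that pattern with made-up counter names:

```python
# Collect only the counters that are missing or zero, keeping their values for
# the report. Counter names and values here are invented for illustration.
pdu_counters = {"inGetPdus": 12, "inSetPdus": 0, "outTrapPdus": 3}
expected_pdus = ["inGetPdus", "inSetPdus", "inGetNextPdus"]

failures = {pdu: value for pdu in expected_pdus if (value := pdu_counters.get(pdu, "Not Found")) == "Not Found" or value == 0}
print(failures)  # {'inSetPdus': 0, 'inGetNextPdus': 'Not Found'}
```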
- actual_vrf = "default" if (vrf_name := actual_snmp_host.get("vrf")) == "" else vrf_name - if actual_vrf != vrf: - self.result.is_failure(f"{host} - Incorrect VRF - Actual: {actual_vrf}") - - -class VerifySnmpUser(AntaTest): - """Verifies the SNMP user configurations. - - This test performs the following checks for each specified user: - - 1. User exists in SNMP configuration. - 2. Group assignment is correct. - 3. For SNMPv3 users only: - - Authentication type matches (if specified) - - Privacy type matches (if specified) - - Expected Results - ---------------- - * Success: If all of the following conditions are met: - - All users exist with correct group assignments. - - SNMPv3 authentication and privacy types match specified values. - * Failure: If any of the following occur: - - User not found in SNMP configuration. - - Incorrect group assignment. - - For SNMPv3: Mismatched authentication or privacy types. - - Examples - -------- - ```yaml - anta.tests.snmp: - - VerifySnmpUser: - snmp_users: - - username: test - group_name: test_group - version: v3 - auth_type: MD5 - priv_type: AES-128 - ``` - """ - - categories: ClassVar[list[str]] = ["snmp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp user", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySnmpUser test.""" - - snmp_users: list[SnmpUser] - """List of SNMP users.""" - - @field_validator("snmp_users") - @classmethod - def validate_snmp_users(cls, snmp_users: list[SnmpUser]) -> list[SnmpUser]: - """Validate that 'auth_type' or 'priv_type' field is provided in each SNMPv3 user.""" - for user in snmp_users: - if user.version == "v3" and not (user.auth_type or user.priv_type): - msg = f"{user} 'auth_type' or 'priv_type' field is required with 'version: v3'" - raise ValueError(msg) - return snmp_users - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySnmpUser.""" - self.result.is_success() - - for user in self.inputs.snmp_users: - # Verify SNMP user details. - if not (user_details := get_value(self.instance_commands[0].json_output, f"usersByVersion.{user.version}.users.{user.username}")): - self.result.is_failure(f"{user} - Not found") - continue - - if user.group_name != (act_group := user_details.get("groupName", "Not Found")): - self.result.is_failure(f"{user} - Incorrect user group - Actual: {act_group}") - - if user.version == "v3": - if user.auth_type and (act_auth_type := get_value(user_details, "v3Params.authType", "Not Found")) != user.auth_type: - self.result.is_failure(f"{user} - Incorrect authentication type - Expected: {user.auth_type} Actual: {act_auth_type}") - - if user.priv_type and (act_encryption := get_value(user_details, "v3Params.privType", "Not Found")) != user.priv_type: - self.result.is_failure(f"{user} - Incorrect privacy type - Expected: {user.priv_type} Actual: {act_encryption}") - - -class VerifySnmpNotificationHost(AntaTest): - """Verifies the SNMP notification host(s) (SNMP manager) configurations. - - This test performs the following checks for each specified host: - - 1. Verifies that the SNMP host(s) is configured on the device. - 2. Verifies that the notification type ("trap" or "inform") matches the expected value. - 3. Ensures that UDP port provided matches the expected value. - 4. Ensures the following depending on SNMP version: - - For SNMP version v1/v2c, a valid community string is set and matches the expected value. 
- - For SNMP version v3, a valid user field is set and matches the expected value. - - Expected Results - ---------------- - * Success: The test will pass if all of the following conditions are met: - - The SNMP host(s) is configured on the device. - - The notification type ("trap" or "inform") and UDP port match the expected value. - - Ensures the following depending on SNMP version: - - For SNMP version v1/v2c, a community string is set and it matches the expected value. - - For SNMP version v3, a valid user field is set and matches the expected value. - * Failure: The test will fail if any of the following conditions is met: - - The SNMP host(s) is not configured on the device. - - The notification type ("trap" or "inform") or UDP port do not matches the expected value. - - Ensures the following depending on SNMP version: - - For SNMP version v1/v2c, a community string is not matches the expected value. - - For SNMP version v3, an user field is not matches the expected value. - - Examples - -------- - ```yaml - anta.tests.snmp: - - VerifySnmpNotificationHost: - notification_hosts: - - hostname: spine - vrf: default - notification_type: trap - version: v1 - udp_port: 162 - community_string: public - - hostname: 192.168.1.100 - vrf: default - notification_type: trap - version: v3 - udp_port: 162 - user: public - ``` - """ - - categories: ClassVar[list[str]] = ["snmp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp notification host", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySnmpNotificationHost test.""" - - notification_hosts: list[SnmpHost] - """List of SNMP host(s).""" - - @field_validator("notification_hosts") - @classmethod - def validate_notification_hosts(cls, notification_hosts: list[SnmpHost]) -> list[SnmpHost]: - """Validate that all required fields are provided in each SNMP Notification Host.""" - for host in notification_hosts: - if host.version is None: - msg = f"{host}; 'version' field missing in the input" - raise ValueError(msg) - if host.version in ["v1", "v2c"] and host.community_string is None: - msg = f"{host} Version: {host.version}; 'community_string' field missing in the input" - raise ValueError(msg) - if host.version == "v3" and host.user is None: - msg = f"{host} Version: {host.version}; 'user' field missing in the input" - raise ValueError(msg) - return notification_hosts - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySnmpNotificationHost.""" - self.result.is_success() - - # If SNMP is not configured, test fails. - if not (snmp_hosts := get_value(self.instance_commands[0].json_output, "hosts")): - self.result.is_failure("No SNMP host is configured") - return - - for host in self.inputs.notification_hosts: - vrf = "" if host.vrf == "default" else host.vrf - hostname = str(host.hostname) - notification_type = host.notification_type - version = host.version - udp_port = host.udp_port - community_string = host.community_string - user = host.user - default_value = "Not Found" - - host_details = next( - (host for host in snmp_hosts if (host.get("hostname") == hostname and host.get("protocolVersion") == version and host.get("vrf") == vrf)), None - ) - # If expected SNMP host is not configured with the specified protocol version, test fails. - if not host_details: - self.result.is_failure(f"{host} Version: {version} - Not configured") - continue - - # If actual notification type does not match the expected value, test fails. 
- if notification_type != (actual_notification_type := get_value(host_details, "notificationType", default_value)): - self.result.is_failure(f"{host} - Incorrect notification type - Expected: {notification_type} Actual: {actual_notification_type}") - - # If actual UDP port does not match the expected value, test fails. - if udp_port != (actual_udp_port := get_value(host_details, "port", default_value)): - self.result.is_failure(f"{host} - Incorrect UDP port - Expected: {udp_port} Actual: {actual_udp_port}") - - user_found = user != (actual_user := get_value(host_details, "v3Params.user", default_value)) - version_user_check = (version == "v3", user_found) - - # If SNMP protocol version is v1 or v2c and actual community string does not match the expected value, test fails. - if version in ["v1", "v2c"] and community_string != (actual_community_string := get_value(host_details, "v1v2cParams.communityString", default_value)): - self.result.is_failure(f"{host} Version: {version} - Incorrect community string - Expected: {community_string} Actual: {actual_community_string}") - - # If SNMP protocol version is v3 and actual user does not match the expected value, test fails. - elif all(version_user_check): - self.result.is_failure(f"{host} Version: {version} - Incorrect user - Expected: {user} Actual: {actual_user}") - - -class VerifySnmpSourceInterface(AntaTest): - """Verifies SNMP source interfaces. - - This test performs the following checks: - - 1. Verifies that source interface(s) are configured for SNMP. - 2. For each specified source interface: - - Interface is configured in the specified VRF. - - Expected Results - ---------------- - * Success: The test will pass if the provided SNMP source interface(s) are configured in their specified VRF. - * Failure: The test will fail if any of the provided SNMP source interface(s) are NOT configured in their specified VRF. - - Examples - -------- - ```yaml - anta.tests.snmp: - - VerifySnmpSourceInterface: - interfaces: - - interface: Ethernet1 - vrf: default - - interface: Management0 - vrf: MGMT - ``` - """ - - categories: ClassVar[list[str]] = ["snmp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySnmpSourceInterface test.""" - - interfaces: list[SnmpSourceInterface] - """List of source interfaces.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySnmpSourceInterface.""" - self.result.is_success() - command_output = self.instance_commands[0].json_output.get("srcIntf", {}) - - if not (interface_output := command_output.get("sourceInterfaces")): - self.result.is_failure("SNMP source interface(s) not configured") - return - - for interface_details in self.inputs.interfaces: - # If the source interface is not configured, or if it does not match the expected value, the test fails. - if not (actual_interface := interface_output.get(interface_details.vrf)): - self.result.is_failure(f"{interface_details} - Not configured") - elif actual_interface != interface_details.interface: - self.result.is_failure(f"{interface_details} - Incorrect source interface - Actual: {actual_interface}") - - -class VerifySnmpGroup(AntaTest): - """Verifies the SNMP group configurations for specified version(s). - - This test performs the following checks: - - 1. Verifies that the SNMP group is configured for the specified version. - 2. For SNMP version 3, verify that the security model matches the expected value. - 3. 
Ensures that SNMP group configurations, including read, write, and notify views, align with version-specific requirements. - - Expected Results - ---------------- - * Success: The test will pass if the provided SNMP group and all specified parameters are correctly configured. - * Failure: The test will fail if the provided SNMP group is not configured or if any specified parameter is not correctly configured. - - Examples - -------- - ```yaml - anta.tests.snmp: - - VerifySnmpGroup: - snmp_groups: - - group_name: Group1 - version: v1 - read_view: group_read_1 - write_view: group_write_1 - notify_view: group_notify_1 - - group_name: Group2 - version: v3 - read_view: group_read_2 - write_view: group_write_2 - notify_view: group_notify_2 - authentication: priv - ``` - """ - - categories: ClassVar[list[str]] = ["snmp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp group", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySnmpGroup test.""" - - snmp_groups: list[SnmpGroup] - """List of SNMP groups.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySnmpGroup.""" - self.result.is_success() - for group in self.inputs.snmp_groups: - # Verify SNMP group details. - if not (group_details := get_value(self.instance_commands[0].json_output, f"groups.{group.group_name}.versions.{group.version}")): - self.result.is_failure(f"{group} - Not configured") - continue - - view_types = [view_type for view_type in ["read", "write", "notify"] if getattr(group, f"{view_type}_view")] - # Verify SNMP views, the read, write and notify settings aligning with version-specific requirements. - for view_type in view_types: - expected_view = getattr(group, f"{view_type}_view") - # Verify actual view is configured. - if group_details.get(f"{view_type}View") == "": - self.result.is_failure(f"{group} View: {view_type} - Not configured") - elif (act_view := group_details.get(f"{view_type}View")) != expected_view: - self.result.is_failure(f"{group} - Incorrect {view_type.title()} view - Expected: {expected_view} Actual: {act_view}") - elif not group_details.get(f"{view_type}ViewConfig"): - self.result.is_failure(f"{group} {view_type.title()} View: {expected_view} - Not configured") - - # For version v3, verify that the security model aligns with the expected value. - if group.version == "v3" and (actual_auth := group_details.get("secModel")) != group.authentication: - self.result.is_failure(f"{group} - Incorrect security model - Expected: {group.authentication} Actual: {actual_auth}") + if not error_counters_not_ok: + self.result.is_success() + else: + self.result.is_failure(f"The following SNMP error counters are not found or have non-zero error counters:\n{error_counters_not_ok}") diff --git a/anta/tests/software.py b/anta/tests/software.py index 8251760..9a41881 100644 --- a/anta/tests/software.py +++ b/anta/tests/software.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to the EOS software tests.""" @@ -16,7 +16,7 @@ if TYPE_CHECKING: class VerifyEOSVersion(AntaTest): - """Verifies the EOS version of the device. + """Verifies that the device is running one of the allowed EOS version. Expected Results ---------------- @@ -34,6 +34,7 @@ class VerifyEOSVersion(AntaTest): ``` """ + description = "Verifies the EOS version of the device." 
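Several hunks in this diff pair a reworded docstring summary with an explicit `description` class attribute (VerifyEOSVersion above being one example). Presumably the framework falls back to the first docstring line when no description is set, so the attribute keeps the reported short description stable. A rough illustration of that fallback pattern (not ANTA's actual implementation):

```python
# Rough illustration of a docstring-derived description with an explicit
# override; this is not ANTA's code, just the general pattern.
class ExampleTest:
    """Verifies that the device is running one of the allowed EOS versions."""

    # Explicit override; without it, the first docstring line would be used.
    description = "Verifies the EOS version of the device."

    @classmethod
    def short_description(cls) -> str:
        explicit = cls.__dict__.get("description")
        return explicit or (cls.__doc__ or "").strip().splitlines()[0]


print(ExampleTest.short_description())  # -> Verifies the EOS version of the device.
```

Keeping the short description unchanged avoids churn in report output even though the docstring summary was rewritten.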
categories: ClassVar[list[str]] = ["software"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version", revision=1)] @@ -47,13 +48,14 @@ def test(self) -> None: """Main test function for VerifyEOSVersion.""" command_output = self.instance_commands[0].json_output - self.result.is_success() - if command_output["version"] not in self.inputs.versions: - self.result.is_failure(f"EOS version mismatch - Actual: {command_output['version']} not in Expected: {', '.join(self.inputs.versions)}") + if command_output["version"] in self.inputs.versions: + self.result.is_success() + else: + self.result.is_failure(f'device is running version "{command_output["version"]}" not in expected versions: {self.inputs.versions}') class VerifyTerminAttrVersion(AntaTest): - """Verifies the TerminAttr version of the device. + """Verifies that the device is running one of the allowed TerminAttr versions. Expected Results ---------------- @@ -71,6 +73,7 @@ ``` """ + description = "Verifies the TerminAttr version of the device." categories: ClassVar[list[str]] = ["software"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version detail", revision=1)] @@ -84,10 +87,11 @@ def test(self) -> None: """Main test function for VerifyTerminAttrVersion.""" command_output = self.instance_commands[0].json_output - self.result.is_success() command_output_data = command_output["details"]["packages"]["TerminAttr-core"]["version"] - if command_output_data not in self.inputs.versions: - self.result.is_failure(f"TerminAttr version mismatch - Actual: {command_output_data} not in Expected: {', '.join(self.inputs.versions)}") + if command_output_data in self.inputs.versions: + self.result.is_success() + else: + self.result.is_failure(f"device is running TerminAttr version {command_output_data} and is not in the allowed list: {self.inputs.versions}") class VerifyEOSExtensions(AntaTest): @@ -116,7 +120,6 @@ def test(self) -> None: """Main test function for VerifyEOSExtensions.""" boot_extensions = [] - self.result.is_success() show_extensions_command_output = self.instance_commands[0].json_output show_boot_extensions_command_output = self.instance_commands[1].json_output installed_extensions = [ @@ -128,7 +131,7 @@ boot_extensions.append(formatted_extension) installed_extensions.sort() boot_extensions.sort() - if installed_extensions != boot_extensions: - str_installed_extensions = ", ".join(installed_extensions) if installed_extensions else "Not found" - str_boot_extensions = ", ".join(boot_extensions) if boot_extensions else "Not found" - self.result.is_failure(f"EOS extensions mismatch - Installed: {str_installed_extensions} Configured: {str_boot_extensions}") + if installed_extensions == boot_extensions: + self.result.is_success() + else: + self.result.is_failure(f"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}") diff --git a/anta/tests/stp.py b/anta/tests/stp.py index 47dfb9f..93a0d2e 100644 --- a/anta/tests/stp.py +++ b/anta/tests/stp.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
"""Module related to various Spanning Tree Protocol (STP) tests.""" @@ -7,7 +7,7 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import ClassVar, Literal +from typing import Any, ClassVar, Literal from pydantic import Field @@ -54,7 +54,8 @@ class VerifySTPMode(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySTPMode.""" - self.result.is_success() + not_configured = [] + wrong_stp_mode = [] for command in self.instance_commands: vlan_id = command.params.vlan if not ( @@ -63,9 +64,15 @@ class VerifySTPMode(AntaTest): f"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol", ) ): - self.result.is_failure(f"VLAN {vlan_id} STP mode: {self.inputs.mode} - Not configured") + not_configured.append(vlan_id) elif stp_mode != self.inputs.mode: - self.result.is_failure(f"VLAN {vlan_id} - Incorrect STP mode - Expected: {self.inputs.mode} Actual: {stp_mode}") + wrong_stp_mode.append(vlan_id) + if not_configured: + self.result.is_failure(f"STP mode '{self.inputs.mode}' not configured for the following VLAN(s): {not_configured}") + if wrong_stp_mode: + self.result.is_failure(f"Wrong STP mode configured for the following VLAN(s): {wrong_stp_mode}") + if not not_configured and not wrong_stp_mode: + self.result.is_success() class VerifySTPBlockedPorts(AntaTest): @@ -95,8 +102,8 @@ class VerifySTPBlockedPorts(AntaTest): self.result.is_success() else: for key, value in stp_instances.items(): - stp_block_ports = value.get("spanningTreeBlockedPorts") - self.result.is_failure(f"STP Instance: {key} - Blocked ports - {', '.join(stp_block_ports)}") + stp_instances[key] = value.pop("spanningTreeBlockedPorts") + self.result.is_failure(f"The following ports are blocked by STP: {stp_instances}") class VerifySTPCounters(AntaTest): @@ -121,14 +128,14 @@ class VerifySTPCounters(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySTPCounters.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - for interface, counters in command_output["interfaces"].items(): - if counters["bpduTaggedError"] != 0: - self.result.is_failure(f"Interface {interface} - STP BPDU packet tagged errors count mismatch - Expected: 0 Actual: {counters['bpduTaggedError']}") - if counters["bpduOtherError"] != 0: - self.result.is_failure(f"Interface {interface} - STP BPDU packet other errors count mismatch - Expected: 0 Actual: {counters['bpduOtherError']}") + interfaces_with_errors = [ + interface for interface, counters in command_output["interfaces"].items() if counters["bpduTaggedError"] or counters["bpduOtherError"] != 0 + ] + if interfaces_with_errors: + self.result.is_failure(f"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}") + else: + self.result.is_success() class VerifySTPForwardingPorts(AntaTest): @@ -167,22 +174,25 @@ class VerifySTPForwardingPorts(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySTPForwardingPorts.""" - self.result.is_success() - interfaces_state = [] + not_configured = [] + not_forwarding = [] for command in self.instance_commands: vlan_id = command.params.vlan if not (topologies := get_value(command.json_output, "topologies")): - self.result.is_failure(f"VLAN {vlan_id} - STP instance is not configured") - continue - for value in topologies.values(): - if vlan_id and int(vlan_id) in value["vlans"]: - interfaces_state = [ - (interface, actual_state) for interface, state in 
value["interfaces"].items() if (actual_state := state["state"]) != "forwarding" - ] - - if interfaces_state: - for interface, state in interfaces_state: - self.result.is_failure(f"VLAN {vlan_id} Interface: {interface} - Invalid state - Expected: forwarding Actual: {state}") + not_configured.append(vlan_id) + else: + interfaces_not_forwarding = [] + for value in topologies.values(): + if vlan_id and int(vlan_id) in value["vlans"]: + interfaces_not_forwarding = [interface for interface, state in value["interfaces"].items() if state["state"] != "forwarding"] + if interfaces_not_forwarding: + not_forwarding.append({f"VLAN {vlan_id}": interfaces_not_forwarding}) + if not_configured: + self.result.is_failure(f"STP instance is not configured for the following VLAN(s): {not_configured}") + if not_forwarding: + self.result.is_failure(f"The following VLAN(s) have interface(s) that are not in a forwarding state: {not_forwarding}") + if not not_configured and not interfaces_not_forwarding: + self.result.is_success() class VerifySTPRootPriority(AntaTest): @@ -219,7 +229,6 @@ class VerifySTPRootPriority(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifySTPRootPriority.""" - self.result.is_success() command_output = self.instance_commands[0].json_output if not (stp_instances := command_output["instances"]): self.result.is_failure("No STP instances configured") @@ -231,15 +240,16 @@ class VerifySTPRootPriority(AntaTest): elif first_name.startswith("VL"): prefix = "VL" else: - self.result.is_failure(f"STP Instance: {first_name} - Unsupported STP instance type") + self.result.is_failure(f"Unsupported STP instance type: {first_name}") return check_instances = [f"{prefix}{instance_id}" for instance_id in self.inputs.instances] if self.inputs.instances else command_output["instances"].keys() - for instance in check_instances: - if not (instance_details := get_value(command_output, f"instances.{instance}")): - self.result.is_failure(f"Instance: {instance} - Not configured") - continue - if (priority := get_value(instance_details, "rootBridge.priority")) != self.inputs.priority: - self.result.is_failure(f"STP Instance: {instance} - Incorrect root priority - Expected: {self.inputs.priority} Actual: {priority}") + wrong_priority_instances = [ + instance for instance in check_instances if get_value(command_output, f"instances.{instance}.rootBridge.priority") != self.inputs.priority + ] + if wrong_priority_instances: + self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}") + else: + self.result.is_success() class VerifyStpTopologyChanges(AntaTest): @@ -272,7 +282,8 @@ class VerifyStpTopologyChanges(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyStpTopologyChanges.""" - self.result.is_success() + failures: dict[str, Any] = {"topologies": {}} + command_output = self.instance_commands[0].json_output stp_topologies = command_output.get("topologies", {}) @@ -281,78 +292,20 @@ class VerifyStpTopologyChanges(AntaTest): # Verify the STP topology(s). 
if not stp_topologies: - self.result.is_failure("STP is not configured") + self.result.is_failure("STP is not configured.") return # Verifies the number of changes across all interfaces for topology, topology_details in stp_topologies.items(): - for interface, details in topology_details.get("interfaces", {}).items(): - if (num_of_changes := details.get("numChanges")) > self.inputs.threshold: - self.result.is_failure( - f"Topology: {topology} Interface: {interface} - Number of changes not within the threshold - Expected: " - f"{self.inputs.threshold} Actual: {num_of_changes}" - ) + interfaces = { + interface: {"Number of changes": num_of_changes} + for interface, details in topology_details.get("interfaces", {}).items() + if (num_of_changes := details.get("numChanges")) > self.inputs.threshold + } + if interfaces: + failures["topologies"][topology] = interfaces - -class VerifySTPDisabledVlans(AntaTest): - """Verifies the STP disabled VLAN(s). - - This test performs the following checks: - - 1. Verifies that the STP is configured. - 2. Verifies that the specified VLAN(s) exist on the device. - 3. Verifies that the STP is disabled for the specified VLAN(s). - - Expected Results - ---------------- - * Success: The test will pass if all of the following conditions are met: - - STP is properly configured on the device. - - The specified VLAN(s) exist on the device. - - STP is confirmed to be disabled for all the specified VLAN(s). - * Failure: The test will fail if any of the following condition is met: - - STP is not configured on the device. - - The specified VLAN(s) do not exist on the device. - - STP is enabled for any of the specified VLAN(s). - - Examples - -------- - ```yaml - anta.tests.stp: - - VerifySTPDisabledVlans: - vlans: - - 6 - - 4094 - ``` - """ - - categories: ClassVar[list[str]] = ["stp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree vlan detail", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifySTPDisabledVlans test.""" - - vlans: list[Vlan] - """List of STP disabled VLAN(s).""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifySTPDisabledVlans.""" - self.result.is_success() - - command_output = self.instance_commands[0].json_output - stp_vlan_instances = command_output.get("spanningTreeVlanInstances", {}) - - # If the spanningTreeVlanInstances detail are not found in the command output, the test fails. - if not stp_vlan_instances: - self.result.is_failure("STP is not configured") - return - - actual_vlans = list(stp_vlan_instances) - # If the specified VLAN is not present on the device, STP is enabled for the VLAN(s), test fails. - for vlan in self.inputs.vlans: - if str(vlan) not in actual_vlans: - self.result.is_failure(f"VLAN: {vlan} - Not configured") - continue - - if stp_vlan_instances.get(str(vlan)): - self.result.is_failure(f"VLAN: {vlan} - STP is enabled") + if failures["topologies"]: + self.result.is_failure(f"The following STP topologies are not configured or number of changes not within the threshold:\n{failures}") + else: + self.result.is_success() diff --git a/anta/tests/stun.py b/anta/tests/stun.py index da8c281..2be13c4 100644 --- a/anta/tests/stun.py +++ b/anta/tests/stun.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
"""Test functions related to various STUN settings.""" @@ -76,7 +76,7 @@ class VerifyStunClientTranslation(AntaTest): # If no bindings are found for the STUN client, mark the test as a failure and continue with the next client if not bindings: - self.result.is_failure(f"{client_input} - STUN client translation not found") + self.result.is_failure(f"{client_input} - STUN client translation not found.") continue # Extract the transaction ID from the bindings @@ -145,10 +145,10 @@ class VerifyStunServer(AntaTest): not_running = command_output.get("pid") == 0 if status_disabled and not_running: - self.result.is_failure("STUN server status is disabled and not running") + self.result.is_failure("STUN server status is disabled and not running.") elif status_disabled: - self.result.is_failure("STUN server status is disabled") + self.result.is_failure("STUN server status is disabled.") elif not_running: - self.result.is_failure("STUN server is not running") + self.result.is_failure("STUN server is not running.") else: self.result.is_success() diff --git a/anta/tests/system.py b/anta/tests/system.py index 048f987..cceced6 100644 --- a/anta/tests/system.py +++ b/anta/tests/system.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to system-level features and protocols tests.""" @@ -8,33 +8,23 @@ from __future__ import annotations import re -from typing import TYPE_CHECKING, Any, ClassVar +from typing import TYPE_CHECKING, ClassVar -from pydantic import model_validator - -from anta.custom_types import Hostname, PositiveInteger -from anta.input_models.system import NTPPool, NTPServer +from anta.custom_types import PositiveInteger +from anta.input_models.system import NTPServer from anta.models import AntaCommand, AntaTest from anta.tools import get_value if TYPE_CHECKING: - import sys - from ipaddress import IPv4Address - from anta.models import AntaTemplate - if sys.version_info >= (3, 11): - from typing import Self - else: - from typing_extensions import Self - CPU_IDLE_THRESHOLD = 25 MEMORY_THRESHOLD = 0.25 DISK_SPACE_THRESHOLD = 75 class VerifyUptime(AntaTest): - """Verifies the device uptime. + """Verifies if the device uptime is higher than the provided minimum uptime value. Expected Results ---------------- @@ -50,6 +40,7 @@ class VerifyUptime(AntaTest): ``` """ + description = "Verifies the device uptime." 
categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show uptime", revision=1)] @@ -62,10 +53,11 @@ class VerifyUptime(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyUptime.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - if command_output["upTime"] < self.inputs.minimum: - self.result.is_failure(f"Device uptime is incorrect - Expected: {self.inputs.minimum}s Actual: {command_output['upTime']}s") + if command_output["upTime"] > self.inputs.minimum: + self.result.is_success() + else: + self.result.is_failure(f"Device uptime is {command_output['upTime']} seconds") class VerifyReloadCause(AntaTest): @@ -104,11 +96,11 @@ class VerifyReloadCause(AntaTest): ]: self.result.is_success() else: - self.result.is_failure(f"Reload cause is: {command_output_data}") + self.result.is_failure(f"Reload cause is: '{command_output_data}'") class VerifyCoredump(AntaTest): - """Verifies there are no core dump files. + """Verifies if there are core dump files in the /var/core directory. Expected Results ---------------- @@ -127,6 +119,7 @@ class VerifyCoredump(AntaTest): ``` """ + description = "Verifies there are no core dump files." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system coredump", revision=1)] @@ -140,7 +133,7 @@ class VerifyCoredump(AntaTest): if not core_files: self.result.is_success() else: - self.result.is_failure(f"Core dump(s) have been found: {', '.join(core_files)}") + self.result.is_failure(f"Core dump(s) have been found: {core_files}") class VerifyAgentLogs(AntaTest): @@ -196,11 +189,12 @@ class VerifyCPUUtilization(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyCPUUtilization.""" - self.result.is_success() command_output = self.instance_commands[0].json_output command_output_data = command_output["cpuInfo"]["%Cpu(s)"]["idle"] - if command_output_data < CPU_IDLE_THRESHOLD: - self.result.is_failure(f"Device has reported a high CPU utilization - Expected: < 75% Actual: {100 - command_output_data}%") + if command_output_data > CPU_IDLE_THRESHOLD: + self.result.is_success() + else: + self.result.is_failure(f"Device has reported a high CPU utilization: {100 - command_output_data}%") class VerifyMemoryUtilization(AntaTest): @@ -225,11 +219,12 @@ class VerifyMemoryUtilization(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyMemoryUtilization.""" - self.result.is_success() command_output = self.instance_commands[0].json_output memory_usage = command_output["memFree"] / command_output["memTotal"] - if memory_usage < MEMORY_THRESHOLD: - self.result.is_failure(f"Device has reported a high memory usage - Expected: < 75% Actual: {(1 - memory_usage) * 100:.2f}%") + if memory_usage > MEMORY_THRESHOLD: + self.result.is_success() + else: + self.result.is_failure(f"Device has reported a high memory usage: {(1 - memory_usage)*100:.2f}%") class VerifyFileSystemUtilization(AntaTest): @@ -258,11 +253,11 @@ class VerifyFileSystemUtilization(AntaTest): self.result.is_success() for line in command_output.split("\n")[1:]: if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace("%", ""))) > DISK_SPACE_THRESHOLD: - self.result.is_failure(f"Mount point: {line} - Higher disk space utilization - Expected: {DISK_SPACE_THRESHOLD}% Actual: {percentage}%") + self.result.is_failure(f"Mount 
point {line} is higher than 75%: reported {percentage}%") class VerifyNTP(AntaTest): - """Verifies if NTP is synchronised. + """Verifies that the Network Time Protocol (NTP) is synchronized. Expected Results ---------------- @@ -277,6 +272,7 @@ class VerifyNTP(AntaTest): ``` """ + description = "Verifies if NTP is synchronised." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ntp status", ofmt="text")] @@ -288,27 +284,18 @@ class VerifyNTP(AntaTest): self.result.is_success() else: data = command_output.split("\n")[0] - self.result.is_failure(f"NTP status mismatch - Expected: synchronised Actual: {data}") + self.result.is_failure(f"The device is not synchronized with the configured NTP server(s): '{data}'") class VerifyNTPAssociations(AntaTest): """Verifies the Network Time Protocol (NTP) associations. - This test performs the following checks: - - 1. For the NTP servers: - - The primary NTP server (marked as preferred) has the condition 'sys.peer'. - - All other NTP servers have the condition 'candidate'. - - All the NTP servers have the expected stratum level. - 2. For the NTP servers pool: - - All the NTP servers belong to the specified NTP pool. - - All the NTP servers have valid condition (sys.peer | candidate). - - All the NTP servers have the stratum level within the specified startum level. - Expected Results ---------------- - * Success: The test will pass if all the NTP servers meet the expected state. - * Failure: The test will fail if any of the NTP server does not meet the expected state. + * Success: The test will pass if the Primary NTP server (marked as preferred) has the condition 'sys.peer' and + all other NTP servers have the condition 'candidate'. + * Failure: The test will fail if the Primary NTP server (marked as preferred) does not have the condition 'sys.peer' or + if any other NTP server does not have the condition 'candidate'. Examples -------- @@ -323,10 +310,6 @@ class VerifyNTPAssociations(AntaTest): stratum: 2 - server_address: 3.3.3.3 stratum: 2 - - VerifyNTPAssociations: - ntp_pool: - server_addresses: [1.1.1.1, 2.2.2.2] - preferred_stratum_range: [1,3] ``` """ @@ -336,79 +319,10 @@ class VerifyNTPAssociations(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyNTPAssociations test.""" - ntp_servers: list[NTPServer] | None = None + ntp_servers: list[NTPServer] """List of NTP servers.""" - ntp_pool: NTPPool | None = None - """NTP servers pool.""" NTPServer: ClassVar[type[NTPServer]] = NTPServer - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the VerifyNTPAssociations test. - - Either `ntp_servers` or `ntp_pool` can be provided at the same time. - """ - if not self.ntp_servers and not self.ntp_pool: - msg = "'ntp_servers' or 'ntp_pool' must be provided" - raise ValueError(msg) - if self.ntp_servers and self.ntp_pool: - msg = "Either 'ntp_servers' or 'ntp_pool' can be provided at the same time" - raise ValueError(msg) - - # Verifies the len of preferred_stratum_range in NTP Pool should be 2 as this is the range. 
- stratum_range = 2 - if self.ntp_pool and len(self.ntp_pool.preferred_stratum_range) > stratum_range: - msg = "'preferred_stratum_range' list should have at most 2 items" - raise ValueError(msg) - return self - - def _validate_ntp_server(self, ntp_server: NTPServer, peers: dict[str, Any]) -> list[str]: - """Validate the NTP server, condition and stratum level.""" - failure_msgs: list[str] = [] - server_address = str(ntp_server.server_address) - - # We check `peerIpAddr` in the peer details - covering IPv4Address input, or the peer key - covering Hostname input. - matching_peer = next((peer for peer, peer_details in peers.items() if (server_address in {peer_details["peerIpAddr"], peer})), None) - - if not matching_peer: - failure_msgs.append(f"{ntp_server} - Not configured") - return failure_msgs - - # Collecting the expected/actual NTP peer details. - exp_condition = "sys.peer" if ntp_server.preferred else "candidate" - exp_stratum = ntp_server.stratum - act_condition = get_value(peers[matching_peer], "condition") - act_stratum = get_value(peers[matching_peer], "stratumLevel") - - if act_condition != exp_condition: - failure_msgs.append(f"{ntp_server} - Incorrect condition - Expected: {exp_condition} Actual: {act_condition}") - - if act_stratum != exp_stratum: - failure_msgs.append(f"{ntp_server} - Incorrect stratum level - Expected: {exp_stratum} Actual: {act_stratum}") - - return failure_msgs - - def _validate_ntp_pool(self, server_addresses: list[Hostname | IPv4Address], peer: str, stratum_range: list[int], peer_details: dict[str, Any]) -> list[str]: - """Validate the NTP server pool, condition and stratum level.""" - failure_msgs: list[str] = [] - - # We check `peerIpAddr` and `peer` in the peer details - covering server_addresses input - if (peer_ip := peer_details["peerIpAddr"]) not in server_addresses and peer not in server_addresses: - failure_msgs.append(f"NTP Server: {peer_ip} Hostname: {peer} - Associated but not part of the provided NTP pool") - return failure_msgs - - act_condition = get_value(peer_details, "condition") - act_stratum = get_value(peer_details, "stratumLevel") - - if act_condition not in ["sys.peer", "candidate"]: - failure_msgs.append(f"NTP Server: {peer_ip} Hostname: {peer} - Incorrect condition - Expected: sys.peer, candidate Actual: {act_condition}") - - if int(act_stratum) not in range(stratum_range[0], stratum_range[1] + 1): - msg = f"Expected Stratum Range: {stratum_range[0]} to {stratum_range[1]} Actual: {act_stratum}" - failure_msgs.append(f"NTP Server: {peer_ip} Hostname: {peer} - Incorrect stratum level - {msg}") - - return failure_msgs - @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyNTPAssociations.""" @@ -418,66 +332,22 @@ class VerifyNTPAssociations(AntaTest): self.result.is_failure("No NTP peers configured") return - if self.inputs.ntp_servers: - # Iterate over each NTP server. - for ntp_server in self.inputs.ntp_servers: - failure_msgs = self._validate_ntp_server(ntp_server, peers) - for msg in failure_msgs: - self.result.is_failure(msg) - return + # Iterate over each NTP server. 
+ for ntp_server in self.inputs.ntp_servers: + server_address = str(ntp_server.server_address) - # Verifies the NTP pool details - server_addresses = self.inputs.ntp_pool.server_addresses - exp_stratum_range = self.inputs.ntp_pool.preferred_stratum_range - for peer, peer_details in peers.items(): - failure_msgs = self._validate_ntp_pool(server_addresses, peer, exp_stratum_range, peer_details) - for msg in failure_msgs: - self.result.is_failure(msg) + # We check `peerIpAddr` in the peer details - covering IPv4Address input, or the peer key - covering Hostname input. + matching_peer = next((peer for peer, peer_details in peers.items() if (server_address in {peer_details["peerIpAddr"], peer})), None) + if not matching_peer: + self.result.is_failure(f"{ntp_server} - Not configured") + continue -class VerifyMaintenance(AntaTest): - """Verifies that the device is not currently under or entering maintenance. + # Collecting the expected/actual NTP peer details. + exp_condition = "sys.peer" if ntp_server.preferred else "candidate" + exp_stratum = ntp_server.stratum + act_condition = get_value(peers[matching_peer], "condition") + act_stratum = get_value(peers[matching_peer], "stratumLevel") - Expected Results - ---------------- - * Success: The test will pass if the device is not under or entering maintenance. - * Failure: The test will fail if the device is under or entering maintenance. - - Examples - -------- - ```yaml - anta.tests.system: - - VerifyMaintenance: - ``` - """ - - categories: ClassVar[list[str]] = ["Maintenance"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show maintenance", revision=1)] - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyMaintenance.""" - self.result.is_success() - - # If units is not empty we have to examine the output for details. - if not (units := get_value(self.instance_commands[0].json_output, "units")): - return - units_under_maintenance = [unit for unit, info in units.items() if info["state"] == "underMaintenance"] - units_entering_maintenance = [unit for unit, info in units.items() if info["state"] == "maintenanceModeEnter"] - causes = set() - # Iterate over units, check for units under or entering maintenance, and examine the causes. - for info in units.values(): - if info["adminState"] == "underMaintenance": - causes.add("Quiesce is configured") - if info["onBootMaintenance"]: - causes.add("On-boot maintenance is configured") - if info["intfsViolatingTrafficThreshold"]: - causes.add("Interface traffic threshold violation") - - # Building the error message. - if units_under_maintenance: - self.result.is_failure(f"Units under maintenance: '{', '.join(units_under_maintenance)}'.") - if units_entering_maintenance: - self.result.is_failure(f"Units entering maintenance: '{', '.join(units_entering_maintenance)}'.") - if causes: - self.result.is_failure(f"Possible causes: '{', '.join(sorted(causes))}'.") + if act_condition != exp_condition or act_stratum != exp_stratum: + self.result.is_failure(f"{ntp_server} - Bad association - Condition: {act_condition}, Stratum: {act_stratum}") diff --git a/anta/tests/vlan.py b/anta/tests/vlan.py index 25fc9d5..b7b1bd4 100644 --- a/anta/tests/vlan.py +++ b/anta/tests/vlan.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
"""Module related to VLAN tests.""" @@ -9,9 +9,9 @@ from __future__ import annotations from typing import TYPE_CHECKING, ClassVar, Literal -from anta.custom_types import DynamicVlanSource, Vlan +from anta.custom_types import Vlan from anta.models import AntaCommand, AntaTest -from anta.tools import get_value +from anta.tools import get_failed_logs, get_value if TYPE_CHECKING: from anta.models import AntaTemplate @@ -55,93 +55,16 @@ class VerifyVlanInternalPolicy(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyVlanInternalPolicy.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - if (policy := self.inputs.policy) != (act_policy := get_value(command_output, "policy")): - self.result.is_failure(f"Incorrect VLAN internal allocation policy configured - Expected: {policy} Actual: {act_policy}") - return + keys_to_verify = ["policy", "startVlanId", "endVlanId"] + actual_policy_output = {key: get_value(command_output, key) for key in keys_to_verify} + expected_policy_output = {"policy": self.inputs.policy, "startVlanId": self.inputs.start_vlan_id, "endVlanId": self.inputs.end_vlan_id} - if (start_vlan_id := self.inputs.start_vlan_id) != (act_vlan_id := get_value(command_output, "startVlanId")): - self.result.is_failure( - f"VLAN internal allocation policy: {self.inputs.policy} - Incorrect start VLAN id configured - Expected: {start_vlan_id} Actual: {act_vlan_id}" - ) - - if (end_vlan_id := self.inputs.end_vlan_id) != (act_vlan_id := get_value(command_output, "endVlanId")): - self.result.is_failure( - f"VLAN internal allocation policy: {self.inputs.policy} - Incorrect end VLAN id configured - Expected: {end_vlan_id} Actual: {act_vlan_id}" - ) - - -class VerifyDynamicVlanSource(AntaTest): - """Verifies dynamic VLAN allocation for specified VLAN sources. - - This test performs the following checks for each specified VLAN source: - - 1. Validates source exists in dynamic VLAN table. - 2. Verifies at least one VLAN is allocated to the source. - 3. When strict mode is enabled (`strict: true`), ensures no other sources have VLANs allocated. - - Expected Results - ---------------- - * Success: The test will pass if all of the following conditions are met: - - Each specified source exists in dynamic VLAN table. - - Each specified source has at least one VLAN allocated. - - In strict mode: No other sources have VLANs allocated. - * Failure: The test will fail if any of the following conditions is met: - - Specified source not found in configuration. - - Source exists but has no VLANs allocated. - - In strict mode: Non-specified sources have VLANs allocated. - - Examples - -------- - ```yaml - anta.tests.vlan: - - VerifyDynamicVlanSource: - sources: - - evpn - - mlagsync - strict: False - ``` - """ - - categories: ClassVar[list[str]] = ["vlan"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show vlan dynamic", revision=1)] - - class Input(AntaTest.Input): - """Input model for the VerifyDynamicVlanSource test.""" - - sources: list[DynamicVlanSource] - """The dynamic VLAN source list.""" - strict: bool = False - """If True, only specified sources are allowed to have VLANs allocated. 
Default is False.""" - - @AntaTest.anta_test - def test(self) -> None: - """Main test function for VerifyDynamicVlanSource.""" - self.result.is_success() - command_output = self.instance_commands[0].json_output - dynamic_vlans = command_output.get("dynamicVlans", {}) - - # Get all configured sources and sources with VLANs allocated - configured_sources = set(dynamic_vlans.keys()) - sources_with_vlans = {source for source, data in dynamic_vlans.items() if data.get("vlanIds")} - expected_sources = set(self.inputs.sources) - - # Check if all specified sources exist in configuration - missing_sources = expected_sources - configured_sources - if missing_sources: - self.result.is_failure(f"Dynamic VLAN source(s) not found in configuration: {', '.join(sorted(missing_sources))}") - return - - # Check if configured sources have VLANs allocated - sources_without_vlans = expected_sources - sources_with_vlans - if sources_without_vlans: - self.result.is_failure(f"Dynamic VLAN source(s) exist but have no VLANs allocated: {', '.join(sorted(sources_without_vlans))}") - return - - # In strict mode, verify no other sources have VLANs allocated - if self.inputs.strict: - unexpected_sources = sources_with_vlans - expected_sources - if unexpected_sources: - self.result.is_failure(f"Strict mode enabled: Unexpected sources have VLANs allocated: {', '.join(sorted(unexpected_sources))}") + # Check if the actual output matches the expected output + if actual_policy_output != expected_policy_output: + failed_log = "The VLAN internal allocation policy is not configured properly:" + failed_log += get_failed_logs(expected_policy_output, actual_policy_output) + self.result.is_failure(failed_log) + else: + self.result.is_success() diff --git a/anta/tests/vxlan.py b/anta/tests/vxlan.py index 04c3994..e5f0a54 100644 --- a/anta/tests/vxlan.py +++ b/anta/tests/vxlan.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Module related to VXLAN tests.""" @@ -21,7 +21,7 @@ if TYPE_CHECKING: class VerifyVxlan1Interface(AntaTest): - """Verifies the Vxlan1 interface status. + """Verifies if the Vxlan1 interface is configured and 'up/up'. Warnings -------- @@ -41,26 +41,26 @@ class VerifyVxlan1Interface(AntaTest): ``` """ + description = "Verifies the Vxlan1 interface status." 
categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces description", revision=1)] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyVxlan1Interface.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if the Vxlan1 interface is not configured - if "Vxlan1" not in (interface_details := command_output["interfaceDescriptions"]): - self.result.is_skipped("Interface: Vxlan1 - Not configured") - return - - line_protocol_status = interface_details["Vxlan1"]["lineProtocolStatus"] - interface_status = interface_details["Vxlan1"]["interfaceStatus"] - - # Checking against both status and line protocol status - if interface_status != "up" or line_protocol_status != "up": - self.result.is_failure(f"Interface: Vxlan1 - Incorrect Line protocol status/Status - Expected: up/up Actual: {line_protocol_status}/{interface_status}") + if "Vxlan1" not in command_output["interfaceDescriptions"]: + self.result.is_skipped("Vxlan1 interface is not configured") + elif ( + command_output["interfaceDescriptions"]["Vxlan1"]["lineProtocolStatus"] == "up" + and command_output["interfaceDescriptions"]["Vxlan1"]["interfaceStatus"] == "up" + ): + self.result.is_success() + else: + self.result.is_failure( + f"Vxlan1 interface is {command_output['interfaceDescriptions']['Vxlan1']['lineProtocolStatus']}" + f"/{command_output['interfaceDescriptions']['Vxlan1']['interfaceStatus']}", + ) class VerifyVxlanConfigSanity(AntaTest): @@ -86,19 +86,19 @@ class VerifyVxlanConfigSanity(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyVxlanConfigSanity.""" - self.result.is_success() command_output = self.instance_commands[0].json_output - - # Skipping the test if VXLAN is not configured if "categories" not in command_output or len(command_output["categories"]) == 0: self.result.is_skipped("VXLAN is not configured") return - - # Verifies the Vxlan config sanity - categories_to_check = ["localVtep", "mlag", "pd"] - for category in categories_to_check: - if not get_value(command_output, f"categories.{category}.allCheckPass"): - self.result.is_failure(f"Vxlan Category: {category} - Config sanity check is not passing") + failed_categories = { + category: content + for category, content in command_output["categories"].items() + if category in ["localVtep", "mlag", "pd"] and content["allCheckPass"] is not True + } + if len(failed_categories) > 0: + self.result.is_failure(f"VXLAN config sanity check is not passing: {failed_categories}") + else: + self.result.is_success() class VerifyVxlanVniBinding(AntaTest): @@ -135,23 +135,31 @@ class VerifyVxlanVniBinding(AntaTest): """Main test function for VerifyVxlanVniBinding.""" self.result.is_success() + no_binding = [] + wrong_binding = [] + if (vxlan1 := get_value(self.instance_commands[0].json_output, "vxlanIntfs.Vxlan1")) is None: self.result.is_skipped("Vxlan1 interface is not configured") return for vni, vlan in self.inputs.bindings.items(): str_vni = str(vni) - retrieved_vlan = "" if str_vni in vxlan1["vniBindings"]: - retrieved_vlan = get_value(vxlan1, f"vniBindings..{str_vni}..vlan", separator="..") + retrieved_vlan = vxlan1["vniBindings"][str_vni]["vlan"] elif str_vni in vxlan1["vniBindingsToVrf"]: - retrieved_vlan = get_value(vxlan1, f"vniBindingsToVrf..{str_vni}..vlan", separator="..") + retrieved_vlan = vxlan1["vniBindingsToVrf"][str_vni]["vlan"] + else: + no_binding.append(str_vni) + 
retrieved_vlan = None - if not retrieved_vlan: - self.result.is_failure(f"Interface: Vxlan1 VNI: {str_vni} - Binding not found") + if retrieved_vlan and vlan != retrieved_vlan: + wrong_binding.append({str_vni: retrieved_vlan}) - elif vlan != retrieved_vlan: - self.result.is_failure(f"Interface: Vxlan1 VNI: {str_vni} VLAN: {vlan} - Wrong VLAN binding - Actual: {retrieved_vlan}") + if no_binding: + self.result.is_failure(f"The following VNI(s) have no binding: {no_binding}") + + if wrong_binding: + self.result.is_failure(f"The following VNI(s) have the wrong VLAN binding: {wrong_binding}") class VerifyVxlanVtep(AntaTest): @@ -198,10 +206,10 @@ class VerifyVxlanVtep(AntaTest): difference2 = set(vxlan1["vteps"]).difference(set(inputs_vteps)) if difference1: - self.result.is_failure(f"The following VTEP peer(s) are missing from the Vxlan1 interface: {', '.join(sorted(difference1))}") + self.result.is_failure(f"The following VTEP peer(s) are missing from the Vxlan1 interface: {sorted(difference1)}") if difference2: - self.result.is_failure(f"Unexpected VTEP peer(s) on Vxlan1 interface: {', '.join(sorted(difference2))}") + self.result.is_failure(f"Unexpected VTEP peer(s) on Vxlan1 interface: {sorted(difference2)}") class VerifyVxlan1ConnSettings(AntaTest): @@ -251,6 +259,6 @@ class VerifyVxlan1ConnSettings(AntaTest): # Check vxlan1 source interface and udp port if src_intf != self.inputs.source_interface: - self.result.is_failure(f"Interface: Vxlan1 - Incorrect Source interface - Expected: {self.inputs.source_interface} Actual: {src_intf}") + self.result.is_failure(f"Source interface is not correct. Expected `{self.inputs.source_interface}` as source interface but found `{src_intf}` instead.") if port != self.inputs.udp_port: - self.result.is_failure(f"Interface: Vxlan1 - Incorrect UDP port - Expected: {self.inputs.udp_port} Actual: {port}") + self.result.is_failure(f"UDP port is not correct. Expected `{self.inputs.udp_port}` as UDP port but found `{port}` instead.") diff --git a/anta/tools.py b/anta/tools.py index cbcfe0b..8b116a0 100644 --- a/anta/tools.py +++ b/anta/tools.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2025 Arista Networks, Inc. +# Copyright (c) 2023-2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. """Common functions used in ANTA tests.""" @@ -353,7 +353,7 @@ def cprofile(sort_by: str = "cumtime") -> Callable[[F], F]: return result - return cast("F", wrapper) + return cast(F, wrapper) return decorator diff --git a/asynceapi/__init__.py b/asynceapi/__init__.py index fedb07f..6d5a23b 100644 --- a/asynceapi/__init__.py +++ b/asynceapi/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # Initially written by Jeremy Schulman at https://github.com/jeremyschulman/aio-eapi diff --git a/asynceapi/_constants.py b/asynceapi/_constants.py deleted file mode 100644 index 2904038..0000000 --- a/asynceapi/_constants.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Constants and Enums for the asynceapi package.""" - -from __future__ import annotations - -from enum import Enum - - -class EapiCommandFormat(str, Enum): - """Enum for the eAPI command format. 
- - NOTE: This could be updated to StrEnum when Python 3.11 is the minimum supported version in ANTA. - """ - - JSON = "json" - TEXT = "text" - - def __str__(self) -> str: - """Override the __str__ method to return the value of the Enum, mimicking the behavior of StrEnum.""" - return self.value diff --git a/asynceapi/_errors.py b/asynceapi/_errors.py deleted file mode 100644 index 321843d..0000000 --- a/asynceapi/_errors.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Exceptions for the asynceapi package.""" - -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from asynceapi._models import EapiResponse - - -class EapiReponseError(Exception): - """Exception raised when an eAPI response contains errors. - - Attributes - ---------- - response : EapiResponse - The eAPI response that contains the error. - """ - - def __init__(self, response: EapiResponse) -> None: - """Initialize the EapiReponseError exception.""" - self.response = response - - # Build a descriptive error message - message = "Error in eAPI response" - - if response.error_code is not None: - message += f" (code: {response.error_code})" - - if response.error_message is not None: - message += f": {response.error_message}" - - super().__init__(message) diff --git a/asynceapi/_models.py b/asynceapi/_models.py deleted file mode 100644 index 0572a2f..0000000 --- a/asynceapi/_models.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Models for the asynceapi package.""" - -from __future__ import annotations - -from dataclasses import dataclass, field -from logging import getLogger -from typing import TYPE_CHECKING, Any, Literal -from uuid import uuid4 - -from ._constants import EapiCommandFormat -from ._errors import EapiReponseError - -if TYPE_CHECKING: - from collections.abc import Iterator - - from ._types import EapiComplexCommand, EapiJsonOutput, EapiSimpleCommand, EapiTextOutput, JsonRpc - -LOGGER = getLogger(__name__) - - -# pylint: disable=too-many-instance-attributes -@dataclass(frozen=True) -class EapiRequest: - """Model for an eAPI request. - - Attributes - ---------- - commands : list[EapiSimpleCommand | EapiComplexCommand] - A list of commands to execute. - version : int | Literal["latest"] - The eAPI version to use. Defaults to "latest". - format : EapiCommandFormat - The command output format. Defaults "json". - timestamps : bool - Include timestamps in the command output. Defaults to False. - auto_complete : bool - Enable command auto-completion. Defaults to False. - expand_aliases : bool - Expand command aliases. Defaults to False. - stop_on_error : bool - Stop command execution on first error. Defaults to True. - id : int | str - The request ID. Defaults to a random hex string. 
- """ - - commands: list[EapiSimpleCommand | EapiComplexCommand] - version: int | Literal["latest"] = "latest" - format: EapiCommandFormat = EapiCommandFormat.JSON - timestamps: bool = False - auto_complete: bool = False - expand_aliases: bool = False - stop_on_error: bool = True - id: int | str = field(default_factory=lambda: uuid4().hex) - - def to_jsonrpc(self) -> JsonRpc: - """Return the JSON-RPC dictionary payload for the request.""" - return { - "jsonrpc": "2.0", - "method": "runCmds", - "params": { - "version": self.version, - "cmds": self.commands, - "format": self.format, - "timestamps": self.timestamps, - "autoComplete": self.auto_complete, - "expandAliases": self.expand_aliases, - "stopOnError": self.stop_on_error, - }, - "id": self.id, - } - - -@dataclass(frozen=True) -class EapiResponse: - """Model for an eAPI response. - - Construct an EapiResponse from a JSON-RPC response dictionary using the `from_jsonrpc` class method. - - Can be iterated over to access command results in order of execution. - - Attributes - ---------- - request_id : str - The ID of the original request this response corresponds to. - _results : dict[int, EapiCommandResult] - Dictionary mapping request command indices to their respective results. - error_code : int | None - The JSON-RPC error code, if any. - error_message : str | None - The JSON-RPC error message, if any. - """ - - request_id: str - _results: dict[int, EapiCommandResult] = field(default_factory=dict) - error_code: int | None = None - error_message: str | None = None - - @property - def success(self) -> bool: - """Return True if the response has no errors.""" - return self.error_code is None - - @property - def results(self) -> list[EapiCommandResult]: - """Get all results as a list. Results are ordered by the command indices in the request.""" - return list(self._results.values()) - - def __len__(self) -> int: - """Return the number of results.""" - return len(self._results) - - def __iter__(self) -> Iterator[EapiCommandResult]: - """Enable iteration over the results. Results are yielded in the same order as provided in the request.""" - yield from self._results.values() - - @classmethod - def from_jsonrpc(cls, response: dict[str, Any], request: EapiRequest, *, raise_on_error: bool = False) -> EapiResponse: - """Build an EapiResponse from a JSON-RPC eAPI response. - - Parameters - ---------- - response - The JSON-RPC eAPI response dictionary. - request - The corresponding EapiRequest. - raise_on_error - Raise an EapiReponseError if the response contains errors, by default False. - - Returns - ------- - EapiResponse - The EapiResponse object. 
- """ - has_error = "error" in response - response_data = response["error"]["data"] if has_error else response["result"] - - # Handle case where we have fewer results than commands (stop_on_error=True) - executed_count = min(len(response_data), len(request.commands)) - - # Process the results we have - results = {} - for i in range(executed_count): - cmd = request.commands[i] - cmd_str = cmd["cmd"] if isinstance(cmd, dict) else cmd - data = response_data[i] - - output = None - errors = [] - success = True - start_time = None - duration = None - - # Parse the output based on the data type, no output when errors are present - if isinstance(data, dict): - if "errors" in data: - errors = data["errors"] - success = False - else: - output = data["output"] if request.format == EapiCommandFormat.TEXT and "output" in data else data - - # Add timestamps if available - if request.timestamps and "_meta" in data: - meta = data.pop("_meta") - start_time = meta.get("execStartTime") - duration = meta.get("execDuration") - - elif isinstance(data, str): - # Handle case where eAPI returns a JSON string response (serialized JSON) for certain commands - try: - from json import JSONDecodeError, loads - - output = loads(data) - except (JSONDecodeError, TypeError): - # If it's not valid JSON, store as is - LOGGER.warning("Invalid JSON response for command: %s. Storing as text: %s", cmd_str, data) - output = data - - results[i] = EapiCommandResult( - command=cmd_str, - output=output, - errors=errors, - success=success, - start_time=start_time, - duration=duration, - ) - - # If stop_on_error is True and we have an error, indicate commands not executed - if has_error and request.stop_on_error and executed_count < len(request.commands): - for i in range(executed_count, len(request.commands)): - cmd = request.commands[i] - cmd_str = cmd["cmd"] if isinstance(cmd, dict) else cmd - results[i] = EapiCommandResult(command=cmd_str, output=None, errors=["Command not executed due to previous error"], success=False, executed=False) - - response_obj = cls( - request_id=response["id"], - _results=results, - error_code=response["error"]["code"] if has_error else None, - error_message=response["error"]["message"] if has_error else None, - ) - - if raise_on_error and has_error: - raise EapiReponseError(response_obj) - - return response_obj - - -@dataclass(frozen=True) -class EapiCommandResult: - """Model for an eAPI command result. - - Attributes - ---------- - command : str - The command that was executed. - output : EapiJsonOutput | EapiTextOutput | None - The command result output. None if the command returned errors. - errors : list[str] - A list of error messages, if any. - success : bool - True if the command was successful. - executed : bool - True if the command was executed. When `stop_on_error` is True in the request, some commands may not be executed. - start_time : float | None - Command execution start time in seconds. Uses Unix epoch format. `timestamps` must be True in the request. - duration : float | None - Command execution duration in seconds. `timestamps` must be True in the request. 
- """ - - command: str - output: EapiJsonOutput | EapiTextOutput | None - errors: list[str] = field(default_factory=list) - success: bool = True - executed: bool = True - start_time: float | None = None - duration: float | None = None diff --git a/asynceapi/_types.py b/asynceapi/_types.py deleted file mode 100644 index ebebf04..0000000 --- a/asynceapi/_types.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. -# Use of this source code is governed by the Apache License 2.0 -# that can be found in the LICENSE file. -"""Type definitions used for the asynceapi package.""" - -from __future__ import annotations - -import sys -from typing import TYPE_CHECKING, Any, Literal - -if TYPE_CHECKING: - from ._constants import EapiCommandFormat - -if sys.version_info >= (3, 11): - from typing import NotRequired, TypedDict -else: - from typing_extensions import NotRequired, TypedDict - -EapiJsonOutput = dict[str, Any] -"""Type definition of an eAPI JSON output response.""" -EapiTextOutput = str -"""Type definition of an eAPI text output response.""" -EapiSimpleCommand = str -"""Type definition of an eAPI simple command. A simple command is the CLI command to run as a string.""" - - -class EapiComplexCommand(TypedDict): - """Type definition of an eAPI complex command. A complex command is a dictionary with the CLI command to run with additional parameters.""" - - cmd: str - input: NotRequired[str] - revision: NotRequired[int] - - -class JsonRpc(TypedDict): - """Type definition of a JSON-RPC payload.""" - - jsonrpc: Literal["2.0"] - method: Literal["runCmds"] - params: JsonRpcParams - id: NotRequired[int | str] - - -class JsonRpcParams(TypedDict): - """Type definition of JSON-RPC parameters.""" - - version: NotRequired[int | Literal["latest"]] - cmds: list[EapiSimpleCommand | EapiComplexCommand] - format: NotRequired[EapiCommandFormat] - autoComplete: NotRequired[bool] - expandAliases: NotRequired[bool] - timestamps: NotRequired[bool] - stopOnError: NotRequired[bool] diff --git a/asynceapi/aio_portcheck.py b/asynceapi/aio_portcheck.py index be9a79f..deac043 100644 --- a/asynceapi/aio_portcheck.py +++ b/asynceapi/aio_portcheck.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # Initially written by Jeremy Schulman at https://github.com/jeremyschulman/aio-eapi diff --git a/asynceapi/config_session.py b/asynceapi/config_session.py index e5e1d08..7f83da4 100644 --- a/asynceapi/config_session.py +++ b/asynceapi/config_session.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. 
# Initially written by Jeremy Schulman at https://github.com/jeremyschulman/aio-eapi @@ -10,10 +10,9 @@ from __future__ import annotations import re -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: - from ._types import EapiComplexCommand, EapiJsonOutput, EapiSimpleCommand from .device import Device # ----------------------------------------------------------------------------- @@ -79,7 +78,7 @@ class SessionConfig: # Public Methods # ------------------------------------------------------------------------- - async def status_all(self) -> EapiJsonOutput: + async def status_all(self) -> dict[str, Any]: """Get the status of all the session config on the device. Run the following command on the device: @@ -87,7 +86,7 @@ class SessionConfig: Returns ------- - EapiJsonOutput + dict[str, Any] Dictionary of native EOS eAPI response; see `status` method for details. @@ -117,9 +116,9 @@ class SessionConfig: } ``` """ - return await self._cli(command="show configuration sessions detail") + return await self._cli("show configuration sessions detail") # type: ignore[return-value] # json outformat returns dict[str, Any] - async def status(self) -> EapiJsonOutput | None: + async def status(self) -> dict[str, Any] | None: """Get the status of a session config on the device. Run the following command on the device: @@ -130,7 +129,7 @@ class SessionConfig: Returns ------- - EapiJsonOutput | None + dict[str, Any] | None Dictionary instance of the session status. If the session does not exist, then this method will return None. @@ -202,7 +201,7 @@ class SessionConfig: # prepare the initial set of command to enter the config session and # rollback clean if the `replace` argument is True. - commands: list[EapiSimpleCommand | EapiComplexCommand] = [self._cli_config_session] + commands: list[str | dict[str, Any]] = [self._cli_config_session] if replace: commands.append(self.CLI_CFG_FACTORY_RESET) @@ -233,7 +232,7 @@ class SessionConfig: if timer: command += f" timer {timer}" - await self._cli(command=command) + await self._cli(command) async def abort(self) -> None: """Abort the configuration session. @@ -241,7 +240,7 @@ class SessionConfig: Run the following command on the device: # configure session abort """ - await self._cli(command=f"{self._cli_config_session} abort") + await self._cli(f"{self._cli_config_session} abort") async def diff(self) -> str: """Return the "diff" of the session config relative to the running config. @@ -258,7 +257,7 @@ class SessionConfig: ---------- * https://www.gnu.org/software/diffutils/manual/diffutils.txt """ - return await self._cli(command=f"show session-config named {self.name} diffs", ofmt="text") + return await self._cli(f"show session-config named {self.name} diffs", ofmt="text") # type: ignore[return-value] # text outformat returns str async def load_file(self, filename: str, *, replace: bool = False) -> None: """Load the configuration from into the session configuration. @@ -282,12 +281,12 @@ class SessionConfig: If there are any issues with loading the configuration file then a RuntimeError is raised with the error messages content. 
""" - commands: list[EapiSimpleCommand | EapiComplexCommand] = [self._cli_config_session] + commands: list[str | dict[str, Any]] = [self._cli_config_session] if replace: commands.append(self.CLI_CFG_FACTORY_RESET) commands.append(f"copy {filename} session-config") - res = await self._cli(commands=commands) + res: list[dict[str, Any]] = await self._cli(commands=commands) # type: ignore[assignment] # JSON outformat of multiple commands returns list[dict[str, Any]] checks_re = re.compile(r"error|abort|invalid", flags=re.IGNORECASE) messages = res[-1]["messages"] @@ -296,4 +295,4 @@ class SessionConfig: async def write(self) -> None: """Save the running config to the startup config by issuing the command "write" to the device.""" - await self._cli(command="write") + await self._cli("write") diff --git a/asynceapi/device.py b/asynceapi/device.py index a7702da..c423c36 100644 --- a/asynceapi/device.py +++ b/asynceapi/device.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # Initially written by Jeremy Schulman at https://github.com/jeremyschulman/aio-eapi @@ -10,7 +10,7 @@ from __future__ import annotations from socket import getservbyname -from typing import TYPE_CHECKING, Any, Literal, overload +from typing import TYPE_CHECKING, Any # ----------------------------------------------------------------------------- # Public Imports @@ -20,13 +20,12 @@ import httpx # ----------------------------------------------------------------------------- # Private Imports # ----------------------------------------------------------------------------- -from ._constants import EapiCommandFormat from .aio_portcheck import port_check_url from .config_session import SessionConfig from .errors import EapiCommandError if TYPE_CHECKING: - from ._types import EapiComplexCommand, EapiJsonOutput, EapiSimpleCommand, EapiTextOutput, JsonRpc + from collections.abc import Sequence # ----------------------------------------------------------------------------- # Exports @@ -122,139 +121,18 @@ class Device(httpx.AsyncClient): """ return await port_check_url(self.base_url) - # Single command, JSON output, no suppression - @overload async def cli( self, - *, - command: EapiSimpleCommand | EapiComplexCommand, - commands: None = None, - ofmt: Literal["json"] = "json", - version: int | Literal["latest"] = "latest", - suppress_error: Literal[False] = False, - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> EapiJsonOutput: ... - - # Multiple commands, JSON output, no suppression - @overload - async def cli( - self, - *, - command: None = None, - commands: list[EapiSimpleCommand | EapiComplexCommand], - ofmt: Literal["json"] = "json", - version: int | Literal["latest"] = "latest", - suppress_error: Literal[False] = False, - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> list[EapiJsonOutput]: ... - - # Single command, TEXT output, no suppression - @overload - async def cli( - self, - *, - command: EapiSimpleCommand | EapiComplexCommand, - commands: None = None, - ofmt: Literal["text"], - version: int | Literal["latest"] = "latest", - suppress_error: Literal[False] = False, - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> EapiTextOutput: ... 
- - # Multiple commands, TEXT output, no suppression - @overload - async def cli( - self, - *, - command: None = None, - commands: list[EapiSimpleCommand | EapiComplexCommand], - ofmt: Literal["text"], - version: int | Literal["latest"] = "latest", - suppress_error: Literal[False] = False, - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> list[EapiTextOutput]: ... - - # Single command, JSON output, with suppression - @overload - async def cli( - self, - *, - command: EapiSimpleCommand | EapiComplexCommand, - commands: None = None, - ofmt: Literal["json"] = "json", - version: int | Literal["latest"] = "latest", - suppress_error: Literal[True], - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> EapiJsonOutput | None: ... - - # Multiple commands, JSON output, with suppression - @overload - async def cli( - self, - *, - command: None = None, - commands: list[EapiSimpleCommand | EapiComplexCommand], - ofmt: Literal["json"] = "json", - version: int | Literal["latest"] = "latest", - suppress_error: Literal[True], - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> list[EapiJsonOutput] | None: ... - - # Single command, TEXT output, with suppression - @overload - async def cli( - self, - *, - command: EapiSimpleCommand | EapiComplexCommand, - commands: None = None, - ofmt: Literal["text"], - version: int | Literal["latest"] = "latest", - suppress_error: Literal[True], - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> EapiTextOutput | None: ... - - # Multiple commands, TEXT output, with suppression - @overload - async def cli( - self, - *, - command: None = None, - commands: list[EapiSimpleCommand | EapiComplexCommand], - ofmt: Literal["text"], - version: int | Literal["latest"] = "latest", - suppress_error: Literal[True], - auto_complete: bool = False, - expand_aliases: bool = False, - req_id: int | str | None = None, - ) -> list[EapiTextOutput] | None: ... - - # Actual implementation - async def cli( - self, - command: EapiSimpleCommand | EapiComplexCommand | None = None, - commands: list[EapiSimpleCommand | EapiComplexCommand] | None = None, - ofmt: Literal["json", "text"] = "json", - version: int | Literal["latest"] = "latest", + command: str | dict[str, Any] | None = None, + commands: Sequence[str | dict[str, Any]] | None = None, + ofmt: str | None = None, + version: int | str | None = "latest", *, suppress_error: bool = False, auto_complete: bool = False, expand_aliases: bool = False, req_id: int | str | None = None, - ) -> EapiJsonOutput | EapiTextOutput | list[EapiJsonOutput] | list[EapiTextOutput] | None: + ) -> list[dict[str, Any] | str] | dict[str, Any] | str | None: """Execute one or more CLI commands. Parameters @@ -265,7 +143,6 @@ class Device(httpx.AsyncClient): A list of commands to execute; results in a list of output responses. ofmt Either 'json' or 'text'; indicates the output format for the CLI commands. - eAPI defaults to 'json'. version By default the eAPI will use "version 1" for all API object models. This driver will, by default, always set version to "latest" so @@ -281,13 +158,13 @@ class Device(httpx.AsyncClient): response = dev.cli(..., suppress_error=True) auto_complete - Enabled/disables the command auto-compelete feature of the eAPI. Per the + Enabled/disables the command auto-compelete feature of the EAPI. 
Per the documentation: Allows users to use shorthand commands in eAPI calls. With this parameter included a user can send 'sh ver' via eAPI to get the output of 'show version'. expand_aliases - Enables/disables the command use of user-defined alias. Per the + Enables/disables the command use of User defined alias. Per the documentation: Allowed users to provide the expandAliases parameter to eAPI calls. This allows users to use aliased commands via the API. @@ -299,34 +176,15 @@ class Device(httpx.AsyncClient): Returns ------- - dict[str, Any] - Single command, JSON output, suppress_error=False - list[dict[str, Any]] - Multiple commands, JSON output, suppress_error=False - str - Single command, TEXT output, suppress_error=False - list[str] - Multiple commands, TEXT output, suppress_error=False - dict[str, Any] | None - Single command, JSON output, suppress_error=True - list[dict[str, Any]] | None - Multiple commands, JSON output, suppress_error=True - str | None - Single command, TEXT output, suppress_error=True - list[str] | None - Multiple commands, TEXT output, suppress_error=True + list[dict[str, Any] | str] | dict[str, Any] | str | None + One or List of output responses, per the description above. """ if not any((command, commands)): msg = "Required 'command' or 'commands'" raise RuntimeError(msg) jsonrpc = self._jsonrpc_command( - commands=[command] if command else commands if commands else [], - ofmt=ofmt, - version=version, - auto_complete=auto_complete, - expand_aliases=expand_aliases, - req_id=req_id, + commands=[command] if command else commands, ofmt=ofmt, version=version, auto_complete=auto_complete, expand_aliases=expand_aliases, req_id=req_id ) try: @@ -339,14 +197,14 @@ class Device(httpx.AsyncClient): def _jsonrpc_command( self, - commands: list[EapiSimpleCommand | EapiComplexCommand], - ofmt: Literal["json", "text"] = "json", - version: int | Literal["latest"] = "latest", + commands: Sequence[str | dict[str, Any]] | None = None, + ofmt: str | None = None, + version: int | str | None = "latest", *, auto_complete: bool = False, expand_aliases: bool = False, req_id: int | str | None = None, - ) -> JsonRpc: + ) -> dict[str, Any]: """Create the JSON-RPC command dictionary object. Parameters @@ -355,7 +213,6 @@ class Device(httpx.AsyncClient): A list of commands to execute; results in a list of output responses. ofmt Either 'json' or 'text'; indicates the output format for the CLI commands. - eAPI defaults to 'json'. version By default the eAPI will use "version 1" for all API object models. This driver will, by default, always set version to "latest" so @@ -384,20 +241,25 @@ class Device(httpx.AsyncClient): dict containing the JSON payload to run the command. """ - return { + cmd: dict[str, Any] = { "jsonrpc": "2.0", "method": "runCmds", "params": { "version": version, "cmds": commands, - "format": EapiCommandFormat(ofmt), - "autoComplete": auto_complete, - "expandAliases": expand_aliases, + "format": ofmt or self.EAPI_DEFAULT_OFMT, }, "id": req_id or id(self), } + if auto_complete is not None: + cmd["params"].update({"autoComplete": auto_complete}) - async def jsonrpc_exec(self, jsonrpc: JsonRpc) -> list[EapiJsonOutput] | list[EapiTextOutput]: + if expand_aliases is not None: + cmd["params"].update({"expandAliases": expand_aliases}) + + return cmd + + async def jsonrpc_exec(self, jsonrpc: dict[str, Any]) -> list[dict[str, Any] | str]: """Execute the JSON-RPC dictionary object. 
Parameters @@ -453,7 +315,7 @@ class Device(httpx.AsyncClient): failed_cmd = commands[err_at] raise EapiCommandError( - passed=[get_output(cmd_data[i]) for i in range(err_at)], + passed=[get_output(cmd_data[cmd_i]) for cmd_i, cmd in enumerate(commands[:err_at])], failed=failed_cmd["cmd"] if isinstance(failed_cmd, dict) else failed_cmd, errors=cmd_data[err_at]["errors"], errmsg=err_msg, diff --git a/asynceapi/errors.py b/asynceapi/errors.py index 50b02c6..5fce9db 100644 --- a/asynceapi/errors.py +++ b/asynceapi/errors.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024-2025 Arista Networks, Inc. +# Copyright (c) 2024 Arista Networks, Inc. # Use of this source code is governed by the Apache License 2.0 # that can be found in the LICENSE file. # Initially written by Jeremy Schulman at https://github.com/jeremyschulman/aio-eapi @@ -6,16 +6,13 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import Any import httpx -if TYPE_CHECKING: - from ._types import EapiComplexCommand, EapiJsonOutput, EapiSimpleCommand, EapiTextOutput - class EapiCommandError(RuntimeError): - """Exception class for eAPI command errors. + """Exception class for EAPI command errors. Attributes ---------- @@ -26,14 +23,7 @@ class EapiCommandError(RuntimeError): not_exec: a list of commands that were not executed """ - def __init__( - self, - failed: str, - errors: list[str], - errmsg: str, - passed: list[EapiJsonOutput] | list[EapiTextOutput], - not_exec: list[EapiSimpleCommand | EapiComplexCommand], - ) -> None: + def __init__(self, failed: str, errors: list[str], errmsg: str, passed: list[str | dict[str, Any]], not_exec: list[dict[str, Any]]) -> None: """Initialize for the EapiCommandError exception.""" self.failed = failed self.errmsg = errmsg diff --git a/debian/changelog b/debian/changelog index 498b5e0..dfc5dea 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,12 +1,3 @@ -anta (1.3.0-1) sid; urgency=medium - - * Updating to standards version 4.7.1. - * Updating to standards version 4.7.2. - * Merging upstream version 1.3.0. - * Updating year in upstream copyright for 2025. - - -- Daniel Baumann Mon, 17 Mar 2025 07:34:09 +0100 - anta (1.2.0-2) sid; urgency=medium * Updating vcs fields. diff --git a/debian/copyright b/debian/copyright index ee4377f..cfe7ad7 100644 --- a/debian/copyright +++ b/debian/copyright @@ -4,7 +4,7 @@ Upstream-Contact: https://github.com/aristanetworks/anta/issues Source: https://github.com/aristanetworks/anta/tags Files: * -Copyright: 2019-2025 Arista Networks, Inc. +Copyright: 2019-2014 Arista Networks, Inc. License: Apache-2.0 Files: debian/* diff --git a/docs/README.md b/docs/README.md index b6e00c7..07ac3d2 100755 --- a/docs/README.md +++ b/docs/README.md @@ -1,5 +1,5 @@ diff --git a/docs/advanced_usages/as-python-lib.md b/docs/advanced_usages/as-python-lib.md index 1a7dedb..fce5e7e 100644 --- a/docs/advanced_usages/as-python-lib.md +++ b/docs/advanced_usages/as-python-lib.md @@ -1,5 +1,5 @@ @@ -14,8 +14,8 @@ ANTA is a Python library that can be used in user applications. This section des A device is represented in ANTA as a instance of a subclass of the [AntaDevice](../api/device.md#anta.device.AntaDevice) abstract class. There are few abstract methods that needs to be implemented by child classes: -- The [collect()](../api/device.md#anta.device.AntaDevice.collect) coroutine is in charge of collecting outputs of [AntaCommand](../api/commands.md#anta.models.AntaCommand) instances. 
-- The [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutine is in charge of updating attributes of the [AntaDevice](../api/device.md#anta.device.AntaDevice) instance. These attributes are used by [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) to filter out unreachable devices or by [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) to skip devices based on their hardware models. +- The [collect()](../api/device.md#anta.device.AntaDevice.collect) coroutine is in charge of collecting outputs of [AntaCommand](../api/models.md#anta.models.AntaCommand) instances. +- The [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutine is in charge of updating attributes of the [AntaDevice](../api/device.md#anta.device.AntaDevice) instance. These attributes are used by [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) to filter out unreachable devices or by [AntaTest](../api/models.md#anta.models.AntaTest) to skip devices based on their hardware models. The [copy()](../api/device.md#anta.device.AntaDevice.copy) coroutine is used to copy files to and from the device. It does not need to be implemented if tests are not using it. @@ -24,7 +24,7 @@ The [copy()](../api/device.md#anta.device.AntaDevice.copy) coroutine is used to The [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) class is an implementation of [AntaDevice](../api/device.md#anta.device.AntaDevice) for Arista EOS. It uses the [aio-eapi](https://github.com/jeremyschulman/aio-eapi) eAPI client and the [AsyncSSH](https://github.com/ronf/asyncssh) library. -- The [\_collect()](../api/device.md#anta.device.AsyncEOSDevice._collect) coroutine collects [AntaCommand](../api/models.md#anta.models.AntaCommand) outputs using eAPI. +- The [_collect()](../api/device.md#anta.device.AsyncEOSDevice._collect) coroutine collects [AntaCommand](../api/models.md#anta.models.AntaCommand) outputs using eAPI. - The [refresh()](../api/device.md#anta.device.AsyncEOSDevice.refresh) coroutine tries to open a TCP connection on the eAPI port and update the `is_online` attribute accordingly. If the TCP connection succeeds, it sends a `show version` command to gather the hardware model of the device and updates the `established` and `hw_model` attributes. - The [copy()](../api/device.md#anta.device.AsyncEOSDevice.copy) coroutine copies files to and from the device using the SCP protocol. diff --git a/docs/advanced_usages/caching.md b/docs/advanced_usages/caching.md index 628376b..8b089ce 100644 --- a/docs/advanced_usages/caching.md +++ b/docs/advanced_usages/caching.md @@ -1,5 +1,5 @@ @@ -8,17 +8,30 @@ ANTA is a streamlined Python framework designed for efficient interaction with n ## Configuration +By default, ANTA utilizes [aiocache](https://github.com/aio-libs/aiocache)'s memory cache backend, also called [`SimpleMemoryCache`](https://aiocache.aio-libs.org/en/v0.12.2/caches.html#simplememorycache). This library aims for simplicity and supports asynchronous operations to go along with Python `asyncio` used in ANTA. + The `_init_cache()` method of the [AntaDevice](../api/device.md#anta.device.AntaDevice) abstract class initializes the cache. 
Child classes can override this method to tweak the cache configuration: +```python +def _init_cache(self) -> None: + """ + Initialize cache for the device, can be overridden by subclasses to manipulate how it works + """ + self.cache = Cache(cache_class=Cache.MEMORY, ttl=60, namespace=self.name, plugins=[HitMissRatioPlugin()]) + self.cache_locks = defaultdict(asyncio.Lock) +``` + +The cache is also configured with `aiocache`'s [`HitMissRatioPlugin`](https://aiocache.aio-libs.org/en/v0.12.2/plugins.html#hitmissratioplugin) plugin to calculate the ratio of hits the cache has and give useful statistics for logging purposes in ANTA. + ## Cache key design The cache is initialized per `AntaDevice` and uses the following cache key design: `:` -The `uid` is an attribute of [AntaCommand](../api/commands.md#anta.models.AntaCommand), which is a unique identifier generated from the command, version, revision and output format. +The `uid` is an attribute of [AntaCommand](../api/models.md#anta.models.AntaCommand), which is a unique identifier generated from the command, version, revision and output format. -Each UID has its own asyncio lock. This design allows coroutines that need to access the cache for different UIDs to do so concurrently. The locks are managed by the `AntaCache.locks` dictionary. +Each UID has its own asyncio lock. This design allows coroutines that need to access the cache for different UIDs to do so concurrently. The locks are managed by the `self.cache_locks` dictionary. ## Mechanisms @@ -32,37 +45,37 @@ There might be scenarios where caching is not wanted. You can disable caching in 1. Caching can be disabled globally, for **ALL** commands on **ALL** devices, using the `--disable-cache` global flag when invoking anta at the [CLI](../cli/overview.md#invoking-anta-cli): - ```bash - anta --disable-cache --username arista --password arista nrfu table - ``` + ```bash + anta --disable-cache --username arista --password arista nrfu table + ``` 2. Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` when defining the ANTA [Inventory](../usage-inventory-catalog.md#device-inventory) file: - ```yaml - anta_inventory: - hosts: - - host: 172.20.20.101 - name: DC1-SPINE1 - tags: ["SPINE", "DC1"] - disable_cache: True # Set this key to True - - host: 172.20.20.102 - name: DC1-SPINE2 - tags: ["SPINE", "DC1"] - disable_cache: False # Optional since it's the default + ```yaml + anta_inventory: + hosts: + - host: 172.20.20.101 + name: DC1-SPINE1 + tags: ["SPINE", "DC1"] + disable_cache: True # Set this key to True + - host: 172.20.20.102 + name: DC1-SPINE2 + tags: ["SPINE", "DC1"] + disable_cache: False # Optional since it's the default - networks: - - network: "172.21.21.0/24" - disable_cache: True + networks: + - network: "172.21.21.0/24" + disable_cache: True - ranges: - - start: 172.22.22.10 - end: 172.22.22.19 - disable_cache: True - ``` + ranges: + - start: 172.22.22.10 + end: 172.22.22.19 + disable_cache: True + ``` - This approach effectively disables caching for **ALL** commands sent to devices targeted by the `disable_cache` key. + This approach effectively disables caching for **ALL** commands sent to devices targeted by the `disable_cache` key. -3. For tests developers, caching can be disabled for a specific [`AntaCommand`](../api/commands.md#anta.models.AntaCommand) or [`AntaTemplate`](../api/commands.md#anta.models.AntaTemplate) by setting the `use_cache` attribute to `False`. 
That means the command output will always be collected on the device and therefore never use caching. +3. For test developers, caching can be disabled for a specific [`AntaCommand`](../api/models.md#anta.models.AntaCommand) or [`AntaTemplate`](../api/models.md#anta.models.AntaTemplate) by setting the `use_cache` attribute to `False`. That means the command output will always be collected on the device and therefore never use caching. ### Disable caching in a child class of `AntaDevice` diff --git a/docs/advanced_usages/custom-tests.md b/docs/advanced_usages/custom-tests.md index 0880743..2fc61cc 100644 --- a/docs/advanced_usages/custom-tests.md +++ b/docs/advanced_usages/custom-tests.md @@ -1,5 +1,5 @@ @@ -13,7 +13,7 @@ ANTA is not only a Python library with a CLI and a collection of built-in tests, A test is a Python class where a test function is defined and will be run by the framework. -ANTA provides an abstract class [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest). This class does the heavy lifting and provides the logic to define, collect and test data. The code below is an example of a simple test in ANTA, which is an [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) subclass: +ANTA provides an abstract class [AntaTest](../api/models.md#anta.models.AntaTest). This class does the heavy lifting and provides the logic to define, collect and test data. The code below is an example of a simple test in ANTA, which is an [AntaTest](../api/models.md#anta.models.AntaTest) subclass: ````python from anta.models import AntaTest, AntaCommand @@ -51,18 +51,18 @@ class VerifyTemperature(AntaTest): self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'") ```` -[AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) also provides more advanced capabilities like [AntaCommand](../api/commands.md#anta.models.AntaCommand) templating using the [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) class or test inputs definition and validation using [AntaTest.Input](../api/tests/anta_test.md#anta.models.AntaTest.Input) [pydantic](https://docs.pydantic.dev/latest/) model. This will be discussed in the sections below. +[AntaTest](../api/models.md#anta.models.AntaTest) also provides more advanced capabilities like [AntaCommand](../api/models.md#anta.models.AntaCommand) templating using the [AntaTemplate](../api/models.md#anta.models.AntaTemplate) class or test inputs definition and validation using [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) [pydantic](https://docs.pydantic.dev/latest/) model. This will be discussed in the sections below. ## AntaTest structure -Full AntaTest API documentation is available in the [API documentation section](../api/tests/anta_test.md#anta.models.AntaTest) +Full AntaTest API documentation is available in the [API documentation section](../api/models.md#anta.models.AntaTest) ### Class Attributes - `name` (`str`, `optional`): Name of the test. Used during reporting. By default set to the Class name. - `description` (`str`, `optional`): A human readable description of your test. By default set to the first line of the docstring. - `categories` (`list[str]`): A list of categories in which the test belongs. -- `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of commands to collect from devices. This list **must** be a list of [AntaCommand](../api/commands.md#anta.models.AntaCommand) or [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) instances.
Rendering [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) instances will be discussed later. +- `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of commands to collect from devices. This list **must** be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later. > [!INFO] > All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. @@ -86,7 +86,7 @@ Full AntaTest API documentation is available in the [API documentation section]( > > - **Logger object** > -> ANTA already provides comprehensive logging at every step of a test execution. The [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. +> ANTA already provides comprehensive logging at every step of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. > > - **AntaDevice object** > @@ -94,13 +94,13 @@ Full AntaTest API documentation is available in the [API documentation section]( ### Test Inputs -[AntaTest.Input](../api/tests/anta_test.md#anta.models.AntaTest.Input) is a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that allows test developers to define their test inputs. [pydantic](https://docs.pydantic.dev/latest/) provides out of the box [error handling](https://docs.pydantic.dev/latest/usage/models/#error-handling) for test input validation based on the type hints defined by the test developer. +[AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) is a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that allows test developers to define their test inputs. [pydantic](https://docs.pydantic.dev/latest/) provides out of the box [error handling](https://docs.pydantic.dev/latest/usage/models/#error-handling) for test input validation based on the type hints defined by the test developer.
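As an illustration only (not part of the patch above), a minimal sketch of how a custom test might declare its own `Input` model so that pydantic validates user-supplied values before the test runs; the `VerifyTemperatureLimit` class name, the `max_temperature` field, its default and the `ambientThreshold` output key are hypothetical:

```python
from __future__ import annotations

from typing import ClassVar

from anta.models import AntaCommand, AntaTest


class VerifyTemperatureLimit(AntaTest):
    """Verifies that the ambient temperature is below a user-provided threshold."""

    categories: ClassVar[list[str]] = ["hardware"]
    commands: ClassVar[list[AntaCommand]] = [AntaCommand(command="show system environment temperature")]

    class Input(AntaTest.Input):
        """Inputs for the VerifyTemperatureLimit test."""

        max_temperature: int = 45  # hypothetical threshold, in degrees Celsius

    @AntaTest.anta_test
    def test(self) -> None:
        # json_output holds the collected eAPI response of the first command
        command_output = self.instance_commands[0].json_output
        temperature = command_output.get("ambientThreshold", 0)  # hypothetical output key
        if temperature < self.inputs.max_temperature:
            self.result.is_success()
        else:
            self.result.is_failure(f"Temperature {temperature}C exceeds the {self.inputs.max_temperature}C limit")
```

With such a definition, a catalog entry supplying `max_temperature` (or omitting it to accept the default) is validated from the type hints; an invalid value raises a pydantic `ValidationError` and the test is not executed.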
-The base definition of [AntaTest.Input](../api/tests/anta_test.md#anta.models.AntaTest.Input) provides common test inputs for all [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) instances: +The base definition of [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) provides common test inputs for all [AntaTest](../api/models.md#anta.models.AntaTest) instances: #### Input model -Full `Input` model documentation is available in [API documentation section](../api/tests/anta_test.md#anta.models.AntaTest.Input) +Full `Input` model documentation is available in [API documentation section](../api/models.md#anta.models.AntaTest.Input) ::: anta.models.AntaTest.Input options: @@ -118,7 +118,7 @@ Full `Input` model documentation is available in [API documentation section](../ #### ResultOverwrite model -Full `ResultOverwrite` model documentation is available in [API documentation section](../api/tests/anta_test.md#anta.models.AntaTest.Input.ResultOverwrite) +Full `ResultOverwrite` model documentation is available in [API documentation section](../api/models.md#anta.models.AntaTest.Input.ResultOverwrite) ::: anta.models.AntaTest.Input.ResultOverwrite options: @@ -138,31 +138,31 @@ Full `ResultOverwrite` model documentation is available in [API documentation se ### Methods -- [test(self) -> None](../api/tests/anta_test.md#anta.models.AntaTest.test): This is an abstract method that **must** be implemented. It contains the test logic that can access the collected command outputs using the `instance_commands` instance attribute, access the test inputs using the `inputs` instance attribute and **must** set the `result` instance attribute accordingly. It must be implemented using the `AntaTest.anta_test` decorator that provides logging and will collect commands before executing the `test()` method. -- [render(self, template: AntaTemplate) -> list[AntaCommand]](../api/tests/anta_test.md#anta.models.AntaTest.render): This method only needs to be implemented if [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) instances are present in the `commands` class attribute. It will be called for every [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) occurrence and **must** return a list of [AntaCommand](../api/commands.md#anta.models.AntaCommand) using the [AntaTemplate.render()](../api/commands.md#anta.models.AntaTemplate.render) method. It can access test inputs using the `inputs` instance attribute. +- [test(self) -> None](../api/models.md#anta.models.AntaTest.test): This is an abstract method that **must** be implemented. It contains the test logic that can access the collected command outputs using the `instance_commands` instance attribute, access the test inputs using the `inputs` instance attribute and **must** set the `result` instance attribute accordingly. It must be implemented using the `AntaTest.anta_test` decorator that provides logging and will collect commands before executing the `test()` method. +- [render(self, template: AntaTemplate) -> list[AntaCommand]](../api/models.md#anta.models.AntaTest.render): This method only needs to be implemented if [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances are present in the `commands` class attribute. It will be called for every [AntaTemplate](../api/models.md#anta.models.AntaTemplate) occurrence and **must** return a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) using the [AntaTemplate.render()](../api/models.md#anta.models.AntaTemplate.render) method. 
It can access test inputs using the `inputs` instance attribute. ## Test execution Below is a high level description of the test execution flow in ANTA: -1. ANTA will parse the test catalog to get the list of [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) subclasses to instantiate and their associated input values. We consider a single [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) subclass in the following steps. +1. ANTA will parse the test catalog to get the list of [AntaTest](../api/models.md#anta.models.AntaTest) subclasses to instantiate and their associated input values. We consider a single [AntaTest](../api/models.md#anta.models.AntaTest) subclass in the following steps. -2. ANTA will instantiate the [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) subclass and a single device will be provided to the test instance. The `Input` model defined in the class will also be instantiated at this moment. If any [ValidationError](https://docs.pydantic.dev/latest/errors/errors/) is raised, the test execution will be stopped. +2. ANTA will instantiate the [AntaTest](../api/models.md#anta.models.AntaTest) subclass and a single device will be provided to the test instance. The `Input` model defined in the class will also be instantiated at this moment. If any [ValidationError](https://docs.pydantic.dev/latest/errors/errors/) is raised, the test execution will be stopped. -3. If there is any [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) instance in the `commands` class attribute, [render()](../api/tests/anta_test.md#anta.models.AntaTest.render) will be called for every occurrence. At this moment, the `instance_commands` attribute has been initialized. If any rendering error occurs, the test execution will be stopped. +3. If there is any [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instance in the `commands` class attribute, [render()](../api/models.md#anta.models.AntaTest.render) will be called for every occurrence. At this moment, the `instance_commands` attribute has been initialized. If any rendering error occurs, the test execution will be stopped. 4. The `AntaTest.anta_test` decorator will collect the commands from the device and update the `instance_commands` attribute with the outputs. If any collection error occurs, the test execution will be stopped. -5. The [test()](../api/tests/anta_test.md#anta.models.AntaTest.test) method is executed. +5. The [test()](../api/models.md#anta.models.AntaTest.test) method is executed. ## Writing an AntaTest subclass -In this section, we will go into all the details of writing an [AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) subclass. +In this section, we will go into all the details of writing an [AntaTest](../api/models.md#anta.models.AntaTest) subclass. ### Class definition -Import [anta.models.AntaTest](../api/tests/anta_test.md#anta.models.AntaTest) and define your own class. -Define the mandatory class attributes using [anta.models.AntaCommand](../api/commands.md#anta.models.AntaCommand), [anta.models.AntaTemplate](../api/commands.md#anta.models.AntaTemplate) or both. +Import [anta.models.AntaTest](../api/models.md#anta.models.AntaTest) and define your own class. +Define the mandatory class attributes using [anta.models.AntaCommand](../api/models.md#anta.models.AntaCommand), [anta.models.AntaTemplate](../api/models.md#anta.models.AntaTemplate) or both. > [!NOTE] > Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. 
For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). @@ -244,7 +244,7 @@ class (AntaTest): ``` To define an input field type, refer to the [pydantic documentation](https://docs.pydantic.dev/latest/usage/types/types/) about types. -You can also leverage [anta.custom_types](../api/tests/types.md) that provides reusable types defined in ANTA tests. +You can also leverage [anta.custom_types](../api/types.md) that provides reusable types defined in ANTA tests. Regarding required, optional and nullable fields, refer to this [documentation](https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields) on how to define them. @@ -253,7 +253,7 @@ Regarding required, optional and nullable fields, refer to this [documentation]( ### Template rendering -Define the `render()` method if you have [AntaTemplate](../api/commands.md#anta.models.AntaTemplate) instances in your `commands` class attribute: +Define the `render()` method if you have [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances in your `commands` class attribute: ```python class (AntaTest): @@ -262,7 +262,7 @@ class (AntaTest): return [template.render(