
Adding upstream version 1.1.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-05 11:54:23 +01:00
parent f13b7abbd8
commit 77504588ab
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
196 changed files with 10121 additions and 3780 deletions


@ -30,7 +30,7 @@ class SafeDumper(yaml.SafeDumper):
https://github.com/yaml/pyyaml/issues/234#issuecomment-765894586. https://github.com/yaml/pyyaml/issues/234#issuecomment-765894586.
""" """
# pylint: disable=R0901,W0613,W1113 # pylint: disable=R0901
def increase_indent(self, flow=False, *args, **kwargs): def increase_indent(self, flow=False, *args, **kwargs):
return super().increase_indent(flow=flow, indentless=False) return super().increase_indent(flow=flow, indentless=False)

.github/markdownlint.yaml (new file, 98 lines added)

@ -0,0 +1,98 @@
# markdownlint configuration
# the definitive list of rules for markdownlint can be found:
# https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md
#
# only deviations from the defaults are noted here or where there's an opinion
# being expressed.
# default state for all rules
default: true
# heading style
MD003:
style: "atx"
# unordered list style
MD004:
style: "dash"
# unordered list indentation (2-spaces)
# keep it tight yo!
MD007:
indent: 2
# line length
MD013: false
# a lot of debate about whether to wrap long lines or not
# multiple headings with the same content
# siblings_only is set here to allow for common header values in structured
# documents
MD024:
siblings_only: true
# Multiple top-level headings in the same document
MD025:
front_matter_title: ""
# MD029/ol-prefix - Ordered list item prefix
MD029:
# List style
style: "ordered"
# fenced code should be surrounded by blank lines (default: true)
MD031: true
# lists should be surrounded by blank lines (default: true)
MD032: true
# MD033/no-inline-html - Inline HTML
MD033: false
# bare URL - bare URLs should be wrapped in angle brackets
# <https://eos.arista.com>
MD034: false
# horizontal rule style (default: consistent)
MD035:
style: "---"
# first line in a file should be a top-level heading
# since we're using front-matter, this rule is disabled
MD041: false
# proper-names - proper names to have the correct capitalization
# probably not entirely helpful in a technical writing environment.
MD044: false
# block style - disabled to allow for admonitions
MD046: false
# MD048/code-fence-style - Code fence style
MD048:
# Code fence style
style: "backtick"
# MD049/Emphasis style should be consistent
MD049:
# Emphasis style should be consistent
style: "asterisk"
# MD050/Strong style should be consistent
MD050:
# Strong style should be consistent
style: "asterisk"
# MD037/no-space-in-emphasis - Spaces inside emphasis markers
# this incorrectly flags asterisks used in table contents, e.g. in *foo | *bar
# it would remove the space between | and *bar
MD037: false

.github/markdownlintignore (new empty file)


@ -122,7 +122,7 @@ jobs:
test-documentation: test-documentation:
name: Build offline documentation for testing name: Build offline documentation for testing
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
needs: [lint-python, type-python, test-python] needs: [test-python]
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Setup Python - name: Setup Python
@ -133,3 +133,20 @@ jobs:
run: pip install .[doc] run: pip install .[doc]
- name: "Build mkdocs documentation offline" - name: "Build mkdocs documentation offline"
run: mkdocs build run: mkdocs build
benchmarks:
name: Benchmark ANTA for Python 3.12
runs-on: ubuntu-latest
needs: [test-python]
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install dependencies
run: pip install .[dev]
- name: Run benchmarks
uses: CodSpeedHQ/action@v3
with:
token: ${{ secrets.CODSPEED_TOKEN }}
run: pytest --codspeed --no-cov --log-cli-level INFO tests/benchmark

.github/workflows/codspeed.yml (new file, 22 lines added)

@ -0,0 +1,22 @@
---
name: Run benchmarks manually
on:
workflow_dispatch:
jobs:
benchmarks:
name: Benchmark ANTA for Python 3.12
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install dependencies
run: pip install .[dev]
- name: Run benchmarks
uses: CodSpeedHQ/action@v3
with:
token: ${{ secrets.CODSPEED_TOKEN }}
run: pytest --codspeed --no-cov --log-cli-level INFO tests/benchmark
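The workflow above runs the suite under tests/benchmark through pytest-codspeed. A minimal sketch of what such a benchmark test can look like, assuming the plugin's `benchmark` fixture; the workload is purely illustrative and not taken from the ANTA tree:

```python
# Hypothetical benchmark test placed under tests/benchmark/.
def build_squares() -> list[int]:
    # Placeholder workload; a real test would exercise ANTA code paths.
    return [i * i for i in range(10_000)]


def test_build_squares(benchmark) -> None:
    # pytest-codspeed measures the callable passed to the benchmark fixture.
    result = benchmark(build_squares)
    assert len(result) == 10_000
```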


@ -39,7 +39,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push - name: Build and push
uses: docker/build-push-action@v5 uses: docker/build-push-action@v6
with: with:
context: . context: .
file: Dockerfile file: Dockerfile


@ -13,7 +13,7 @@ jobs:
# https://github.com/marketplace/actions/auto-author-assign # https://github.com/marketplace/actions/auto-author-assign
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: toshimaru/auto-author-assign@v2.1.0 - uses: toshimaru/auto-author-assign@v2.1.1
with: with:
repo-token: "${{ secrets.GITHUB_TOKEN }}" repo-token: "${{ secrets.GITHUB_TOKEN }}"
@ -22,7 +22,7 @@ jobs:
steps: steps:
# Please look up the latest version from # Please look up the latest version from
# https://github.com/amannn/action-semantic-pull-request/releases # https://github.com/amannn/action-semantic-pull-request/releases
- uses: amannn/action-semantic-pull-request@v5.5.2 - uses: amannn/action-semantic-pull-request@v5.5.3
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:


@ -100,7 +100,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push - name: Build and push
uses: docker/build-push-action@v5 uses: docker/build-push-action@v6
with: with:
context: . context: .
file: Dockerfile file: Dockerfile


@ -10,21 +10,6 @@ jobs:
scan_secret: scan_secret:
name: Scan incoming changes name: Scan incoming changes
runs-on: ubuntu-latest runs-on: ubuntu-latest
container:
image: ghcr.io/aristanetworks/secret-scanner-service:main
options: --name sss-scanner
steps: steps:
- name: Checkout ${{ github.ref }}
# Hitting https://github.com/actions/checkout/issues/334 so trying v1
uses: actions/checkout@v1
with:
fetch-depth: 0
- name: Run scanner - name: Run scanner
run: | uses: aristanetworks/secret-scanner-service-public@main
git config --global --add safe.directory $GITHUB_WORKSPACE
scanner commit . github ${{ github.repository }} \
--markdown-file job_summary.md \
${{ github.event_name == 'pull_request' && format('--since-commit {0}', github.event.pull_request.base.sha) || ''}}
- name: Write result to summary
run: cat ./job_summary.md >> $GITHUB_STEP_SUMMARY
if: ${{ always() }}

.github/workflows/sonar.yml (new file, 44 lines added)

@ -0,0 +1,44 @@
---
name: Analysis with Sonarlint and publish to SonarCloud
on:
push:
branches:
- main
# Need to do this to be able to have coverage on PR across forks.
pull_request_target:
# TODO: this could be improved by running only coverage; today the tox gh-actions
# configuration in pyproject.toml has Python 3.11 running the coverage report side
jobs:
sonarcloud:
name: Run Sonarlint analysis and upload to SonarCloud.
if: github.repository == 'aristanetworks/anta'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Install dependencies
run: pip install tox tox-gh-actions
- name: "Run pytest via tox for ${{ matrix.python }}"
run: tox
- name: SonarCloud Scan
uses: SonarSource/sonarcloud-github-action@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
with:
# Using ACTION_STEP_DEBUG to trigger verbose when debugging in Github Action
args: >
-Dsonar.scm.revision=${{ github.event.pull_request.head.sha }}
-Dsonar.pullrequest.key=${{ github.event.number }}
-Dsonar.pullrequest.branch=${{ github.event.pull_request.head.ref }}
-Dsonar.pullrequest.base=${{ github.event.pull_request.base.ref }}
-Dsonar.verbose=${{ secrets.ACTIONS_STEP_DEBUG }}

.gitignore (20 changed lines)

@ -1,8 +1,10 @@
__pycache__ __pycache__
*.pyc *.pyc
.pages .pages
.coverage
.pytest_cache .pytest_cache
.mypy_cache
.ruff_cache
.cache
build build
dist dist
*.egg-info *.egg-info
@ -46,14 +48,13 @@ htmlcov/
.tox/ .tox/
.nox/ .nox/
.coverage .coverage
coverage_html_report
.coverage.* .coverage.*
.cache
nosetests.xml nosetests.xml
coverage.xml coverage.xml
*.cover *.cover
*.py,cover *.py,cover
.hypothesis/ .hypothesis/
.pytest_cache/
cover/ cover/
report.html report.html
@ -98,16 +99,3 @@ venv.bak/
# VScode settings # VScode settings
.vscode .vscode
test.env
tech-support/
tech-support/*
2*
**/report.html
.*report.html
# direnv file
.envrc
clab-atd-anta/*
clab-atd-anta/


@ -1,11 +1,14 @@
--- ---
# See https://pre-commit.com for more information # See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks # See https://pre-commit.com/hooks.html for more hooks
ci:
autoupdate_commit_msg: "ci: pre-commit autoupdate"
files: ^(anta|docs|scripts|tests|asynceapi)/ files: ^(anta|docs|scripts|tests|asynceapi)/
repos: repos:
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0 rev: v5.0.0
hooks: hooks:
- id: trailing-whitespace - id: trailing-whitespace
exclude: docs/.*.svg exclude: docs/.*.svg
@ -32,7 +35,7 @@ repos:
- name: Check and insert license on Markdown files - name: Check and insert license on Markdown files
id: insert-license id: insert-license
files: .*\.md$ files: .*\.md$
# exclude: exclude: ^tests/data/.*\.md$
args: args:
- --license-filepath - --license-filepath
- .github/license-short.txt - .github/license-short.txt
@ -43,7 +46,7 @@ repos:
- '<!--| ~| -->' - '<!--| ~| -->'
- repo: https://github.com/astral-sh/ruff-pre-commit - repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.4.8 rev: v0.6.9
hooks: hooks:
- id: ruff - id: ruff
name: Run Ruff linter name: Run Ruff linter
@ -52,7 +55,7 @@ repos:
name: Run Ruff formatter name: Run Ruff formatter
- repo: https://github.com/pycqa/pylint - repo: https://github.com/pycqa/pylint
rev: "v3.2.3" rev: "v3.3.1"
hooks: hooks:
- id: pylint - id: pylint
name: Check code style with pylint name: Check code style with pylint
@ -69,6 +72,8 @@ repos:
- types-pyOpenSSL - types-pyOpenSSL
- pylint_pydantic - pylint_pydantic
- pytest - pytest
- pytest-codspeed
- respx
- repo: https://github.com/codespell-project/codespell - repo: https://github.com/codespell-project/codespell
rev: v2.3.0 rev: v2.3.0
@ -80,7 +85,7 @@ repos:
types: [text] types: [text]
- repo: https://github.com/pre-commit/mirrors-mypy - repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.10.0 rev: v1.11.2
hooks: hooks:
- id: mypy - id: mypy
name: Check typing with mypy name: Check typing with mypy
@ -93,3 +98,13 @@ repos:
- types-pyOpenSSL - types-pyOpenSSL
- pytest - pytest
files: ^(anta|tests)/ files: ^(anta|tests)/
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.42.0
hooks:
- id: markdownlint
name: Check Markdown files style.
args:
- --config=.github/markdownlint.yaml
- --ignore-path=.github/markdownlintignore
- --fix

.vscode/settings.json (25 changed lines)

@ -1,29 +1,14 @@
{ {
"ruff.enable": true, "ruff.enable": true,
"python.testing.unittestEnabled": false, "ruff.configuration": "pyproject.toml",
"python.testing.pytestEnabled": true, "python.testing.pytestEnabled": true,
"pylint.importStrategy": "fromEnvironment",
"pylint.severity": {
"refactor": "Warning"
},
"pylint.args": [
"--load-plugins",
"pylint_pydantic",
"--rcfile=pyproject.toml"
],
"python.testing.pytestArgs": [ "python.testing.pytestArgs": [
"tests" "tests"
], ],
"autoDocstring.docstringFormat": "numpy",
"autoDocstring.includeName": false,
"autoDocstring.includeExtendedSummary": true,
"autoDocstring.startOnNewLine": true,
"autoDocstring.guessTypes": true,
"python.languageServer": "Pylance",
"githubIssues.issueBranchTitle": "issues/${issueNumber}-${issueTitle}", "githubIssues.issueBranchTitle": "issues/${issueNumber}-${issueTitle}",
"editor.formatOnPaste": true, "pylint.importStrategy": "fromEnvironment",
"files.trimTrailingWhitespace": true, "pylint.args": [
"mypy.configFile": "pyproject.toml", "--rcfile=pyproject.toml"
"workbench.remoteIndicator.showExtensionRecommendations": true, ],
} }


@ -20,7 +20,10 @@ __credits__ = [
__copyright__ = "Copyright 2022-2024, Arista Networks, Inc." __copyright__ = "Copyright 2022-2024, Arista Networks, Inc."
# ANTA Debug Mode environment variable # ANTA Debug Mode environment variable
__DEBUG__ = bool(os.environ.get("ANTA_DEBUG", "").lower() == "true") __DEBUG__ = os.environ.get("ANTA_DEBUG", "").lower() == "true"
if __DEBUG__:
# enable asyncio DEBUG mode when __DEBUG__ is enabled
os.environ["PYTHONASYNCIODEBUG"] = "1"
# Source: https://rich.readthedocs.io/en/stable/appendix/colors.html # Source: https://rich.readthedocs.io/en/stable/appendix/colors.html
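With this change, setting the ANTA_DEBUG environment variable before importing anta also switches asyncio into debug mode. A small sketch based on the lines above; the prints are illustrative:

```python
import os

# Must be set before anta is imported for __DEBUG__ to pick it up.
os.environ["ANTA_DEBUG"] = "true"

import anta  # noqa: E402

print(anta.__DEBUG__)                    # True
print(os.environ["PYTHONASYNCIODEBUG"])  # "1", set by anta at import time
```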


@ -10,21 +10,29 @@ import logging
import math import math
from collections import defaultdict from collections import defaultdict
from inspect import isclass from inspect import isclass
from itertools import chain
from json import load as json_load
from pathlib import Path from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Union from typing import TYPE_CHECKING, Any, Literal, Optional, Union
from warnings import warn
import yaml
from pydantic import BaseModel, ConfigDict, RootModel, ValidationError, ValidationInfo, field_validator, model_serializer, model_validator from pydantic import BaseModel, ConfigDict, RootModel, ValidationError, ValidationInfo, field_validator, model_serializer, model_validator
from pydantic.types import ImportString from pydantic.types import ImportString
from pydantic_core import PydanticCustomError from pydantic_core import PydanticCustomError
from yaml import YAMLError, safe_load from yaml import YAMLError, safe_dump, safe_load
from anta.logger import anta_log_exception from anta.logger import anta_log_exception
from anta.models import AntaTest from anta.models import AntaTest
if TYPE_CHECKING: if TYPE_CHECKING:
import sys
from types import ModuleType from types import ModuleType
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# { <module_name> : [ { <test_class_name>: <input_as_dict_or_None> }, ... ] } # { <module_name> : [ { <test_class_name>: <input_as_dict_or_None> }, ... ] }
@ -37,8 +45,12 @@ ListAntaTestTuples = list[tuple[type[AntaTest], Optional[Union[AntaTest.Input, d
class AntaTestDefinition(BaseModel): class AntaTestDefinition(BaseModel):
"""Define a test with its associated inputs. """Define a test with its associated inputs.
test: An AntaTest concrete subclass Attributes
inputs: The associated AntaTest.Input subclass instance ----------
test
An AntaTest concrete subclass.
inputs
The associated AntaTest.Input subclass instance.
""" """
model_config = ConfigDict(frozen=True) model_config = ConfigDict(frozen=True)
@ -58,6 +70,7 @@ class AntaTestDefinition(BaseModel):
Returns Returns
------- -------
dict
A dictionary representing the model. A dictionary representing the model.
""" """
return {self.test.__name__: self.inputs} return {self.test.__name__: self.inputs}
@ -116,7 +129,7 @@ class AntaTestDefinition(BaseModel):
raise ValueError(msg) raise ValueError(msg)
@model_validator(mode="after") @model_validator(mode="after")
def check_inputs(self) -> AntaTestDefinition: def check_inputs(self) -> Self:
"""Check the `inputs` field typing. """Check the `inputs` field typing.
The `inputs` class attribute needs to be an instance of the AntaTest.Input subclass defined in the class `test`. The `inputs` class attribute needs to be an instance of the AntaTest.Input subclass defined in the class `test`.
@ -130,7 +143,7 @@ class AntaTestDefinition(BaseModel):
class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition]]]): # pylint: disable=too-few-public-methods class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition]]]): # pylint: disable=too-few-public-methods
"""Represents an ANTA Test Catalog File. """Represents an ANTA Test Catalog File.
Example: Example
------- -------
A valid test catalog file must have the following structure: A valid test catalog file must have the following structure:
``` ```
@ -147,7 +160,7 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition]
def flatten_modules(data: dict[str, Any], package: str | None = None) -> dict[ModuleType, list[Any]]: def flatten_modules(data: dict[str, Any], package: str | None = None) -> dict[ModuleType, list[Any]]:
"""Allow the user to provide a data structure with nested Python modules. """Allow the user to provide a data structure with nested Python modules.
Example: Example
------- -------
``` ```
anta.tests.routing: anta.tests.routing:
@ -166,7 +179,7 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition]
module_name = f".{module_name}" # noqa: PLW2901 module_name = f".{module_name}" # noqa: PLW2901
try: try:
module: ModuleType = importlib.import_module(name=module_name, package=package) module: ModuleType = importlib.import_module(name=module_name, package=package)
except Exception as e: # pylint: disable=broad-exception-caught except Exception as e:
# A test module is potentially user-defined code. # A test module is potentially user-defined code.
# We need to catch everything if we want to have meaningful logs # We need to catch everything if we want to have meaningful logs
module_str = f"{module_name[1:] if module_name.startswith('.') else module_name}{f' from package {package}' if package else ''}" module_str = f"{module_name[1:] if module_name.startswith('.') else module_name}{f' from package {package}' if package else ''}"
@ -232,13 +245,24 @@ class AntaCatalogFile(RootModel[dict[ImportString[Any], list[AntaTestDefinition]
Returns Returns
------- -------
str
The YAML representation string of this model. The YAML representation string of this model.
""" """
# TODO: Pydantic and YAML serialization/deserialization is not supported natively. # TODO: Pydantic and YAML serialization/deserialization is not supported natively.
# This could be improved. # This could be improved.
# https://github.com/pydantic/pydantic/issues/1043 # https://github.com/pydantic/pydantic/issues/1043
# Explore if this worth using this: https://github.com/NowanIlfideme/pydantic-yaml # Explore if this worth using this: https://github.com/NowanIlfideme/pydantic-yaml
return yaml.safe_dump(yaml.safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), indent=2, width=math.inf) return safe_dump(safe_load(self.model_dump_json(serialize_as_any=True, exclude_unset=True)), indent=2, width=math.inf)
def to_json(self) -> str:
"""Return a JSON representation string of this model.
Returns
-------
str
The JSON representation string of this model.
"""
return self.model_dump_json(serialize_as_any=True, exclude_unset=True, indent=2)
class AntaCatalog: class AntaCatalog:
@ -254,10 +278,12 @@ class AntaCatalog:
) -> None: ) -> None:
"""Instantiate an AntaCatalog instance. """Instantiate an AntaCatalog instance.
Args: Parameters
---- ----------
tests: A list of AntaTestDefinition instances. tests
filename: The path from which the catalog is loaded. A list of AntaTestDefinition instances.
filename
The path from which the catalog is loaded.
""" """
self._tests: list[AntaTestDefinition] = [] self._tests: list[AntaTestDefinition] = []
@ -270,11 +296,14 @@ class AntaCatalog:
else: else:
self._filename = Path(filename) self._filename = Path(filename)
# Default indexes for faster access self.indexes_built: bool
self.tag_to_tests: defaultdict[str | None, set[AntaTestDefinition]] = defaultdict(set) self.tag_to_tests: defaultdict[str | None, set[AntaTestDefinition]]
self.tests_without_tags: set[AntaTestDefinition] = set() self._init_indexes()
self.indexes_built: bool = False
self.final_tests_count: int = 0 def _init_indexes(self) -> None:
"""Init indexes related variables."""
self.tag_to_tests = defaultdict(set)
self.indexes_built = False
@property @property
def filename(self) -> Path | None: def filename(self) -> Path | None:
@ -298,19 +327,30 @@ class AntaCatalog:
self._tests = value self._tests = value
@staticmethod @staticmethod
def parse(filename: str | Path) -> AntaCatalog: def parse(filename: str | Path, file_format: Literal["yaml", "json"] = "yaml") -> AntaCatalog:
"""Create an AntaCatalog instance from a test catalog file. """Create an AntaCatalog instance from a test catalog file.
Args: Parameters
---- ----------
filename: Path to test catalog YAML file filename
Path to test catalog YAML or JSON file.
file_format
Format of the file, either 'yaml' or 'json'.
Returns
-------
AntaCatalog
An AntaCatalog populated with the file content.
""" """
if file_format not in ["yaml", "json"]:
message = f"'{file_format}' is not a valid format for an AntaCatalog file. Only 'yaml' and 'json' are supported."
raise ValueError(message)
try: try:
file: Path = filename if isinstance(filename, Path) else Path(filename) file: Path = filename if isinstance(filename, Path) else Path(filename)
with file.open(encoding="UTF-8") as f: with file.open(encoding="UTF-8") as f:
data = safe_load(f) data = safe_load(f) if file_format == "yaml" else json_load(f)
except (TypeError, YAMLError, OSError) as e: except (TypeError, YAMLError, OSError, ValueError) as e:
message = f"Unable to parse ANTA Test Catalog file '{filename}'" message = f"Unable to parse ANTA Test Catalog file '{filename}'"
anta_log_exception(e, message, logger) anta_log_exception(e, message, logger)
raise raise
@ -325,11 +365,17 @@ class AntaCatalog:
It is the data structure returned by `yaml.load()` function of a valid It is the data structure returned by `yaml.load()` function of a valid
YAML Test Catalog file. YAML Test Catalog file.
Args: Parameters
---- ----------
data: Python dictionary used to instantiate the AntaCatalog instance data
filename: value to be set as AntaCatalog instance attribute Python dictionary used to instantiate the AntaCatalog instance.
filename
value to be set as AntaCatalog instance attribute
Returns
-------
AntaCatalog
An AntaCatalog populated with the 'data' dictionary content.
""" """
tests: list[AntaTestDefinition] = [] tests: list[AntaTestDefinition] = []
if data is None: if data is None:
@ -359,10 +405,15 @@ class AntaCatalog:
See ListAntaTestTuples type alias for details. See ListAntaTestTuples type alias for details.
Args: Parameters
---- ----------
data: Python list used to instantiate the AntaCatalog instance data
Python list used to instantiate the AntaCatalog instance.
Returns
-------
AntaCatalog
An AntaCatalog populated with the 'data' list content.
""" """
tests: list[AntaTestDefinition] = [] tests: list[AntaTestDefinition] = []
try: try:
@ -372,24 +423,54 @@ class AntaCatalog:
raise raise
return AntaCatalog(tests) return AntaCatalog(tests)
def merge(self, catalog: AntaCatalog) -> AntaCatalog: @classmethod
"""Merge two AntaCatalog instances. def merge_catalogs(cls, catalogs: list[AntaCatalog]) -> AntaCatalog:
"""Merge multiple AntaCatalog instances.
Args: Parameters
---- ----------
catalog: AntaCatalog instance to merge to this instance. catalogs
A list of AntaCatalog instances to merge.
Returns Returns
------- -------
AntaCatalog
A new AntaCatalog instance containing the tests of all the input catalogs.
"""
combined_tests = list(chain(*(catalog.tests for catalog in catalogs)))
return cls(tests=combined_tests)
def merge(self, catalog: AntaCatalog) -> AntaCatalog:
"""Merge two AntaCatalog instances.
Warning
-------
This method is deprecated and will be removed in ANTA v2.0. Use `AntaCatalog.merge_catalogs()` instead.
Parameters
----------
catalog
AntaCatalog instance to merge to this instance.
Returns
-------
AntaCatalog
A new AntaCatalog instance containing the tests of the two instances. A new AntaCatalog instance containing the tests of the two instances.
""" """
return AntaCatalog(tests=self.tests + catalog.tests) # TODO: Use a decorator to deprecate this method instead. See https://github.com/aristanetworks/anta/issues/754
warn(
message="AntaCatalog.merge() is deprecated and will be removed in ANTA v2.0. Use AntaCatalog.merge_catalogs() instead.",
category=DeprecationWarning,
stacklevel=2,
)
return self.merge_catalogs([self, catalog])
def dump(self) -> AntaCatalogFile: def dump(self) -> AntaCatalogFile:
"""Return an AntaCatalogFile instance from this AntaCatalog instance. """Return an AntaCatalogFile instance from this AntaCatalog instance.
Returns Returns
------- -------
AntaCatalogFile
An AntaCatalogFile instance containing tests of this AntaCatalog instance. An AntaCatalogFile instance containing tests of this AntaCatalog instance.
""" """
root: dict[ImportString[Any], list[AntaTestDefinition]] = {} root: dict[ImportString[Any], list[AntaTestDefinition]] = {}
@ -403,9 +484,7 @@ class AntaCatalog:
If a `filtered_tests` set is provided, only the tests in this set will be indexed. If a `filtered_tests` set is provided, only the tests in this set will be indexed.
This method populates two attributes: This method populates the tag_to_tests attribute, which is a dictionary mapping tags to sets of tests.
- tag_to_tests: A dictionary mapping each tag to a set of tests that contain it.
- tests_without_tags: A set of tests that do not have any tags.
Once the indexes are built, the `indexes_built` attribute is set to True. Once the indexes are built, the `indexes_built` attribute is set to True.
""" """
@ -419,27 +498,34 @@ class AntaCatalog:
for tag in test_tags: for tag in test_tags:
self.tag_to_tests[tag].add(test) self.tag_to_tests[tag].add(test)
else: else:
self.tests_without_tags.add(test) self.tag_to_tests[None].add(test)
self.tag_to_tests[None] = self.tests_without_tags
self.indexes_built = True self.indexes_built = True
def clear_indexes(self) -> None:
"""Clear this AntaCatalog instance indexes."""
self._init_indexes()
def get_tests_by_tags(self, tags: set[str], *, strict: bool = False) -> set[AntaTestDefinition]: def get_tests_by_tags(self, tags: set[str], *, strict: bool = False) -> set[AntaTestDefinition]:
"""Return all tests that match a given set of tags, according to the specified strictness. """Return all tests that match a given set of tags, according to the specified strictness.
Args: Parameters
---- ----------
tags: The tags to filter tests by. If empty, return all tests without tags. tags
strict: If True, returns only tests that contain all specified tags (intersection). The tags to filter tests by. If empty, return all tests without tags.
strict
If True, returns only tests that contain all specified tags (intersection).
If False, returns tests that contain any of the specified tags (union). If False, returns tests that contain any of the specified tags (union).
Returns Returns
------- -------
set[AntaTestDefinition]: A set of tests that match the given tags. set[AntaTestDefinition]
A set of tests that match the given tags.
Raises Raises
------ ------
ValueError: If the indexes have not been built prior to method call. ValueError
If the indexes have not been built prior to method call.
""" """
if not self.indexes_built: if not self.indexes_built:
msg = "Indexes have not been built yet. Call build_indexes() first." msg = "Indexes have not been built yet. Call build_indexes() first."
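Taken together, the catalog changes add JSON parsing, JSON dumping and a class-level merge. A short usage sketch based on the signatures above; the file names are placeholders:

```python
from anta.catalog import AntaCatalog

# Parse catalogs in either supported format (YAML remains the default).
yaml_catalog = AntaCatalog.parse("catalog.yml")
json_catalog = AntaCatalog.parse("catalog.json", file_format="json")

# merge_catalogs() replaces the now-deprecated instance method merge().
merged = AntaCatalog.merge_catalogs([yaml_catalog, json_catalog])

# The AntaCatalogFile model returned by dump() can now be serialized to JSON as well as YAML.
print(merged.dump().to_json())
```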


@ -25,7 +25,8 @@ logger = logging.getLogger(__name__)
@click.group(cls=AliasedGroup) @click.group(cls=AliasedGroup)
@click.pass_context @click.pass_context
@click.version_option(__version__) @click.help_option(allow_from_autoenv=False)
@click.version_option(__version__, allow_from_autoenv=False)
@click.option( @click.option(
"--log-file", "--log-file",
help="Send the logs to a file. If logging level is DEBUG, only INFO or higher will be sent to stdout.", help="Send the logs to a file. If logging level is DEBUG, only INFO or higher will be sent to stdout.",
@ -61,7 +62,7 @@ def cli() -> None:
"""Entrypoint for pyproject.toml.""" """Entrypoint for pyproject.toml."""
try: try:
anta(obj={}, auto_envvar_prefix="ANTA") anta(obj={}, auto_envvar_prefix="ANTA")
except Exception as exc: # pylint: disable=broad-exception-caught except Exception as exc: # noqa: BLE001
anta_log_exception( anta_log_exception(
exc, exc,
f"Uncaught Exception when running ANTA CLI\n{GITHUB_SUGGESTION}", f"Uncaught Exception when running ANTA CLI\n{GITHUB_SUGGESTION}",


@ -35,7 +35,6 @@ def run_cmd(
version: Literal["1", "latest"], version: Literal["1", "latest"],
revision: int, revision: int,
) -> None: ) -> None:
# pylint: disable=too-many-arguments
"""Run arbitrary command to an ANTA device.""" """Run arbitrary command to an ANTA device."""
console.print(f"Run command [green]{command}[/green] on [red]{device.name}[/red]") console.print(f"Run command [green]{command}[/green] on [red]{device.name}[/red]")
# I do not assume the following line, but click make me do it # I do not assume the following line, but click make me do it
@ -71,12 +70,14 @@ def run_template(
version: Literal["1", "latest"], version: Literal["1", "latest"],
revision: int, revision: int,
) -> None: ) -> None:
# pylint: disable=too-many-arguments # Using \b for click
# ruff: noqa: D301
"""Run arbitrary templated command to an ANTA device. """Run arbitrary templated command to an ANTA device.
Takes a list of arguments (keys followed by a value) to build a dictionary used as template parameters. Takes a list of arguments (keys followed by a value) to build a dictionary used as template parameters.
Example: \b
Example
------- -------
anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1 anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1


@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Any, Callable
import click import click
from anta.cli.utils import ExitCode, inventory_options from anta.cli.utils import ExitCode, core_options
if TYPE_CHECKING: if TYPE_CHECKING:
from anta.inventory import AntaInventory from anta.inventory import AntaInventory
@ -22,7 +22,7 @@ logger = logging.getLogger(__name__)
def debug_options(f: Callable[..., Any]) -> Callable[..., Any]: def debug_options(f: Callable[..., Any]) -> Callable[..., Any]:
"""Click common options required to execute a command on a specific device.""" """Click common options required to execute a command on a specific device."""
@inventory_options @core_options
@click.option( @click.option(
"--ofmt", "--ofmt",
type=click.Choice(["json", "text"]), type=click.Choice(["json", "text"]),
@ -44,12 +44,10 @@ def debug_options(f: Callable[..., Any]) -> Callable[..., Any]:
ctx: click.Context, ctx: click.Context,
*args: tuple[Any], *args: tuple[Any],
inventory: AntaInventory, inventory: AntaInventory,
tags: set[str] | None,
device: str, device: str,
**kwargs: Any, **kwargs: Any,
) -> Any: ) -> Any:
# TODO: @gmuloc - tags come from context https://github.com/aristanetworks/anta/issues/584 # TODO: @gmuloc - tags come from context https://github.com/aristanetworks/anta/issues/584
# pylint: disable=unused-argument
# ruff: noqa: ARG001 # ruff: noqa: ARG001
if (d := inventory.get(device)) is None: if (d := inventory.get(device)) is None:
logger.error("Device '%s' does not exist in Inventory", device) logger.error("Device '%s' does not exist in Inventory", device)


@ -9,7 +9,7 @@ from anta.cli.exec import commands
@click.group("exec") @click.group("exec")
def _exec() -> None: # pylint: disable=redefined-builtin def _exec() -> None:
"""Commands to execute various scripts on EOS devices.""" """Commands to execute various scripts on EOS devices."""


@ -10,16 +10,15 @@ import asyncio
import itertools import itertools
import json import json
import logging import logging
import re
from pathlib import Path from pathlib import Path
from typing import TYPE_CHECKING, Literal from typing import TYPE_CHECKING, Literal
from click.exceptions import UsageError from click.exceptions import UsageError
from httpx import ConnectError, HTTPError from httpx import ConnectError, HTTPError
from anta.custom_types import REGEXP_PATH_MARKERS
from anta.device import AntaDevice, AsyncEOSDevice from anta.device import AntaDevice, AsyncEOSDevice
from anta.models import AntaCommand from anta.models import AntaCommand
from anta.tools import safe_command
from asynceapi import EapiCommandError from asynceapi import EapiCommandError
if TYPE_CHECKING: if TYPE_CHECKING:
@ -52,7 +51,7 @@ async def clear_counters(anta_inventory: AntaInventory, tags: set[str] | None =
async def collect_commands( async def collect_commands(
inv: AntaInventory, inv: AntaInventory,
commands: dict[str, str], commands: dict[str, list[str]],
root_dir: Path, root_dir: Path,
tags: set[str] | None = None, tags: set[str] | None = None,
) -> None: ) -> None:
@ -61,17 +60,16 @@ async def collect_commands(
async def collect(dev: AntaDevice, command: str, outformat: Literal["json", "text"]) -> None: async def collect(dev: AntaDevice, command: str, outformat: Literal["json", "text"]) -> None:
outdir = Path() / root_dir / dev.name / outformat outdir = Path() / root_dir / dev.name / outformat
outdir.mkdir(parents=True, exist_ok=True) outdir.mkdir(parents=True, exist_ok=True)
safe_command = re.sub(rf"{REGEXP_PATH_MARKERS}", "_", command)
c = AntaCommand(command=command, ofmt=outformat) c = AntaCommand(command=command, ofmt=outformat)
await dev.collect(c) await dev.collect(c)
if not c.collected: if not c.collected:
logger.error("Could not collect commands on device %s: %s", dev.name, c.errors) logger.error("Could not collect commands on device %s: %s", dev.name, c.errors)
return return
if c.ofmt == "json": if c.ofmt == "json":
outfile = outdir / f"{safe_command}.json" outfile = outdir / f"{safe_command(command)}.json"
content = json.dumps(c.json_output, indent=2) content = json.dumps(c.json_output, indent=2)
elif c.ofmt == "text": elif c.ofmt == "text":
outfile = outdir / f"{safe_command}.log" outfile = outdir / f"{safe_command(command)}.log"
content = c.text_output content = c.text_output
else: else:
logger.error("Command outformat is not in ['json', 'text'] for command '%s'", command) logger.error("Command outformat is not in ['json', 'text'] for command '%s'", command)
@ -83,6 +81,9 @@ async def collect_commands(
logger.info("Connecting to devices...") logger.info("Connecting to devices...")
await inv.connect_inventory() await inv.connect_inventory()
devices = inv.get_inventory(established_only=True, tags=tags).devices devices = inv.get_inventory(established_only=True, tags=tags).devices
if not devices:
logger.info("No online device found. Exiting")
return
logger.info("Collecting commands from remote devices") logger.info("Collecting commands from remote devices")
coros = [] coros = []
if "json_format" in commands: if "json_format" in commands:
@ -134,8 +135,8 @@ async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bo
if not isinstance(device, AsyncEOSDevice): if not isinstance(device, AsyncEOSDevice):
msg = "anta exec collect-tech-support is only supported with AsyncEOSDevice for now." msg = "anta exec collect-tech-support is only supported with AsyncEOSDevice for now."
raise UsageError(msg) raise UsageError(msg)
if device.enable and device._enable_password is not None: # pylint: disable=protected-access if device.enable and device._enable_password is not None:
commands.append({"cmd": "enable", "input": device._enable_password}) # pylint: disable=protected-access commands.append({"cmd": "enable", "input": device._enable_password})
elif device.enable: elif device.enable:
commands.append({"cmd": "enable"}) commands.append({"cmd": "enable"})
commands.extend( commands.extend(
@ -146,7 +147,7 @@ async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bo
) )
logger.warning("Configuring 'aaa authorization exec default local' on device %s", device.name) logger.warning("Configuring 'aaa authorization exec default local' on device %s", device.name)
command = AntaCommand(command="show running-config | include aaa authorization exec default local", ofmt="text") command = AntaCommand(command="show running-config | include aaa authorization exec default local", ofmt="text")
await device._session.cli(commands=commands) # pylint: disable=protected-access await device._session.cli(commands=commands)
logger.info("Configured 'aaa authorization exec default local' on device %s", device.name) logger.info("Configured 'aaa authorization exec default local' on device %s", device.name)
logger.debug("'aaa authorization exec default local' is already configured on device %s", device.name) logger.debug("'aaa authorization exec default local' is already configured on device %s", device.name)
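Note that collect_commands() now takes the commands grouped per output format as lists of strings. A sketch of the expected shape; the "text_format" key is an assumption mirroring the "json_format" key visible in the diff:

```python
commands: dict[str, list[str]] = {
    "json_format": ["show version", "show ip interface brief"],
    # Assumed counterpart key for text output.
    "text_format": ["show running-config"],
}
```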


@ -45,7 +45,6 @@ logger = logging.getLogger(__name__)
default=False, default=False,
) )
def from_cvp(ctx: click.Context, output: Path, host: str, username: str, password: str, container: str | None, *, ignore_cert: bool) -> None: def from_cvp(ctx: click.Context, output: Path, host: str, username: str, password: str, container: str | None, *, ignore_cert: bool) -> None:
# pylint: disable=too-many-arguments
"""Build ANTA inventory from CloudVision. """Build ANTA inventory from CloudVision.
NOTE: Only username/password authentication is supported for on-premises CloudVision instances. NOTE: Only username/password authentication is supported for on-premises CloudVision instances.
@ -127,7 +126,6 @@ def inventory(inventory: AntaInventory, tags: set[str] | None, *, connected: boo
@click.command @click.command
@inventory_options @inventory_options
def tags(inventory: AntaInventory, **kwargs: Any) -> None: def tags(inventory: AntaInventory, **kwargs: Any) -> None:
# pylint: disable=unused-argument
"""Get list of configured tags in user inventory.""" """Get list of configured tags in user inventory."""
tags: set[str] = set() tags: set[str] = set()
for device in inventory.values(): for device in inventory.values():


@ -82,20 +82,26 @@ def get_cv_token(cvp_ip: str, cvp_username: str, cvp_password: str, *, verify_ce
TODO: need to handle requests error TODO: need to handle requests error
Args: Parameters
---- ----------
cvp_ip: IP address of CloudVision. cvp_ip
cvp_username: Username to connect to CloudVision. IP address of CloudVision.
cvp_password: Password to connect to CloudVision. cvp_username
verify_cert: Enable or disable certificate verification when connecting to CloudVision. Username to connect to CloudVision.
cvp_password
Password to connect to CloudVision.
verify_cert
Enable or disable certificate verification when connecting to CloudVision.
Returns Returns
------- -------
token(str): The token to use in further API calls to CloudVision. str
The token to use in further API calls to CloudVision.
Raises Raises
------ ------
requests.ssl.SSLError: If the certificate verification fails requests.ssl.SSLError
If the certificate verification fails.
""" """
# use CVP REST API to generate a token # use CVP REST API to generate a token
@ -161,11 +167,14 @@ def deep_yaml_parsing(data: dict[str, Any], hosts: list[AntaInventoryHost] | Non
def create_inventory_from_ansible(inventory: Path, output: Path, ansible_group: str = "all") -> None: def create_inventory_from_ansible(inventory: Path, output: Path, ansible_group: str = "all") -> None:
"""Create an ANTA inventory from an Ansible inventory YAML file. """Create an ANTA inventory from an Ansible inventory YAML file.
Args: Parameters
---- ----------
inventory: Ansible Inventory file to read inventory
output: ANTA inventory file to generate. Ansible Inventory file to read.
ansible_group: Ansible group from where to extract data. output
ANTA inventory file to generate.
ansible_group
Ansible group from where to extract data.
""" """
try: try:


@ -5,19 +5,14 @@
from __future__ import annotations from __future__ import annotations
import asyncio from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, get_args
import click import click
from anta.cli.nrfu import commands from anta.cli.nrfu import commands
from anta.cli.utils import AliasedGroup, catalog_options, inventory_options from anta.cli.utils import AliasedGroup, catalog_options, inventory_options
from anta.custom_types import TestStatus
from anta.models import AntaTest
from anta.result_manager import ResultManager from anta.result_manager import ResultManager
from anta.runner import main from anta.result_manager.models import AntaTestStatus
from .utils import anta_progress_bar, print_settings
if TYPE_CHECKING: if TYPE_CHECKING:
from anta.catalog import AntaCatalog from anta.catalog import AntaCatalog
@ -37,6 +32,7 @@ class IgnoreRequiredWithHelp(AliasedGroup):
"""Ignore MissingParameter exception when parsing arguments if `--help` is present for a subcommand.""" """Ignore MissingParameter exception when parsing arguments if `--help` is present for a subcommand."""
# Adding a flag for potential callbacks # Adding a flag for potential callbacks
ctx.ensure_object(dict) ctx.ensure_object(dict)
ctx.obj["args"] = args
if "--help" in args: if "--help" in args:
ctx.obj["_anta_help"] = True ctx.obj["_anta_help"] = True
@ -53,7 +49,7 @@ class IgnoreRequiredWithHelp(AliasedGroup):
return super().parse_args(ctx, args) return super().parse_args(ctx, args)
HIDE_STATUS: list[str] = list(get_args(TestStatus)) HIDE_STATUS: list[str] = list(AntaTestStatus)
HIDE_STATUS.remove("unset") HIDE_STATUS.remove("unset")
@ -96,7 +92,7 @@ HIDE_STATUS.remove("unset")
default=None, default=None,
type=click.Choice(HIDE_STATUS, case_sensitive=False), type=click.Choice(HIDE_STATUS, case_sensitive=False),
multiple=True, multiple=True,
help="Group result by test or device.", help="Hide results by type: success / failure / error / skipped'.",
required=False, required=False,
) )
@click.option( @click.option(
@ -107,7 +103,6 @@ HIDE_STATUS.remove("unset")
is_flag=True, is_flag=True,
default=False, default=False,
) )
# pylint: disable=too-many-arguments
def nrfu( def nrfu(
ctx: click.Context, ctx: click.Context,
inventory: AntaInventory, inventory: AntaInventory,
@ -120,38 +115,35 @@ def nrfu(
ignore_status: bool, ignore_status: bool,
ignore_error: bool, ignore_error: bool,
dry_run: bool, dry_run: bool,
catalog_format: str = "yaml",
) -> None: ) -> None:
"""Run ANTA tests on selected inventory devices.""" """Run ANTA tests on selected inventory devices."""
# If help is invoke somewhere, skip the command # If help is invoke somewhere, skip the command
if ctx.obj.get("_anta_help"): if ctx.obj.get("_anta_help"):
return return
# We use ctx.obj to pass stuff to the next Click functions # We use ctx.obj to pass stuff to the next Click functions
ctx.ensure_object(dict) ctx.ensure_object(dict)
ctx.obj["result_manager"] = ResultManager() ctx.obj["result_manager"] = ResultManager()
ctx.obj["ignore_status"] = ignore_status ctx.obj["ignore_status"] = ignore_status
ctx.obj["ignore_error"] = ignore_error ctx.obj["ignore_error"] = ignore_error
ctx.obj["hide"] = set(hide) if hide else None ctx.obj["hide"] = set(hide) if hide else None
print_settings(inventory, catalog) ctx.obj["catalog"] = catalog
with anta_progress_bar() as AntaTest.progress: ctx.obj["catalog_format"] = catalog_format
asyncio.run( ctx.obj["inventory"] = inventory
main( ctx.obj["tags"] = tags
ctx.obj["result_manager"], ctx.obj["device"] = device
inventory, ctx.obj["test"] = test
catalog, ctx.obj["dry_run"] = dry_run
tags=tags,
devices=set(device) if device else None,
tests=set(test) if test else None,
dry_run=dry_run,
)
)
if dry_run:
return
# Invoke `anta nrfu table` if no command is passed # Invoke `anta nrfu table` if no command is passed
if ctx.invoked_subcommand is None: if not ctx.invoked_subcommand:
ctx.invoke(commands.table) ctx.invoke(commands.table)
nrfu.add_command(commands.table) nrfu.add_command(commands.table)
nrfu.add_command(commands.csv)
nrfu.add_command(commands.json) nrfu.add_command(commands.json)
nrfu.add_command(commands.text) nrfu.add_command(commands.text)
nrfu.add_command(commands.tpl_report) nrfu.add_command(commands.tpl_report)
nrfu.add_command(commands.md_report)


@ -13,7 +13,7 @@ import click
from anta.cli.utils import exit_with_code from anta.cli.utils import exit_with_code
from .utils import print_jinja, print_json, print_table, print_text from .utils import print_jinja, print_json, print_table, print_text, run_tests, save_markdown_report, save_to_csv
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -27,11 +27,9 @@ logger = logging.getLogger(__name__)
help="Group result by test or device.", help="Group result by test or device.",
required=False, required=False,
) )
def table( def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> None:
ctx: click.Context, """ANTA command to check network state with table results."""
group_by: Literal["device", "test"] | None, run_tests(ctx)
) -> None:
"""ANTA command to check network states with table result."""
print_table(ctx, group_by=group_by) print_table(ctx, group_by=group_by)
exit_with_code(ctx) exit_with_code(ctx)
@ -44,10 +42,11 @@ def table(
type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path), type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path),
show_envvar=True, show_envvar=True,
required=False, required=False,
help="Path to save report as a file", help="Path to save report as a JSON file",
) )
def json(ctx: click.Context, output: pathlib.Path | None) -> None: def json(ctx: click.Context, output: pathlib.Path | None) -> None:
"""ANTA command to check network state with JSON result.""" """ANTA command to check network state with JSON results."""
run_tests(ctx)
print_json(ctx, output=output) print_json(ctx, output=output)
exit_with_code(ctx) exit_with_code(ctx)
@ -55,11 +54,34 @@ def json(ctx: click.Context, output: pathlib.Path | None) -> None:
@click.command() @click.command()
@click.pass_context @click.pass_context
def text(ctx: click.Context) -> None: def text(ctx: click.Context) -> None:
"""ANTA command to check network states with text result.""" """ANTA command to check network state with text results."""
run_tests(ctx)
print_text(ctx) print_text(ctx)
exit_with_code(ctx) exit_with_code(ctx)
@click.command()
@click.pass_context
@click.option(
"--csv-output",
type=click.Path(
file_okay=True,
dir_okay=False,
exists=False,
writable=True,
path_type=pathlib.Path,
),
show_envvar=True,
required=False,
help="Path to save report as a CSV file",
)
def csv(ctx: click.Context, csv_output: pathlib.Path) -> None:
"""ANTA command to check network states with CSV result."""
run_tests(ctx)
save_to_csv(ctx, csv_file=csv_output)
exit_with_code(ctx)
@click.command() @click.command()
@click.pass_context @click.pass_context
@click.option( @click.option(
@ -80,5 +102,22 @@ def text(ctx: click.Context) -> None:
) )
def tpl_report(ctx: click.Context, template: pathlib.Path, output: pathlib.Path | None) -> None: def tpl_report(ctx: click.Context, template: pathlib.Path, output: pathlib.Path | None) -> None:
"""ANTA command to check network state with templated report.""" """ANTA command to check network state with templated report."""
run_tests(ctx)
print_jinja(results=ctx.obj["result_manager"], template=template, output=output) print_jinja(results=ctx.obj["result_manager"], template=template, output=output)
exit_with_code(ctx) exit_with_code(ctx)
@click.command()
@click.pass_context
@click.option(
"--md-output",
type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path),
show_envvar=True,
required=True,
help="Path to save the report as a Markdown file",
)
def md_report(ctx: click.Context, md_output: pathlib.Path) -> None:
"""ANTA command to check network state with Markdown report."""
run_tests(ctx)
save_markdown_report(ctx, md_output=md_output)
exit_with_code(ctx)


@ -5,6 +5,7 @@
from __future__ import annotations from __future__ import annotations
import asyncio
import json import json
import logging import logging
from typing import TYPE_CHECKING, Literal from typing import TYPE_CHECKING, Literal
@ -14,7 +15,12 @@ from rich.panel import Panel
from rich.progress import BarColumn, MofNCompleteColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn, TimeRemainingColumn from rich.progress import BarColumn, MofNCompleteColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn, TimeRemainingColumn
from anta.cli.console import console from anta.cli.console import console
from anta.cli.utils import ExitCode
from anta.models import AntaTest
from anta.reporter import ReportJinja, ReportTable from anta.reporter import ReportJinja, ReportTable
from anta.reporter.csv_reporter import ReportCsv
from anta.reporter.md_reporter import MDReportGenerator
from anta.runner import main
if TYPE_CHECKING: if TYPE_CHECKING:
import pathlib import pathlib
@ -28,6 +34,37 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def run_tests(ctx: click.Context) -> None:
"""Run the tests."""
# Digging up the parameters from the parent context
if ctx.parent is None:
ctx.exit()
nrfu_ctx_params = ctx.parent.params
tags = nrfu_ctx_params["tags"]
device = nrfu_ctx_params["device"] or None
test = nrfu_ctx_params["test"] or None
dry_run = nrfu_ctx_params["dry_run"]
catalog = ctx.obj["catalog"]
inventory = ctx.obj["inventory"]
print_settings(inventory, catalog)
with anta_progress_bar() as AntaTest.progress:
asyncio.run(
main(
ctx.obj["result_manager"],
inventory,
catalog,
tags=tags,
devices=set(device) if device else None,
tests=set(test) if test else None,
dry_run=dry_run,
)
)
if dry_run:
ctx.exit()
def _get_result_manager(ctx: click.Context) -> ResultManager: def _get_result_manager(ctx: click.Context) -> ResultManager:
"""Get a ResultManager instance based on Click context.""" """Get a ResultManager instance based on Click context."""
return ctx.obj["result_manager"].filter(ctx.obj.get("hide")) if ctx.obj.get("hide") is not None else ctx.obj["result_manager"] return ctx.obj["result_manager"].filter(ctx.obj.get("hide")) if ctx.obj.get("hide") is not None else ctx.obj["result_manager"]
@ -58,14 +95,21 @@ def print_table(ctx: click.Context, group_by: Literal["device", "test"] | None =
def print_json(ctx: click.Context, output: pathlib.Path | None = None) -> None: def print_json(ctx: click.Context, output: pathlib.Path | None = None) -> None:
"""Print result in a json format.""" """Print results as JSON. If output is provided, save to file instead."""
results = _get_result_manager(ctx) results = _get_result_manager(ctx)
if output is None:
console.print() console.print()
console.print(Panel("JSON results", style="cyan")) console.print(Panel("JSON results", style="cyan"))
rich.print_json(results.json) rich.print_json(results.json)
if output is not None: else:
with output.open(mode="w", encoding="utf-8") as fout: try:
fout.write(results.json) with output.open(mode="w", encoding="utf-8") as file:
file.write(results.json)
console.print(f"JSON results saved to {output}", style="cyan")
except OSError:
console.print(f"Failed to save JSON results to {output}", style="cyan")
ctx.exit(ExitCode.USAGE_ERROR)
def print_text(ctx: click.Context) -> None: def print_text(ctx: click.Context) -> None:
@ -88,6 +132,34 @@ def print_jinja(results: ResultManager, template: pathlib.Path, output: pathlib.
file.write(report) file.write(report)
def save_to_csv(ctx: click.Context, csv_file: pathlib.Path) -> None:
"""Save results to a CSV file."""
try:
ReportCsv.generate(results=_get_result_manager(ctx), csv_filename=csv_file)
console.print(f"CSV report saved to {csv_file}", style="cyan")
except OSError:
console.print(f"Failed to save CSV report to {csv_file}", style="cyan")
ctx.exit(ExitCode.USAGE_ERROR)
def save_markdown_report(ctx: click.Context, md_output: pathlib.Path) -> None:
"""Save the markdown report to a file.
Parameters
----------
ctx
Click context containing the result manager.
md_output
Path to save the markdown report.
"""
try:
MDReportGenerator.generate(results=_get_result_manager(ctx), md_filename=md_output)
console.print(f"Markdown report saved to {md_output}", style="cyan")
except OSError:
console.print(f"Failed to save Markdown report to {md_output}", style="cyan")
ctx.exit(ExitCode.USAGE_ERROR)
# Adding our own ANTA spinner - overriding rich SPINNERS for our own # Adding our own ANTA spinner - overriding rich SPINNERS for our own
# so ignore warning for redefinition # so ignore warning for redefinition
rich.spinner.SPINNERS = { # type: ignore[attr-defined] rich.spinner.SPINNERS = { # type: ignore[attr-defined]
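The two new save helpers wrap the reporter classes directly. A hedged sketch of driving them outside the CLI; the ResultManager is assumed to be already populated by a test run:

```python
from pathlib import Path

from anta.reporter.csv_reporter import ReportCsv
from anta.reporter.md_reporter import MDReportGenerator
from anta.result_manager import ResultManager

results = ResultManager()  # assumed to have been filled by a prior run

# The same calls the CLI helpers above wrap in try/except OSError.
ReportCsv.generate(results=results, csv_filename=Path("report.csv"))
MDReportGenerator.generate(results=results, md_filename=Path("report.md"))
```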


@ -40,7 +40,6 @@ class ExitCode(enum.IntEnum):
def parse_tags(ctx: click.Context, param: Option, value: str | None) -> set[str] | None: def parse_tags(ctx: click.Context, param: Option, value: str | None) -> set[str] | None:
# pylint: disable=unused-argument
# ruff: noqa: ARG001 # ruff: noqa: ARG001
"""Click option callback to parse an ANTA inventory tags.""" """Click option callback to parse an ANTA inventory tags."""
if value is not None: if value is not None:
@ -60,9 +59,10 @@ def exit_with_code(ctx: click.Context) -> None:
* 1 if status is `failure` * 1 if status is `failure`
* 2 if status is `error`. * 2 if status is `error`.
Args: Parameters
---- ----------
ctx: Click Context ctx
Click Context.
""" """
if ctx.obj.get("ignore_status"): if ctx.obj.get("ignore_status"):
@ -112,7 +112,7 @@ class AliasedGroup(click.Group):
return cmd.name, cmd, args return cmd.name, cmd, args
def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]: def core_options(f: Callable[..., Any]) -> Callable[..., Any]:
"""Click common options when requiring an inventory to interact with devices.""" """Click common options when requiring an inventory to interact with devices."""
@click.option( @click.option(
@ -190,22 +190,12 @@ def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]:
required=True, required=True,
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path), type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path),
) )
@click.option(
"--tags",
help="List of tags using comma as separator: tag1,tag2,tag3.",
show_envvar=True,
envvar="ANTA_TAGS",
type=str,
required=False,
callback=parse_tags,
)
@click.pass_context @click.pass_context
@functools.wraps(f) @functools.wraps(f)
def wrapper( def wrapper(
ctx: click.Context, ctx: click.Context,
*args: tuple[Any], *args: tuple[Any],
inventory: Path, inventory: Path,
tags: set[str] | None,
username: str, username: str,
password: str | None, password: str | None,
enable_password: str | None, enable_password: str | None,
@ -216,10 +206,9 @@ def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]:
disable_cache: bool, disable_cache: bool,
**kwargs: dict[str, Any], **kwargs: dict[str, Any],
) -> Any: ) -> Any:
# pylint: disable=too-many-arguments
# If help is invoke somewhere, do not parse inventory # If help is invoke somewhere, do not parse inventory
if ctx.obj.get("_anta_help"): if ctx.obj.get("_anta_help"):
return f(*args, inventory=None, tags=tags, **kwargs) return f(*args, inventory=None, **kwargs)
if prompt: if prompt:
# User asked for a password prompt # User asked for a password prompt
if password is None: if password is None:
@ -255,7 +244,36 @@ def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]:
) )
except (TypeError, ValueError, YAMLError, OSError, InventoryIncorrectSchemaError, InventoryRootKeyError): except (TypeError, ValueError, YAMLError, OSError, InventoryIncorrectSchemaError, InventoryRootKeyError):
ctx.exit(ExitCode.USAGE_ERROR) ctx.exit(ExitCode.USAGE_ERROR)
return f(*args, inventory=i, tags=tags, **kwargs) return f(*args, inventory=i, **kwargs)
return wrapper
def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]:
"""Click common options when requiring an inventory to interact with devices."""
@core_options
@click.option(
"--tags",
help="List of tags using comma as separator: tag1,tag2,tag3.",
show_envvar=True,
envvar="ANTA_TAGS",
type=str,
required=False,
callback=parse_tags,
)
@click.pass_context
@functools.wraps(f)
def wrapper(
ctx: click.Context,
*args: tuple[Any],
tags: set[str] | None,
**kwargs: dict[str, Any],
) -> Any:
# If help is invoke somewhere, do not parse inventory
if ctx.obj.get("_anta_help"):
return f(*args, tags=tags, **kwargs)
return f(*args, tags=tags, **kwargs)
return wrapper return wrapper
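To illustrate the refactor above, here is a minimal sketch of how a command could consume the split decorators. The command name, its body, and the `anta.cli.utils` import location are assumptions for illustration only, and the command is expected to run inside the ANTA CLI group, which populates `ctx.obj`.

```python
# Illustrative sketch only: a hypothetical command using the refactored decorators.
# `inventory_options` stacks the --tags option on top of `core_options`, so the
# wrapped function receives both the parsed `inventory` and the `tags` set.
import click

from anta.cli.utils import inventory_options  # assumed import location
from anta.inventory import AntaInventory


@click.command()
@inventory_options
def device_count(inventory: AntaInventory, tags: set[str] | None) -> None:
    """Hypothetical command: report how many devices were loaded."""
    click.echo(f"{len(inventory)} device(s) loaded (tags={tags})")
```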
@ -268,7 +286,7 @@ def catalog_options(f: Callable[..., Any]) -> Callable[..., Any]:
"-c", "-c",
envvar="ANTA_CATALOG", envvar="ANTA_CATALOG",
show_envvar=True, show_envvar=True,
help="Path to the test catalog YAML file", help="Path to the test catalog file",
type=click.Path( type=click.Path(
file_okay=True, file_okay=True,
dir_okay=False, dir_okay=False,
@ -278,19 +296,29 @@ def catalog_options(f: Callable[..., Any]) -> Callable[..., Any]:
), ),
required=True, required=True,
) )
@click.option(
"--catalog-format",
envvar="ANTA_CATALOG_FORMAT",
show_envvar=True,
help="Format of the catalog file, either 'yaml' or 'json'",
default="yaml",
type=click.Choice(["yaml", "json"], case_sensitive=False),
)
@click.pass_context @click.pass_context
@functools.wraps(f) @functools.wraps(f)
def wrapper( def wrapper(
ctx: click.Context, ctx: click.Context,
*args: tuple[Any], *args: tuple[Any],
catalog: Path, catalog: Path,
catalog_format: str,
**kwargs: dict[str, Any], **kwargs: dict[str, Any],
) -> Any: ) -> Any:
# If help is invoked somewhere, do not parse catalog # If help is invoked somewhere, do not parse catalog
if ctx.obj.get("_anta_help"): if ctx.obj.get("_anta_help"):
return f(*args, catalog=None, **kwargs) return f(*args, catalog=None, **kwargs)
try: try:
c = AntaCatalog.parse(catalog) file_format = catalog_format.lower()
c = AntaCatalog.parse(catalog, file_format=file_format) # type: ignore[arg-type]
except (TypeError, ValueError, YAMLError, OSError): except (TypeError, ValueError, YAMLError, OSError):
ctx.exit(ExitCode.USAGE_ERROR) ctx.exit(ExitCode.USAGE_ERROR)
return f(*args, catalog=c, **kwargs) return f(*args, catalog=c, **kwargs)
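As a usage sketch for the new `--catalog-format` option, the equivalent programmatic call looks like the following; the file path is illustrative, and the `.tests` attribute access assumes the existing `AntaCatalog` API.

```python
# Minimal sketch: parse a JSON catalog explicitly, mirroring --catalog-format json.
from pathlib import Path

from anta.catalog import AntaCatalog

catalog = AntaCatalog.parse(Path("catalog.json"), file_format="json")  # path is illustrative
print(f"Loaded {len(catalog.tests)} test(s) from the catalog")
```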

19
anta/constants.py Normal file
View file

@ -0,0 +1,19 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Constants used in ANTA."""
from __future__ import annotations
ACRONYM_CATEGORIES: set[str] = {"aaa", "mlag", "snmp", "bgp", "ospf", "vxlan", "stp", "igmp", "ip", "lldp", "ntp", "bfd", "ptp", "lanz", "stun", "vlan"}
"""A set of network protocol or feature acronyms that should be represented in uppercase."""
MD_REPORT_TOC = """**Table of Contents:**
- [ANTA Report](#anta-report)
- [Test Results Summary](#test-results-summary)
- [Summary Totals](#summary-totals)
- [Summary Totals Device Under Test](#summary-totals-device-under-test)
- [Summary Totals Per Category](#summary-totals-per-category)
- [Test Results](#test-results)"""
"""Table of Contents for the Markdown report."""

View file

@ -21,6 +21,8 @@ REGEXP_TYPE_EOS_INTERFACE = r"^(Dps|Ethernet|Fabric|Loopback|Management|Port-Cha
"""Match EOS interface types like Ethernet1/1, Vlan1, Loopback1, etc.""" """Match EOS interface types like Ethernet1/1, Vlan1, Loopback1, etc."""
REGEXP_TYPE_VXLAN_SRC_INTERFACE = r"^(Loopback)([0-9]|[1-9][0-9]{1,2}|[1-7][0-9]{3}|8[01][0-9]{2}|819[01])$" REGEXP_TYPE_VXLAN_SRC_INTERFACE = r"^(Loopback)([0-9]|[1-9][0-9]{1,2}|[1-7][0-9]{3}|8[01][0-9]{2}|819[01])$"
"""Match Vxlan source interface like Loopback10.""" """Match Vxlan source interface like Loopback10."""
REGEX_TYPE_PORTCHANNEL = r"^Port-Channel[0-9]{1,6}$"
"""Match Port Channel interface like Port-Channel5."""
REGEXP_TYPE_HOSTNAME = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$" REGEXP_TYPE_HOSTNAME = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
"""Match hostname like `my-hostname`, `my-hostname-1`, `my-hostname-1-2`.""" """Match hostname like `my-hostname`, `my-hostname-1`, `my-hostname-1-2`."""
@ -112,9 +114,6 @@ def validate_regex(value: str) -> str:
return value return value
# ANTA framework
TestStatus = Literal["unset", "success", "failure", "error", "skipped"]
# AntaTest.Input types # AntaTest.Input types
AAAAuthMethod = Annotated[str, AfterValidator(aaa_group_prefix)] AAAAuthMethod = Annotated[str, AfterValidator(aaa_group_prefix)]
Vlan = Annotated[int, Field(ge=0, le=4094)] Vlan = Annotated[int, Field(ge=0, le=4094)]
@ -138,6 +137,12 @@ VxlanSrcIntf = Annotated[
BeforeValidator(interface_autocomplete), BeforeValidator(interface_autocomplete),
BeforeValidator(interface_case_sensitivity), BeforeValidator(interface_case_sensitivity),
] ]
PortChannelInterface = Annotated[
str,
Field(pattern=REGEX_TYPE_PORTCHANNEL),
BeforeValidator(interface_autocomplete),
BeforeValidator(interface_case_sensitivity),
]
Afi = Literal["ipv4", "ipv6", "vpn-ipv4", "vpn-ipv6", "evpn", "rt-membership", "path-selection", "link-state"] Afi = Literal["ipv4", "ipv6", "vpn-ipv4", "vpn-ipv6", "evpn", "rt-membership", "path-selection", "link-state"]
Safi = Literal["unicast", "multicast", "labeled-unicast", "sr-te"] Safi = Literal["unicast", "multicast", "labeled-unicast", "sr-te"]
EncryptionAlgorithm = Literal["RSA", "ECDSA"] EncryptionAlgorithm = Literal["RSA", "ECDSA"]
@ -167,3 +172,39 @@ Revision = Annotated[int, Field(ge=1, le=99)]
Hostname = Annotated[str, Field(pattern=REGEXP_TYPE_HOSTNAME)] Hostname = Annotated[str, Field(pattern=REGEXP_TYPE_HOSTNAME)]
Port = Annotated[int, Field(ge=1, le=65535)] Port = Annotated[int, Field(ge=1, le=65535)]
RegexString = Annotated[str, AfterValidator(validate_regex)] RegexString = Annotated[str, AfterValidator(validate_regex)]
BgpDropStats = Literal[
"inDropAsloop",
"inDropClusterIdLoop",
"inDropMalformedMpbgp",
"inDropOrigId",
"inDropNhLocal",
"inDropNhAfV6",
"prefixDroppedMartianV4",
"prefixDroppedMaxRouteLimitViolatedV4",
"prefixDroppedMartianV6",
"prefixDroppedMaxRouteLimitViolatedV6",
"prefixLuDroppedV4",
"prefixLuDroppedMartianV4",
"prefixLuDroppedMaxRouteLimitViolatedV4",
"prefixLuDroppedV6",
"prefixLuDroppedMartianV6",
"prefixLuDroppedMaxRouteLimitViolatedV6",
"prefixEvpnDroppedUnsupportedRouteType",
"prefixBgpLsDroppedReceptionUnsupported",
"outDropV4LocalAddr",
"outDropV6LocalAddr",
"prefixVpnIpv4DroppedImportMatchFailure",
"prefixVpnIpv4DroppedMaxRouteLimitViolated",
"prefixVpnIpv6DroppedImportMatchFailure",
"prefixVpnIpv6DroppedMaxRouteLimitViolated",
"prefixEvpnDroppedImportMatchFailure",
"prefixEvpnDroppedMaxRouteLimitViolated",
"prefixRtMembershipDroppedLocalAsReject",
"prefixRtMembershipDroppedMaxRouteLimitViolated",
]
BgpUpdateError = Literal["inUpdErrWithdraw", "inUpdErrIgnore", "inUpdErrDisableAfiSafi", "disabledAfiSafi", "lastUpdErrTime"]
BfdProtocol = Literal["bgp", "isis", "lag", "ospf", "ospfv3", "pim", "route-input", "static-bfd", "static-route", "vrrp", "vxlan"]
SnmpPdu = Literal["inGetPdus", "inGetNextPdus", "inSetPdus", "outGetResponsePdus", "outTrapPdus"]
SnmpErrorCounter = Literal[
"inVersionErrs", "inBadCommunityNames", "inBadCommunityUses", "inParseErrs", "outTooBigErrs", "outNoSuchNameErrs", "outBadValueErrs", "outGeneralErrs"
]
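A minimal sketch of how the new `PortChannelInterface` type could be used in a Pydantic input model; the model name and field are hypothetical.

```python
# Hypothetical model: validates a Port-Channel interface name against the new type.
from pydantic import BaseModel

from anta.custom_types import PortChannelInterface


class PortChannelCheck(BaseModel):
    """Illustrative input model."""

    interface: PortChannelInterface


print(PortChannelCheck(interface="Port-Channel5").interface)  # Port-Channel5
```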

View file

@ -20,26 +20,30 @@ F = TypeVar("F", bound=Callable[..., Any])
def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]:
"""Return a decorator to log a message of WARNING severity when a test is deprecated. """Return a decorator to log a message of WARNING severity when a test is deprecated.
Args: Parameters
---- ----------
new_tests: A list of new test classes that should replace the deprecated test. new_tests
A list of new test classes that should replace the deprecated test.
Returns Returns
------- -------
Callable[[F], F]: A decorator that can be used to wrap test functions. Callable[[F], F]
A decorator that can be used to wrap test functions.
""" """
def decorator(function: F) -> F: def decorator(function: F) -> F:
"""Actual decorator that logs the message. """Actual decorator that logs the message.
Args: Parameters
---- ----------
function: The test function to be decorated. function
The test function to be decorated.
Returns Returns
------- -------
F: The decorated function. F
The decorated function.
""" """
@ -64,26 +68,30 @@ def skip_on_platforms(platforms: list[str]) -> Callable[[F], F]:
This decorator factory generates a decorator that will check the hardware model of the device This decorator factory generates a decorator that will check the hardware model of the device
the test is run on. If the model is in the list of platforms specified, the test will be skipped. the test is run on. If the model is in the list of platforms specified, the test will be skipped.
Args: Parameters
---- ----------
platforms: List of hardware models on which the test should be skipped. platforms
List of hardware models on which the test should be skipped.
Returns Returns
------- -------
Callable[[F], F]: A decorator that can be used to wrap test functions. Callable[[F], F]
A decorator that can be used to wrap test functions.
""" """
def decorator(function: F) -> F: def decorator(function: F) -> F:
"""Actual decorator that either runs the test or skips it based on the device's hardware model. """Actual decorator that either runs the test or skips it based on the device's hardware model.
Args: Parameters
---- ----------
function: The test function to be decorated. function
The test function to be decorated.
Returns Returns
------- -------
F: The decorated function. F
The decorated function.
""" """

View file

@ -42,24 +42,34 @@ class AntaDevice(ABC):
Attributes Attributes
---------- ----------
name: Device name name : str
is_online: True if the device IP is reachable and a port can be open. Device name.
established: True if remote command execution succeeds. is_online : bool
hw_model: Hardware model of the device. True if the device IP is reachable and a port can be open.
tags: Tags for this device. established : bool
cache: In-memory cache from aiocache library for this device (None if cache is disabled). True if remote command execution succeeds.
cache_locks: Dictionary mapping keys to asyncio locks to guarantee exclusive access to the cache if not disabled. hw_model : str
Hardware model of the device.
tags : set[str]
Tags for this device.
cache : Cache | None
In-memory cache from aiocache library for this device (None if cache is disabled).
cache_locks : dict
Dictionary mapping keys to asyncio locks to guarantee exclusive access to the cache if not disabled.
""" """
def __init__(self, name: str, tags: set[str] | None = None, *, disable_cache: bool = False) -> None: def __init__(self, name: str, tags: set[str] | None = None, *, disable_cache: bool = False) -> None:
"""Initialize an AntaDevice. """Initialize an AntaDevice.
Args: Parameters
---- ----------
name: Device name. name
tags: Tags for this device. Device name.
disable_cache: Disable caching for all commands for this device. tags
Tags for this device.
disable_cache
Disable caching for all commands for this device.
""" """
self.name: str = name self.name: str = name
@ -96,7 +106,7 @@ class AntaDevice(ABC):
@property @property
def cache_statistics(self) -> dict[str, Any] | None: def cache_statistics(self) -> dict[str, Any] | None:
"""Returns the device cache statistics for logging purposes.""" """Return the device cache statistics for logging purposes."""
# Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough # Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough
# https://github.com/pylint-dev/pylint/issues/7258 # https://github.com/pylint-dev/pylint/issues/7258
if self.cache is not None: if self.cache is not None:
@ -116,6 +126,17 @@ class AntaDevice(ABC):
yield "established", self.established yield "established", self.established
yield "disable_cache", self.cache is None yield "disable_cache", self.cache is None
def __repr__(self) -> str:
"""Return a printable representation of an AntaDevice."""
return (
f"AntaDevice({self.name!r}, "
f"tags={self.tags!r}, "
f"hw_model={self.hw_model!r}, "
f"is_online={self.is_online!r}, "
f"established={self.established!r}, "
f"disable_cache={self.cache is None!r})"
)
@abstractmethod @abstractmethod
async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None:
"""Collect device command output. """Collect device command output.
@ -130,10 +151,12 @@ class AntaDevice(ABC):
exception and implement proper logging, the `output` attribute of the exception and implement proper logging, the `output` attribute of the
`AntaCommand` object passed as argument would be `None` in this case. `AntaCommand` object passed as argument would be `None` in this case.
Args: Parameters
---- ----------
command: The command to collect. command
collection_id: An identifier used to build the eAPI request ID. The command to collect.
collection_id
An identifier used to build the eAPI request ID.
""" """
async def collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: async def collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None:
@ -147,10 +170,12 @@ class AntaDevice(ABC):
When caching is NOT enabled, either at the device or command level, the method directly collects the output When caching is NOT enabled, either at the device or command level, the method directly collects the output
via the private `_collect` method without interacting with the cache. via the private `_collect` method without interacting with the cache.
Args: Parameters
---- ----------
command: The command to collect. command
collection_id: An identifier used to build the eAPI request ID. The command to collect.
collection_id
An identifier used to build the eAPI request ID.
""" """
# Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough # Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough
# https://github.com/pylint-dev/pylint/issues/7258 # https://github.com/pylint-dev/pylint/issues/7258
@ -170,10 +195,12 @@ class AntaDevice(ABC):
async def collect_commands(self, commands: list[AntaCommand], *, collection_id: str | None = None) -> None: async def collect_commands(self, commands: list[AntaCommand], *, collection_id: str | None = None) -> None:
"""Collect multiple commands. """Collect multiple commands.
Args: Parameters
---- ----------
commands: The commands to collect. commands
collection_id: An identifier used to build the eAPI request ID. The commands to collect.
collection_id
An identifier used to build the eAPI request ID.
""" """
await asyncio.gather(*(self.collect(command=command, collection_id=collection_id) for command in commands)) await asyncio.gather(*(self.collect(command=command, collection_id=collection_id) for command in commands))
@ -182,9 +209,12 @@ class AntaDevice(ABC):
"""Update attributes of an AntaDevice instance. """Update attributes of an AntaDevice instance.
This coroutine must update the following attributes of AntaDevice: This coroutine must update the following attributes of AntaDevice:
- `is_online`: When the device IP is reachable and a port can be open
- `established`: When a command execution succeeds - `is_online`: When the device IP is reachable and a port can be open.
- `hw_model`: The hardware model of the device
- `established`: When a command execution succeeds.
- `hw_model`: The hardware model of the device.
""" """
async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None: async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
@ -192,11 +222,14 @@ class AntaDevice(ABC):
It is not mandatory to implement this for a valid AntaDevice subclass. It is not mandatory to implement this for a valid AntaDevice subclass.
Args: Parameters
---- ----------
sources: List of files to copy to or from the device. sources
destination: Local or remote destination when copying the files. Can be a folder. List of files to copy to or from the device.
direction: Defines if this coroutine copies files to or from the device. destination
Local or remote destination when copying the files. Can be a folder.
direction
Defines if this coroutine copies files to or from the device.
""" """
_ = (sources, destination, direction) _ = (sources, destination, direction)
@ -209,15 +242,19 @@ class AsyncEOSDevice(AntaDevice):
Attributes Attributes
---------- ----------
name: Device name name : str
is_online: True if the device IP is reachable and a port can be open Device name.
established: True if remote command execution succeeds is_online : bool
hw_model: Hardware model of the device True if the device IP is reachable and a port can be open.
tags: Tags for this device established : bool
True if remote command execution succeeds.
hw_model : str
Hardware model of the device.
tags : set[str]
Tags for this device.
""" """
# pylint: disable=R0913
def __init__( def __init__(
self, self,
host: str, host: str,
@ -237,21 +274,34 @@ class AsyncEOSDevice(AntaDevice):
) -> None: ) -> None:
"""Instantiate an AsyncEOSDevice. """Instantiate an AsyncEOSDevice.
Args: Parameters
---- ----------
host: Device FQDN or IP. host
username: Username to connect to eAPI and SSH. Device FQDN or IP.
password: Password to connect to eAPI and SSH. username
name: Device name. Username to connect to eAPI and SSH.
enable: Collect commands using privileged mode. password
enable_password: Password used to gain privileged access on EOS. Password to connect to eAPI and SSH.
port: eAPI port. Defaults to 80 if proto is 'http' or 443 if proto is 'https'. name
ssh_port: SSH port. Device name.
tags: Tags for this device. enable
timeout: Timeout value in seconds for outgoing API calls. Collect commands using privileged mode.
insecure: Disable SSH Host Key validation. enable_password
proto: eAPI protocol. Value can be 'http' or 'https'. Password used to gain privileged access on EOS.
disable_cache: Disable caching for all commands for this device. port
eAPI port. Defaults to 80 if proto is 'http' or 443 if proto is 'https'.
ssh_port
SSH port.
tags
Tags for this device.
timeout
Timeout value in seconds for outgoing API calls.
insecure
Disable SSH Host Key validation.
proto
eAPI protocol. Value can be 'http' or 'https'.
disable_cache
Disable caching for all commands for this device.
""" """
if host is None: if host is None:
@ -298,6 +348,22 @@ class AsyncEOSDevice(AntaDevice):
yield ("_session", vars(self._session)) yield ("_session", vars(self._session))
yield ("_ssh_opts", _ssh_opts) yield ("_ssh_opts", _ssh_opts)
def __repr__(self) -> str:
"""Return a printable representation of an AsyncEOSDevice."""
return (
f"AsyncEOSDevice({self.name!r}, "
f"tags={self.tags!r}, "
f"hw_model={self.hw_model!r}, "
f"is_online={self.is_online!r}, "
f"established={self.established!r}, "
f"disable_cache={self.cache is None!r}, "
f"host={self._session.host!r}, "
f"eapi_port={self._session.port!r}, "
f"username={self._ssh_opts.username!r}, "
f"enable={self.enable!r}, "
f"insecure={self._ssh_opts.known_hosts is None!r})"
)
@property @property
def _keys(self) -> tuple[Any, ...]: def _keys(self) -> tuple[Any, ...]:
"""Two AsyncEOSDevice objects are equal if the hostname and the port are the same. """Two AsyncEOSDevice objects are equal if the hostname and the port are the same.
@ -306,17 +372,19 @@ class AsyncEOSDevice(AntaDevice):
""" """
return (self._session.host, self._session.port) return (self._session.host, self._session.port)
async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: # noqa: C901 function is too complex - because of many required except blocks #pylint: disable=line-too-long async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: # noqa: C901 function is too complex - because of many required except blocks
"""Collect device command output from EOS using aio-eapi. """Collect device command output from EOS using aio-eapi.
Supports outformat `json` and `text` as output structure. Supports outformat `json` and `text` as output structure.
Gain privileged access using the `enable_password` attribute Gain privileged access using the `enable_password` attribute
of the `AntaDevice` instance if populated. of the `AntaDevice` instance if populated.
Args: Parameters
---- ----------
command: The command to collect. command
collection_id: An identifier used to build the eAPI request ID. The command to collect.
collection_id
An identifier used to build the eAPI request ID.
""" """
commands: list[dict[str, str | int]] = [] commands: list[dict[str, str | int]] = []
if self.enable and self._enable_password is not None: if self.enable and self._enable_password is not None:
@ -397,6 +465,10 @@ class AsyncEOSDevice(AntaDevice):
self.hw_model = show_version.json_output.get("modelName", None) self.hw_model = show_version.json_output.get("modelName", None)
if self.hw_model is None: if self.hw_model is None:
logger.critical("Cannot parse 'show version' returned by device %s", self.name) logger.critical("Cannot parse 'show version' returned by device %s", self.name)
# in some cases it is possible that 'modelName' comes back empty
# and it is nice to get a meaningful error message
elif self.hw_model == "":
logger.critical("Got an empty 'modelName' in the 'show version' returned by device %s", self.name)
else: else:
logger.warning("Could not connect to device %s: cannot open eAPI port", self.name) logger.warning("Could not connect to device %s: cannot open eAPI port", self.name)
@ -405,11 +477,14 @@ class AsyncEOSDevice(AntaDevice):
async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None: async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
"""Copy files to and from the device using asyncssh.scp(). """Copy files to and from the device using asyncssh.scp().
Args: Parameters
---- ----------
sources: List of files to copy to or from the device. sources
destination: Local or remote destination when copying the files. Can be a folder. List of files to copy to or from the device.
direction: Defines if this coroutine copies files to or from the device. destination
Local or remote destination when copying the files. Can be a folder.
direction
Defines if this coroutine copies files to or from the device.
""" """
async with asyncssh.connect( async with asyncssh.connect(

View file

@ -44,10 +44,12 @@ class AntaInventory(dict[str, AntaDevice]):
def _update_disable_cache(kwargs: dict[str, Any], *, inventory_disable_cache: bool) -> dict[str, Any]: def _update_disable_cache(kwargs: dict[str, Any], *, inventory_disable_cache: bool) -> dict[str, Any]:
"""Return new dictionary, replacing kwargs with added disable_cache value from inventory_value if disable_cache has not been set by CLI. """Return new dictionary, replacing kwargs with added disable_cache value from inventory_value if disable_cache has not been set by CLI.
Args: Parameters
---- ----------
inventory_disable_cache: The value of disable_cache in the inventory inventory_disable_cache
kwargs: The kwargs to instantiate the device The value of disable_cache in the inventory.
kwargs
The kwargs to instantiate the device.
""" """
updated_kwargs = kwargs.copy() updated_kwargs = kwargs.copy()
@ -62,11 +64,14 @@ class AntaInventory(dict[str, AntaDevice]):
) -> None: ) -> None:
"""Parse the host section of an AntaInventoryInput and add the devices to the inventory. """Parse the host section of an AntaInventoryInput and add the devices to the inventory.
Args: Parameters
---- ----------
inventory_input: AntaInventoryInput used to parse the devices inventory_input
inventory: AntaInventory to add the parsed devices to AntaInventoryInput used to parse the devices.
**kwargs: Additional keyword arguments to pass to the device constructor inventory
AntaInventory to add the parsed devices to.
**kwargs
Additional keyword arguments to pass to the device constructor.
""" """
if inventory_input.hosts is None: if inventory_input.hosts is None:
@ -91,15 +96,19 @@ class AntaInventory(dict[str, AntaDevice]):
) -> None: ) -> None:
"""Parse the network section of an AntaInventoryInput and add the devices to the inventory. """Parse the network section of an AntaInventoryInput and add the devices to the inventory.
Args: Parameters
---- ----------
inventory_input: AntaInventoryInput used to parse the devices inventory_input
inventory: AntaInventory to add the parsed devices to AntaInventoryInput used to parse the devices.
**kwargs: Additional keyword arguments to pass to the device constructor inventory
AntaInventory to add the parsed devices to.
**kwargs
Additional keyword arguments to pass to the device constructor.
Raises Raises
------ ------
InventoryIncorrectSchemaError: Inventory file is not following AntaInventory Schema. InventoryIncorrectSchemaError
Inventory file is not following AntaInventory Schema.
""" """
if inventory_input.networks is None: if inventory_input.networks is None:
@ -124,15 +133,19 @@ class AntaInventory(dict[str, AntaDevice]):
) -> None: ) -> None:
"""Parse the range section of an AntaInventoryInput and add the devices to the inventory. """Parse the range section of an AntaInventoryInput and add the devices to the inventory.
Args: Parameters
---- ----------
inventory_input: AntaInventoryInput used to parse the devices inventory_input
inventory: AntaInventory to add the parsed devices to AntaInventoryInput used to parse the devices.
**kwargs: Additional keyword arguments to pass to the device constructor inventory
AntaInventory to add the parsed devices to.
**kwargs
Additional keyword arguments to pass to the device constructor.
Raises Raises
------ ------
InventoryIncorrectSchemaError: Inventory file is not following AntaInventory Schema. InventoryIncorrectSchemaError
Inventory file is not following AntaInventory Schema.
""" """
if inventory_input.ranges is None: if inventory_input.ranges is None:
@ -158,7 +171,6 @@ class AntaInventory(dict[str, AntaDevice]):
anta_log_exception(e, message, logger) anta_log_exception(e, message, logger)
raise InventoryIncorrectSchemaError(message) from e raise InventoryIncorrectSchemaError(message) from e
# pylint: disable=too-many-arguments
@staticmethod @staticmethod
def parse( def parse(
filename: str | Path, filename: str | Path,
@ -175,21 +187,31 @@ class AntaInventory(dict[str, AntaDevice]):
The inventory devices are AsyncEOSDevice instances. The inventory devices are AsyncEOSDevice instances.
Args: Parameters
---- ----------
filename: Path to device inventory YAML file. filename
username: Username to use to connect to devices. Path to device inventory YAML file.
password: Password to use to connect to devices. username
enable_password: Enable password to use if required. Username to use to connect to devices.
timeout: Timeout value in seconds for outgoing API calls. password
enable: Whether or not the commands need to be run in enable mode towards the devices. Password to use to connect to devices.
insecure: Disable SSH Host Key validation. enable_password
disable_cache: Disable cache globally. Enable password to use if required.
timeout
Timeout value in seconds for outgoing API calls.
enable
Whether or not the commands need to be run in enable mode towards the devices.
insecure
Disable SSH Host Key validation.
disable_cache
Disable cache globally.
Raises Raises
------ ------
InventoryRootKeyError: Root key of inventory is missing. InventoryRootKeyError
InventoryIncorrectSchemaError: Inventory file is not following AntaInventory Schema. Root key of inventory is missing.
InventoryIncorrectSchemaError
Inventory file is not following AntaInventory Schema.
""" """
inventory = AntaInventory() inventory = AntaInventory()
@ -254,14 +276,18 @@ class AntaInventory(dict[str, AntaDevice]):
def get_inventory(self, *, established_only: bool = False, tags: set[str] | None = None, devices: set[str] | None = None) -> AntaInventory: def get_inventory(self, *, established_only: bool = False, tags: set[str] | None = None, devices: set[str] | None = None) -> AntaInventory:
"""Return a filtered inventory. """Return a filtered inventory.
Args: Parameters
---- ----------
established_only: Whether or not to include only established devices. established_only
tags: Tags to filter devices. Whether or not to include only established devices.
devices: Names to filter devices. tags
Tags to filter devices.
devices
Names to filter devices.
Returns Returns
------- -------
AntaInventory
An inventory with filtered AntaDevice objects. An inventory with filtered AntaDevice objects.
""" """
@ -293,9 +319,10 @@ class AntaInventory(dict[str, AntaDevice]):
def add_device(self, device: AntaDevice) -> None: def add_device(self, device: AntaDevice) -> None:
"""Add a device to final inventory. """Add a device to final inventory.
Args: Parameters
---- ----------
device: Device object to be added device
Device object to be added.
""" """
self[device.name] = device self[device.name] = device

View file

@ -21,11 +21,16 @@ class AntaInventoryHost(BaseModel):
Attributes Attributes
---------- ----------
host: IP Address or FQDN of the device. host : Hostname | IPvAnyAddress
port: Custom eAPI port to use. IP Address or FQDN of the device.
name: Custom name of the device. port : Port | None
tags: Tags of the device. Custom eAPI port to use.
disable_cache: Disable cache for this device. name : str | None
Custom name of the device.
tags : set[str]
Tags of the device.
disable_cache : bool
Disable cache for this device.
""" """
@ -43,9 +48,12 @@ class AntaInventoryNetwork(BaseModel):
Attributes Attributes
---------- ----------
network: Subnet to use for scanning. network : IPvAnyNetwork
tags: Tags of the devices in this network. Subnet to use for scanning.
disable_cache: Disable cache for all devices in this network. tags : set[str]
Tags of the devices in this network.
disable_cache : bool
Disable cache for all devices in this network.
""" """
@ -61,10 +69,14 @@ class AntaInventoryRange(BaseModel):
Attributes Attributes
---------- ----------
start: IPv4 or IPv6 address for the beginning of the range. start : IPvAnyAddress
stop: IPv4 or IPv6 address for the end of the range. IPv4 or IPv6 address for the beginning of the range.
tags: Tags of the devices in this IP range. stop : IPvAnyAddress
disable_cache: Disable cache for all devices in this IP range. IPv4 or IPv6 address for the end of the range.
tags : set[str]
Tags of the devices in this IP range.
disable_cache : bool
Disable cache for all devices in this IP range.
""" """
@ -90,6 +102,7 @@ class AntaInventoryInput(BaseModel):
Returns Returns
------- -------
str
The YAML representation string of this model. The YAML representation string of this model.
""" """
# TODO: Pydantic and YAML serialization/deserialization is not supported natively. # TODO: Pydantic and YAML serialization/deserialization is not supported natively.

View file

@ -49,10 +49,12 @@ def setup_logging(level: LogLevel = Log.INFO, file: Path | None = None) -> None:
If a file is provided and logging level is DEBUG, only the logging level INFO and higher will If a file is provided and logging level is DEBUG, only the logging level INFO and higher will
be logged to stdout while all levels will be logged in the file. be logged to stdout while all levels will be logged in the file.
Args: Parameters
---- ----------
level: ANTA logging level level
file: Send logs to a file ANTA logging level
file
Send logs to a file
""" """
# Init root logger # Init root logger
@ -104,11 +106,14 @@ def anta_log_exception(exception: BaseException, message: str | None = None, cal
If `anta.__DEBUG__` is True then the `logger.exception` method is called to get the traceback, otherwise `logger.error` is called. If `anta.__DEBUG__` is True then the `logger.exception` method is called to get the traceback, otherwise `logger.error` is called.
Args: Parameters
---- ----------
exception: The Exception being logged. exception
message: An optional message. The Exception being logged.
calling_logger: A logger to which the exception should be logged. If not present, the logger in this file is used. message
An optional message.
calling_logger
A logger to which the exception should be logged. If not present, the logger in this file is used.
""" """
if calling_logger is None: if calling_logger is None:

View file

@ -18,7 +18,7 @@ from pydantic import BaseModel, ConfigDict, ValidationError, create_model
from anta import GITHUB_SUGGESTION from anta import GITHUB_SUGGESTION
from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision
from anta.logger import anta_log_exception, exc_to_str from anta.logger import anta_log_exception, exc_to_str
from anta.result_manager.models import TestResult from anta.result_manager.models import AntaTestStatus, TestResult
if TYPE_CHECKING: if TYPE_CHECKING:
from collections.abc import Coroutine from collections.abc import Coroutine
@ -48,16 +48,21 @@ class AntaTemplate:
Attributes Attributes
---------- ----------
template: Python f-string. Example: 'show vlan {vlan_id}' template
version: eAPI version - valid values are 1 or "latest". Python f-string. Example: 'show vlan {vlan_id}'.
revision: Revision of the command. Valid values are 1 to 99. Revision has precedence over version. version
ofmt: eAPI output - json or text. eAPI version - valid values are 1 or "latest".
use_cache: Enable or disable caching for this AntaTemplate if the AntaDevice supports it. revision
Revision of the command. Valid values are 1 to 99. Revision has precedence over version.
ofmt
eAPI output - json or text.
use_cache
Enable or disable caching for this AntaTemplate if the AntaDevice supports it.
""" """
# pylint: disable=too-few-public-methods # pylint: disable=too-few-public-methods
def __init__( # noqa: PLR0913 def __init__(
self, self,
template: str, template: str,
version: Literal[1, "latest"] = "latest", version: Literal[1, "latest"] = "latest",
@ -66,7 +71,6 @@ class AntaTemplate:
*, *,
use_cache: bool = True, use_cache: bool = True,
) -> None: ) -> None:
# pylint: disable=too-many-arguments
self.template = template self.template = template
self.version = version self.version = version
self.revision = revision self.revision = revision
@ -95,12 +99,14 @@ class AntaTemplate:
Keep the parameters used in the AntaTemplate instance. Keep the parameters used in the AntaTemplate instance.
Args: Parameters
---- ----------
params: dictionary of variables with string values to render the Python f-string params
Dictionary of variables with string values to render the Python f-string.
Returns Returns
------- -------
AntaCommand
The rendered AntaCommand. The rendered AntaCommand.
This AntaCommand instance has a template attribute that references this This AntaCommand instance has a template attribute that references this
AntaTemplate instance. AntaTemplate instance.
@ -141,15 +147,24 @@ class AntaCommand(BaseModel):
Attributes Attributes
---------- ----------
command: Device command command
version: eAPI version - valid values are 1 or "latest". Device command.
revision: eAPI revision of the command. Valid values are 1 to 99. Revision has precedence over version. version
ofmt: eAPI output - json or text. eAPI version - valid values are 1 or "latest".
output: Output of the command. Only defined if there were no errors. revision
template: AntaTemplate object used to render this command. eAPI revision of the command. Valid values are 1 to 99. Revision has precedence over version.
errors: If the command execution fails, eAPI returns a list of strings detailing the error(s). ofmt
params: Pydantic Model containing the variable values used to render the template. eAPI output - json or text.
use_cache: Enable or disable caching for this AntaCommand if the AntaDevice supports it. output
Output of the command. Only defined if there were no errors.
template
AntaTemplate object used to render this command.
errors
If the command execution fails, eAPI returns a list of strings detailing the error(s).
params
Pydantic Model containing the variable values used to render the template.
use_cache
Enable or disable caching for this AntaCommand if the AntaDevice supports it.
""" """
@ -245,10 +260,12 @@ class AntaTemplateRenderError(RuntimeError):
def __init__(self, template: AntaTemplate, key: str) -> None: def __init__(self, template: AntaTemplate, key: str) -> None:
"""Initialize an AntaTemplateRenderError. """Initialize an AntaTemplateRenderError.
Args: Parameters
---- ----------
template: The AntaTemplate instance that failed to render template
key: Key that has not been provided to render the template The AntaTemplate instance that failed to render.
key
Key that has not been provided to render the template.
""" """
self.template = template self.template = template
@ -297,11 +314,16 @@ class AntaTest(ABC):
Attributes Attributes
---------- ----------
device: AntaDevice instance on which this test is run device
inputs: AntaTest.Input instance carrying the test inputs AntaDevice instance on which this test is run.
instance_commands: List of AntaCommand instances of this test inputs
result: TestResult instance representing the result of this test AntaTest.Input instance carrying the test inputs.
logger: Python logger for this test instance instance_commands
List of AntaCommand instances of this test.
result
TestResult instance representing the result of this test.
logger
Python logger for this test instance.
""" """
# Mandatory class attributes # Mandatory class attributes
@ -332,7 +354,8 @@ class AntaTest(ABC):
Attributes Attributes
---------- ----------
result_overwrite: Define fields to overwrite in the TestResult object result_overwrite
Define fields to overwrite in the TestResult object.
""" """
model_config = ConfigDict(extra="forbid") model_config = ConfigDict(extra="forbid")
@ -351,9 +374,12 @@ class AntaTest(ABC):
Attributes Attributes
---------- ----------
description: overwrite TestResult.description description
categories: overwrite TestResult.categories Overwrite `TestResult.description`.
custom_field: a free string that will be included in the TestResult object categories
Overwrite `TestResult.categories`.
custom_field
A free string that will be included in the TestResult object.
""" """
@ -367,7 +393,8 @@ class AntaTest(ABC):
Attributes Attributes
---------- ----------
tags: Tag of devices on which to run the test. tags
Tag of devices on which to run the test.
""" """
model_config = ConfigDict(extra="forbid") model_config = ConfigDict(extra="forbid")
@ -381,11 +408,14 @@ class AntaTest(ABC):
) -> None: ) -> None:
"""AntaTest Constructor. """AntaTest Constructor.
Args: Parameters
---- ----------
device: AntaDevice instance on which the test will be run device
inputs: dictionary of attributes used to instantiate the AntaTest.Input instance AntaDevice instance on which the test will be run.
eos_data: Populate outputs of the test commands instead of collecting from devices. inputs
Dictionary of attributes used to instantiate the AntaTest.Input instance.
eos_data
Populate outputs of the test commands instead of collecting from devices.
This list must have the same length and order as the `instance_commands` instance attribute. This list must have the same length and order as the `instance_commands` instance attribute.
""" """
self.logger: logging.Logger = logging.getLogger(f"{self.module}.{self.__class__.__name__}") self.logger: logging.Logger = logging.getLogger(f"{self.module}.{self.__class__.__name__}")
@ -399,7 +429,7 @@ class AntaTest(ABC):
description=self.description, description=self.description,
) )
self._init_inputs(inputs) self._init_inputs(inputs)
if self.result.result == "unset": if self.result.result == AntaTestStatus.UNSET:
self._init_commands(eos_data) self._init_commands(eos_data)
def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None: def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None:
@ -450,7 +480,7 @@ class AntaTest(ABC):
except NotImplementedError as e: except NotImplementedError as e:
self.result.is_error(message=e.args[0]) self.result.is_error(message=e.args[0])
return return
except Exception as e: # pylint: disable=broad-exception-caught except Exception as e: # noqa: BLE001
# render() is user-defined code. # render() is user-defined code.
# We need to catch everything if we want the AntaTest object # We need to catch everything if we want the AntaTest object
# to live until the reporting # to live until the reporting
@ -528,7 +558,7 @@ class AntaTest(ABC):
try: try:
if self.blocked is False: if self.blocked is False:
await self.device.collect_commands(self.instance_commands, collection_id=self.name) await self.device.collect_commands(self.instance_commands, collection_id=self.name)
except Exception as e: # pylint: disable=broad-exception-caught except Exception as e: # noqa: BLE001
# device._collect() is user-defined code. # device._collect() is user-defined code.
# We need to catch everything if we want the AntaTest object # We need to catch everything if we want the AntaTest object
# to live until the reporting # to live until the reporting
@ -556,16 +586,20 @@ class AntaTest(ABC):
) -> TestResult: ) -> TestResult:
"""Inner function for the anta_test decorator. """Inner function for the anta_test decorator.
Args: Parameters
---- ----------
self: The test instance. self
eos_data: Populate outputs of the test commands instead of collecting from devices. The test instance.
eos_data
Populate outputs of the test commands instead of collecting from devices.
This list must have the same length and order as the `instance_commands` instance attribute. This list must have the same length and order as the `instance_commands` instance attribute.
kwargs: Any keyword argument to pass to the test. kwargs
Any keyword argument to pass to the test.
Returns Returns
------- -------
result: TestResult instance attribute populated with error status if any TestResult
The TestResult instance attribute populated with error status if any.
""" """
if self.result.result != "unset": if self.result.result != "unset":
@ -596,7 +630,7 @@ class AntaTest(ABC):
try: try:
function(self, **kwargs) function(self, **kwargs)
except Exception as e: # pylint: disable=broad-exception-caught except Exception as e: # noqa: BLE001
# test() is user-defined code. # test() is user-defined code.
# We need to catch everything if we want the AntaTest object # We need to catch everything if we want the AntaTest object
# to live until the reporting # to live until the reporting

View file

@ -7,19 +7,20 @@
from __future__ import annotations from __future__ import annotations
import logging import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any from typing import TYPE_CHECKING, Any
from jinja2 import Template from jinja2 import Template
from rich.table import Table from rich.table import Table
from anta import RICH_COLOR_PALETTE, RICH_COLOR_THEME from anta import RICH_COLOR_PALETTE, RICH_COLOR_THEME
from anta.tools import convert_categories
if TYPE_CHECKING: if TYPE_CHECKING:
import pathlib import pathlib
from anta.custom_types import TestStatus
from anta.result_manager import ResultManager from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult from anta.result_manager.models import AntaTestStatus, TestResult
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -27,17 +28,33 @@ logger = logging.getLogger(__name__)
class ReportTable: class ReportTable:
"""TableReport Generate a Table based on TestResult.""" """TableReport Generate a Table based on TestResult."""
@dataclass()
class Headers: # pylint: disable=too-many-instance-attributes
"""Headers for the table report."""
device: str = "Device"
test_case: str = "Test Name"
number_of_success: str = "# of success"
number_of_failure: str = "# of failure"
number_of_skipped: str = "# of skipped"
number_of_errors: str = "# of errors"
list_of_error_nodes: str = "List of failed or error nodes"
list_of_error_tests: str = "List of failed or error test cases"
def _split_list_to_txt_list(self, usr_list: list[str], delimiter: str | None = None) -> str: def _split_list_to_txt_list(self, usr_list: list[str], delimiter: str | None = None) -> str:
"""Split list to multi-lines string. """Split list to multi-lines string.
Args: Parameters
---- ----------
usr_list (list[str]): List of string to concatenate usr_list : list[str]
delimiter (str, optional): A delimiter to use to start string. Defaults to None. List of string to concatenate.
delimiter : str, optional
A delimiter to use to start string. Defaults to None.
Returns Returns
------- -------
str: Multi-lines string str
Multi-lines string.
""" """
if delimiter is not None: if delimiter is not None:
@ -49,55 +66,58 @@ class ReportTable:
First key is considered as header and is colored using RICH_COLOR_PALETTE.HEADER First key is considered as header and is colored using RICH_COLOR_PALETTE.HEADER
Args: Parameters
---- ----------
headers: List of headers. headers
table: A rich Table instance. List of headers.
table
A rich Table instance.
Returns Returns
------- -------
Table
A rich `Table` instance with headers. A rich `Table` instance with headers.
""" """
for idx, header in enumerate(headers): for idx, header in enumerate(headers):
if idx == 0: if idx == 0:
table.add_column(header, justify="left", style=RICH_COLOR_PALETTE.HEADER, no_wrap=True) table.add_column(header, justify="left", style=RICH_COLOR_PALETTE.HEADER, no_wrap=True)
elif header == "Test Name":
# We always want the full test name
table.add_column(header, justify="left", no_wrap=True)
else: else:
table.add_column(header, justify="left") table.add_column(header, justify="left")
return table return table
def _color_result(self, status: TestStatus) -> str: def _color_result(self, status: AntaTestStatus) -> str:
"""Return a colored string based on the status value. """Return a colored string based on an AntaTestStatus.
Args: Parameters
---- ----------
status (TestStatus): status value to color. status
AntaTestStatus enum to color.
Returns Returns
------- -------
str: the colored string str
The colored string.
""" """
color = RICH_COLOR_THEME.get(status, "") color = RICH_COLOR_THEME.get(str(status), "")
return f"[{color}]{status}" if color != "" else str(status) return f"[{color}]{status}" if color != "" else str(status)
def report_all(self, manager: ResultManager, title: str = "All tests results") -> Table: def report_all(self, manager: ResultManager, title: str = "All tests results") -> Table:
"""Create a table report with all tests for one or all devices. """Create a table report with all tests for one or all devices.
Create table with full output: Host / Test / Status / Message Create table with full output: Device | Test Name | Test Status | Message(s) | Test description | Test category
Args: Parameters
---- ----------
manager: A ResultManager instance. manager
title: Title for the report. Defaults to 'All tests results'. A ResultManager instance.
title
Title for the report. Defaults to 'All tests results'.
Returns Returns
------- -------
A fully populated rich `Table` Table
A fully populated rich `Table`.
""" """
table = Table(title=title, show_lines=True) table = Table(title=title, show_lines=True)
headers = ["Device", "Test Name", "Test Status", "Message(s)", "Test description", "Test category"] headers = ["Device", "Test Name", "Test Status", "Message(s)", "Test description", "Test category"]
@ -106,7 +126,7 @@ class ReportTable:
def add_line(result: TestResult) -> None: def add_line(result: TestResult) -> None:
state = self._color_result(result.result) state = self._color_result(result.result)
message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else "" message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
categories = ", ".join(result.categories) categories = ", ".join(convert_categories(result.categories))
table.add_row(str(result.name), result.test, state, message, result.description, categories) table.add_row(str(result.name), result.test, state, message, result.description, categories)
for result in manager.results: for result in manager.results:
@ -121,43 +141,42 @@ class ReportTable:
) -> Table: ) -> Table:
"""Create a table report with result aggregated per test. """Create a table report with result aggregated per test.
Create table with full output: Test | Number of success | Number of failure | Number of error | List of nodes in error or failure Create table with full output:
Test Name | # of success | # of skipped | # of failure | # of errors | List of failed or error nodes
Args: Parameters
---- ----------
manager: A ResultManager instance. manager
tests: List of test names to include. None to select all tests. A ResultManager instance.
title: Title of the report. tests
List of test names to include. None to select all tests.
title
Title of the report.
Returns Returns
------- -------
Table
A fully populated rich `Table`. A fully populated rich `Table`.
""" """
table = Table(title=title, show_lines=True) table = Table(title=title, show_lines=True)
headers = [ headers = [
"Test Case", self.Headers.test_case,
"# of success", self.Headers.number_of_success,
"# of skipped", self.Headers.number_of_skipped,
"# of failure", self.Headers.number_of_failure,
"# of errors", self.Headers.number_of_errors,
"List of failed or error nodes", self.Headers.list_of_error_nodes,
] ]
table = self._build_headers(headers=headers, table=table) table = self._build_headers(headers=headers, table=table)
for test in manager.get_tests(): for test, stats in sorted(manager.test_stats.items()):
if tests is None or test in tests: if tests is None or test in tests:
results = manager.filter_by_tests({test}).results
nb_failure = len([result for result in results if result.result == "failure"])
nb_error = len([result for result in results if result.result == "error"])
list_failure = [result.name for result in results if result.result in ["failure", "error"]]
nb_success = len([result for result in results if result.result == "success"])
nb_skipped = len([result for result in results if result.result == "skipped"])
table.add_row( table.add_row(
test, test,
str(nb_success), str(stats.devices_success_count),
str(nb_skipped), str(stats.devices_skipped_count),
str(nb_failure), str(stats.devices_failure_count),
str(nb_error), str(stats.devices_error_count),
str(list_failure), ", ".join(stats.devices_failure),
) )
return table return table
@ -169,43 +188,41 @@ class ReportTable:
) -> Table: ) -> Table:
"""Create a table report with result aggregated per device. """Create a table report with result aggregated per device.
Create table with full output: Host | Number of success | Number of failure | Number of error | List of nodes in error or failure Create table with full output: Device | # of success | # of skipped | # of failure | # of errors | List of failed or error test cases
Args: Parameters
---- ----------
manager: A ResultManager instance. manager
devices: List of device names to include. None to select all devices. A ResultManager instance.
title: Title of the report. devices
List of device names to include. None to select all devices.
title
Title of the report.
Returns Returns
------- -------
Table
A fully populated rich `Table`. A fully populated rich `Table`.
""" """
table = Table(title=title, show_lines=True) table = Table(title=title, show_lines=True)
headers = [ headers = [
"Device", self.Headers.device,
"# of success", self.Headers.number_of_success,
"# of skipped", self.Headers.number_of_skipped,
"# of failure", self.Headers.number_of_failure,
"# of errors", self.Headers.number_of_errors,
"List of failed or error test cases", self.Headers.list_of_error_tests,
] ]
table = self._build_headers(headers=headers, table=table) table = self._build_headers(headers=headers, table=table)
for device in manager.get_devices(): for device, stats in sorted(manager.device_stats.items()):
if devices is None or device in devices: if devices is None or device in devices:
results = manager.filter_by_devices({device}).results
nb_failure = len([result for result in results if result.result == "failure"])
nb_error = len([result for result in results if result.result == "error"])
list_failure = [result.test for result in results if result.result in ["failure", "error"]]
nb_success = len([result for result in results if result.result == "success"])
nb_skipped = len([result for result in results if result.result == "skipped"])
table.add_row( table.add_row(
device, device,
str(nb_success), str(stats.tests_success_count),
str(nb_skipped), str(stats.tests_skipped_count),
str(nb_failure), str(stats.tests_failure_count),
str(nb_error), str(stats.tests_error_count),
str(list_failure), ", ".join(stats.tests_failure),
) )
return table return table
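A short usage sketch of the stats-driven summary table; the `anta.reporter` import path and the empty `ResultManager` are assumptions, and in practice the manager would already hold results.

```python
# Illustrative usage: render the per-device summary with rich.
from rich.console import Console

from anta.reporter import ReportTable
from anta.result_manager import ResultManager

manager = ResultManager()  # assume results were added elsewhere
Console().print(ReportTable().report_summary_devices(manager))
```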
@ -227,6 +244,9 @@ class ReportJinja:
Report is built based on a J2 template provided by user. Report is built based on a J2 template provided by user.
Data structure sent to template is: Data structure sent to template is:
Example
-------
```
>>> print(ResultManager.json) >>> print(ResultManager.json)
[ [
{ {
@ -238,15 +258,20 @@ class ReportJinja:
description: ..., description: ...,
} }
] ]
```
Args: Parameters
---- ----------
data: List of results from ResultManager.results data
trim_blocks: enable trim_blocks for J2 rendering. List of results from `ResultManager.results`.
lstrip_blocks: enable lstrip_blocks for J2 rendering. trim_blocks
enable trim_blocks for J2 rendering.
lstrip_blocks
enable lstrip_blocks for J2 rendering.
Returns Returns
------- -------
str
Rendered template Rendered template
""" """

View file

@ -0,0 +1,122 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""CSV Report management for ANTA."""
# pylint: disable = too-few-public-methods
from __future__ import annotations
import csv
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING
from anta.logger import anta_log_exception
from anta.tools import convert_categories
if TYPE_CHECKING:
import pathlib
from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult
logger = logging.getLogger(__name__)
class ReportCsv:
"""Build a CSV report."""
@dataclass()
class Headers:
"""Headers for the CSV report."""
device: str = "Device"
test_name: str = "Test Name"
test_status: str = "Test Status"
messages: str = "Message(s)"
description: str = "Test description"
categories: str = "Test category"
@classmethod
def split_list_to_txt_list(cls, usr_list: list[str], delimiter: str = " - ") -> str:
"""Split list to multi-lines string.
Parameters
----------
usr_list
List of string to concatenate.
delimiter
A delimiter to use between strings. Defaults to " - ".
Returns
-------
str
Multi-lines string.
"""
return f"{delimiter}".join(f"{line}" for line in usr_list)
@classmethod
def convert_to_list(cls, result: TestResult) -> list[str]:
"""
Convert a TestResult into a list of strings for creating file content.
Parameters
----------
result
A TestResult to convert into list.
Returns
-------
list[str]
TestResult converted into a list.
"""
message = cls.split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
categories = cls.split_list_to_txt_list(convert_categories(result.categories)) if len(result.categories) > 0 else "None"
return [
str(result.name),
result.test,
result.result,
message,
result.description,
categories,
]
@classmethod
def generate(cls, results: ResultManager, csv_filename: pathlib.Path) -> None:
"""Build CSV flle with tests results.
Parameters
----------
results
A ResultManager instance.
csv_filename
File path where to save CSV data.
Raises
------
OSError
If any error is raised while writing the CSV file.
"""
headers = [
cls.Headers.device,
cls.Headers.test_name,
cls.Headers.test_status,
cls.Headers.messages,
cls.Headers.description,
cls.Headers.categories,
]
try:
with csv_filename.open(mode="w", encoding="utf-8") as csvfile:
csvwriter = csv.writer(
csvfile,
delimiter=",",
)
csvwriter.writerow(headers)
for entry in results.results:
csvwriter.writerow(cls.convert_to_list(entry))
except OSError as exc:
message = f"OSError caught while writing the CSV file '{csv_filename.resolve()}'."
anta_log_exception(exc, message, logger)
raise
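A minimal usage sketch for the new CSV reporter; the `anta.reporter.csv_reporter` module path and the output file name are assumptions.

```python
# Illustrative usage of the CSV reporter.
from pathlib import Path

from anta.reporter.csv_reporter import ReportCsv  # assumed module path
from anta.result_manager import ResultManager

results = ResultManager()  # assume test results were collected elsewhere
ReportCsv.generate(results, Path("anta_report.csv"))  # output name is illustrative
```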

View file

@ -0,0 +1,299 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Markdown report generator for ANTA test results."""
from __future__ import annotations
import logging
import re
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, ClassVar
from anta.constants import MD_REPORT_TOC
from anta.logger import anta_log_exception
from anta.result_manager.models import AntaTestStatus
from anta.tools import convert_categories
if TYPE_CHECKING:
from collections.abc import Generator
from io import TextIOWrapper
from pathlib import Path
from anta.result_manager import ResultManager
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class MDReportGenerator:
"""Class responsible for generating a Markdown report based on the provided `ResultManager` object.
It aggregates different report sections, each represented by a subclass of `MDReportBase`,
and sequentially generates their content into a markdown file.
The `generate` class method will loop over all the section subclasses and call their `generate_section` method.
The final report will be generated in the same order as the `sections` list of the method.
"""
@classmethod
def generate(cls, results: ResultManager, md_filename: Path) -> None:
"""Generate and write the various sections of the markdown report.
Parameters
----------
results
The ResultsManager instance containing all test results.
md_filename
The path to the markdown file to write the report into.
"""
try:
with md_filename.open("w", encoding="utf-8") as mdfile:
sections: list[MDReportBase] = [
ANTAReport(mdfile, results),
TestResultsSummary(mdfile, results),
SummaryTotals(mdfile, results),
SummaryTotalsDeviceUnderTest(mdfile, results),
SummaryTotalsPerCategory(mdfile, results),
TestResults(mdfile, results),
]
for section in sections:
section.generate_section()
except OSError as exc:
message = f"OSError caught while writing the Markdown file '{md_filename.resolve()}'."
anta_log_exception(exc, message, logger)
raise
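A minimal sketch of driving the generator, assuming a populated `ResultManager` (the import path is assumed, not taken from this diff):

```python
from pathlib import Path

from anta.reporter.md_reporter import MDReportGenerator  # assumed module path for the new file
from anta.result_manager import ResultManager

manager = ResultManager()
# ... TestResult objects are added to the manager during an ANTA run ...

# Writes the sections (ANTA Report, summaries, test results) in order into a single Markdown file.
MDReportGenerator.generate(results=manager, md_filename=Path("anta_report.md"))
```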
class MDReportBase(ABC):
"""Base class for all sections subclasses.
Every subclass must implement the `generate_section` method, which uses the `ResultManager` object
to generate and write content to the provided markdown file.
"""
def __init__(self, mdfile: TextIOWrapper, results: ResultManager) -> None:
"""Initialize the MDReportBase with an open markdown file object to write to and a ResultManager instance.
Parameters
----------
mdfile
An open file object to write the markdown data into.
results
The ResultsManager instance containing all test results.
"""
self.mdfile = mdfile
self.results = results
@abstractmethod
def generate_section(self) -> None:
"""Abstract method to generate a specific section of the markdown report.
Must be implemented by subclasses.
"""
msg = "Must be implemented by subclasses"
raise NotImplementedError(msg)
def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of a markdown table for a specific report section.
Subclasses can implement this method to generate the content of the table rows.
"""
msg = "Subclasses should implement this method"
raise NotImplementedError(msg)
def generate_heading_name(self) -> str:
"""Generate a formatted heading name based on the class name.
Returns
-------
str
Formatted header name.
Example
-------
- `ANTAReport` will become `ANTA Report`.
- `TestResultsSummary` will become `Test Results Summary`.
"""
class_name = self.__class__.__name__
# Split the class name into words, keeping acronyms together
words = re.findall(r"[A-Z]?[a-z]+|[A-Z]+(?=[A-Z][a-z]|\d|\W|$)|\d+", class_name)
# Capitalize each word, but keep acronyms in all caps
formatted_words = [word if word.isupper() else word.capitalize() for word in words]
return " ".join(formatted_words)
def write_table(self, table_heading: list[str], *, last_table: bool = False) -> None:
"""Write a markdown table with a table heading and multiple rows to the markdown file.
Parameters
----------
table_heading
List of strings to join for the table heading.
last_table
Flag to determine if it is the last table of the markdown file, to avoid an unnecessary new line. Defaults to False.
"""
self.mdfile.write("\n".join(table_heading) + "\n")
for row in self.generate_rows():
self.mdfile.write(row)
if not last_table:
self.mdfile.write("\n")
def write_heading(self, heading_level: int) -> None:
"""Write a markdown heading to the markdown file.
The heading name used is the class name.
Parameters
----------
heading_level
The level of the heading (1-6).
Example
-------
`## Test Results Summary`
"""
# Ensure the heading level is within the valid range of 1 to 6
heading_level = max(1, min(heading_level, 6))
heading_name = self.generate_heading_name()
heading = "#" * heading_level + " " + heading_name
self.mdfile.write(f"{heading}\n\n")
def safe_markdown(self, text: str | None) -> str:
"""Escape markdown characters in the text to prevent markdown rendering issues.
Parameters
----------
text
The text to escape markdown characters from.
Returns
-------
str
The text with escaped markdown characters.
"""
# Custom field from a TestResult object can be None
if text is None:
return ""
# Replace newlines with spaces to keep content on one line
text = text.replace("\n", " ")
# Replace backticks with single quotes
return text.replace("`", "'")
class ANTAReport(MDReportBase):
"""Generate the `# ANTA Report` section of the markdown report."""
def generate_section(self) -> None:
"""Generate the `# ANTA Report` section of the markdown report."""
self.write_heading(heading_level=1)
toc = MD_REPORT_TOC
self.mdfile.write(toc + "\n\n")
class TestResultsSummary(MDReportBase):
"""Generate the `## Test Results Summary` section of the markdown report."""
def generate_section(self) -> None:
"""Generate the `## Test Results Summary` section of the markdown report."""
self.write_heading(heading_level=2)
class SummaryTotals(MDReportBase):
"""Generate the `### Summary Totals` section of the markdown report."""
TABLE_HEADING: ClassVar[list[str]] = [
"| Total Tests | Total Tests Success | Total Tests Skipped | Total Tests Failure | Total Tests Error |",
"| ----------- | ------------------- | ------------------- | ------------------- | ------------------|",
]
def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the summary totals table."""
yield (
f"| {self.results.get_total_results()} "
f"| {self.results.get_total_results({AntaTestStatus.SUCCESS})} "
f"| {self.results.get_total_results({AntaTestStatus.SKIPPED})} "
f"| {self.results.get_total_results({AntaTestStatus.FAILURE})} "
f"| {self.results.get_total_results({AntaTestStatus.ERROR})} |\n"
)
def generate_section(self) -> None:
"""Generate the `### Summary Totals` section of the markdown report."""
self.write_heading(heading_level=3)
self.write_table(table_heading=self.TABLE_HEADING)
class SummaryTotalsDeviceUnderTest(MDReportBase):
"""Generate the `### Summary Totals Devices Under Tests` section of the markdown report."""
TABLE_HEADING: ClassVar[list[str]] = [
"| Device Under Test | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | Categories Skipped | Categories Failed |",
"| ------------------| ----------- | ------------- | ------------- | ------------- | ----------- | -------------------| ------------------|",
]
def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the summary totals device under test table."""
for device, stat in self.results.device_stats.items():
total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count
categories_skipped = ", ".join(sorted(convert_categories(list(stat.categories_skipped))))
categories_failed = ", ".join(sorted(convert_categories(list(stat.categories_failed))))
yield (
f"| {device} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} | {stat.tests_error_count} "
f"| {categories_skipped or '-'} | {categories_failed or '-'} |\n"
)
def generate_section(self) -> None:
"""Generate the `### Summary Totals Devices Under Tests` section of the markdown report."""
self.write_heading(heading_level=3)
self.write_table(table_heading=self.TABLE_HEADING)
class SummaryTotalsPerCategory(MDReportBase):
"""Generate the `### Summary Totals Per Category` section of the markdown report."""
TABLE_HEADING: ClassVar[list[str]] = [
"| Test Category | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error |",
"| ------------- | ----------- | ------------- | ------------- | ------------- | ----------- |",
]
def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the summary totals per category table."""
for category, stat in self.results.sorted_category_stats.items():
total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count
yield (
f"| {category} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} "
f"| {stat.tests_error_count} |\n"
)
def generate_section(self) -> None:
"""Generate the `### Summary Totals Per Category` section of the markdown report."""
self.write_heading(heading_level=3)
self.write_table(table_heading=self.TABLE_HEADING)
class TestResults(MDReportBase):
"""Generates the `## Test Results` section of the markdown report."""
TABLE_HEADING: ClassVar[list[str]] = [
"| Device Under Test | Categories | Test | Description | Custom Field | Result | Messages |",
"| ----------------- | ---------- | ---- | ----------- | ------------ | ------ | -------- |",
]
def generate_rows(self) -> Generator[str, None, None]:
"""Generate the rows of the all test results table."""
for result in self.results.get_results(sort_by=["name", "test"]):
messages = self.safe_markdown(", ".join(result.messages))
categories = ", ".join(convert_categories(result.categories))
yield (
f"| {result.name or '-'} | {categories or '-'} | {result.test or '-'} "
f"| {result.description or '-'} | {self.safe_markdown(result.custom_field) or '-'} | {result.result or '-'} | {messages or '-'} |\n"
)
def generate_section(self) -> None:
"""Generate the `## Test Results` section of the markdown report."""
self.write_heading(heading_level=2)
self.write_table(table_heading=self.TABLE_HEADING, last_table=True)

View file

@@ -6,14 +6,13 @@
from __future__ import annotations from __future__ import annotations
import json import json
from typing import TYPE_CHECKING from collections import defaultdict
from functools import cached_property
from itertools import chain
from pydantic import TypeAdapter from anta.result_manager.models import AntaTestStatus, TestResult
from anta.custom_types import TestStatus from .models import CategoryStats, DeviceStats, TestStats
if TYPE_CHECKING:
from anta.result_manager.models import TestResult
class ResultManager: class ResultManager:
@@ -91,9 +90,13 @@ class ResultManager:
error_status is set to True. error_status is set to True.
""" """
self._result_entries: list[TestResult] = [] self._result_entries: list[TestResult] = []
self.status: TestStatus = "unset" self.status: AntaTestStatus = AntaTestStatus.UNSET
self.error_status = False self.error_status = False
self.device_stats: defaultdict[str, DeviceStats] = defaultdict(DeviceStats)
self.category_stats: defaultdict[str, CategoryStats] = defaultdict(CategoryStats)
self.test_stats: defaultdict[str, TestStats] = defaultdict(TestStats)
def __len__(self) -> int: def __len__(self) -> int:
"""Implement __len__ method to count number of results.""" """Implement __len__ method to count number of results."""
return len(self._result_entries) return len(self._result_entries)
@@ -105,67 +108,184 @@ class ResultManager:
@results.setter @results.setter
def results(self, value: list[TestResult]) -> None: def results(self, value: list[TestResult]) -> None:
"""Set the list of TestResult."""
# When setting the results, we need to reset the state of the current instance
self._result_entries = [] self._result_entries = []
self.status = "unset" self.status = AntaTestStatus.UNSET
self.error_status = False self.error_status = False
for e in value:
self.add(e) # Also reset the stats attributes
self.device_stats = defaultdict(DeviceStats)
self.category_stats = defaultdict(CategoryStats)
self.test_stats = defaultdict(TestStats)
for result in value:
self.add(result)
@property @property
def json(self) -> str: def json(self) -> str:
"""Get a JSON representation of the results.""" """Get a JSON representation of the results."""
return json.dumps([result.model_dump() for result in self._result_entries], indent=4) return json.dumps([result.model_dump() for result in self._result_entries], indent=4)
def add(self, result: TestResult) -> None: @property
"""Add a result to the ResultManager instance. def sorted_category_stats(self) -> dict[str, CategoryStats]:
"""A property that returns the category_stats dictionary sorted by key name."""
return dict(sorted(self.category_stats.items()))
Args: @cached_property
---- def results_by_status(self) -> dict[AntaTestStatus, list[TestResult]]:
result: TestResult to add to the ResultManager instance. """A cached property that returns the results grouped by status."""
return {status: [result for result in self._result_entries if result.result == status] for status in AntaTestStatus}
def _update_status(self, test_status: AntaTestStatus) -> None:
"""Update the status of the ResultManager instance based on the test status.
Parameters
----------
test_status
AntaTestStatus to update the ResultManager status.
""" """
def _update_status(test_status: TestStatus) -> None:
result_validator = TypeAdapter(TestStatus)
result_validator.validate_python(test_status)
if test_status == "error": if test_status == "error":
self.error_status = True self.error_status = True
return return
if self.status == "unset" or self.status == "skipped" and test_status in {"success", "failure"}: if self.status == "unset" or self.status == "skipped" and test_status in {"success", "failure"}:
self.status = test_status self.status = test_status
elif self.status == "success" and test_status == "failure": elif self.status == "success" and test_status == "failure":
self.status = "failure" self.status = AntaTestStatus.FAILURE
def _update_stats(self, result: TestResult) -> None:
"""Update the statistics based on the test result.
Parameters
----------
result
TestResult to update the statistics.
"""
count_attr = f"tests_{result.result}_count"
# Update device stats
device_stats: DeviceStats = self.device_stats[result.name]
setattr(device_stats, count_attr, getattr(device_stats, count_attr) + 1)
if result.result in ("failure", "error"):
device_stats.tests_failure.add(result.test)
device_stats.categories_failed.update(result.categories)
elif result.result == "skipped":
device_stats.categories_skipped.update(result.categories)
# Update category stats
for category in result.categories:
category_stats: CategoryStats = self.category_stats[category]
setattr(category_stats, count_attr, getattr(category_stats, count_attr) + 1)
# Update test stats
count_attr = f"devices_{result.result}_count"
test_stats: TestStats = self.test_stats[result.test]
setattr(test_stats, count_attr, getattr(test_stats, count_attr) + 1)
if result.result in ("failure", "error"):
test_stats.devices_failure.add(result.name)
def add(self, result: TestResult) -> None:
"""Add a result to the ResultManager instance.
The result is added to the internal list of results and the overall status
of the ResultManager instance is updated based on the added test status.
Parameters
----------
result
TestResult to add to the ResultManager instance.
"""
self._result_entries.append(result) self._result_entries.append(result)
_update_status(result.result) self._update_status(result.result)
self._update_stats(result)
# Every time a new result is added, we need to clear the cached property
self.__dict__.pop("results_by_status", None)
def get_results(self, status: set[AntaTestStatus] | None = None, sort_by: list[str] | None = None) -> list[TestResult]:
"""Get the results, optionally filtered by status and sorted by TestResult fields.
If no status is provided, all results are returned.
Parameters
----------
status
Optional set of AntaTestStatus enum members to filter the results.
sort_by
Optional list of TestResult fields to sort the results.
Returns
-------
list[TestResult]
List of results.
"""
# Return all results if no status is provided, otherwise return results for multiple statuses
results = self._result_entries if status is None else list(chain.from_iterable(self.results_by_status.get(status, []) for status in status))
if sort_by:
accepted_fields = TestResult.model_fields.keys()
if not set(sort_by).issubset(set(accepted_fields)):
msg = f"Invalid sort_by fields: {sort_by}. Accepted fields are: {list(accepted_fields)}"
raise ValueError(msg)
results = sorted(results, key=lambda result: [getattr(result, field) for field in sort_by])
return results
def get_total_results(self, status: set[AntaTestStatus] | None = None) -> int:
"""Get the total number of results, optionally filtered by status.
If no status is provided, the total number of results is returned.
Parameters
----------
status
Optional set of AntaTestStatus enum members to filter the results.
Returns
-------
int
Total number of results.
"""
if status is None:
# Return the total number of results
return sum(len(results) for results in self.results_by_status.values())
# Return the total number of results for multiple statuses
return sum(len(self.results_by_status.get(status, [])) for status in status)
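A hedged sketch of the new filtering and counting API, assuming a `manager` that was populated during a run:

```python
from anta.result_manager import ResultManager
from anta.result_manager.models import AntaTestStatus

manager = ResultManager()
# ... TestResult objects are added during a run ...

# All failed or errored results, sorted by device name then test name.
problems = manager.get_results(status={AntaTestStatus.FAILURE, AntaTestStatus.ERROR}, sort_by=["name", "test"])

# Count only successful results.
success_count = manager.get_total_results({AntaTestStatus.SUCCESS})
```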
def get_status(self, *, ignore_error: bool = False) -> str: def get_status(self, *, ignore_error: bool = False) -> str:
"""Return the current status including error_status if ignore_error is False.""" """Return the current status including error_status if ignore_error is False."""
return "error" if self.error_status and not ignore_error else self.status return "error" if self.error_status and not ignore_error else self.status
def filter(self, hide: set[TestStatus]) -> ResultManager: def filter(self, hide: set[AntaTestStatus]) -> ResultManager:
"""Get a filtered ResultManager based on test status. """Get a filtered ResultManager based on test status.
Args: Parameters
---- ----------
hide: set of TestStatus literals to select tests to hide based on their status. hide
Set of AntaTestStatus enum members to select tests to hide based on their status.
Returns Returns
------- -------
ResultManager
A filtered `ResultManager`. A filtered `ResultManager`.
""" """
possible_statuses = set(AntaTestStatus)
manager = ResultManager() manager = ResultManager()
manager.results = [test for test in self._result_entries if test.result not in hide] manager.results = self.get_results(possible_statuses - hide)
return manager return manager
def filter_by_tests(self, tests: set[str]) -> ResultManager: def filter_by_tests(self, tests: set[str]) -> ResultManager:
"""Get a filtered ResultManager that only contains specific tests. """Get a filtered ResultManager that only contains specific tests.
Args: Parameters
---- ----------
tests: Set of test names to filter the results. tests
Set of test names to filter the results.
Returns Returns
------- -------
ResultManager
A filtered `ResultManager`. A filtered `ResultManager`.
""" """
manager = ResultManager() manager = ResultManager()
@@ -175,12 +295,14 @@ class ResultManager:
def filter_by_devices(self, devices: set[str]) -> ResultManager: def filter_by_devices(self, devices: set[str]) -> ResultManager:
"""Get a filtered ResultManager that only contains specific devices. """Get a filtered ResultManager that only contains specific devices.
Args: Parameters
---- ----------
devices: Set of device names to filter the results. devices
Set of device names to filter the results.
Returns Returns
------- -------
ResultManager
A filtered `ResultManager`. A filtered `ResultManager`.
""" """
manager = ResultManager() manager = ResultManager()
@@ -192,6 +314,7 @@ class ResultManager:
Returns Returns
------- -------
set[str]
Set of test names. Set of test names.
""" """
return {str(result.test) for result in self._result_entries} return {str(result.test) for result in self._result_entries}
@@ -201,6 +324,7 @@ class ResultManager:
Returns Returns
------- -------
set[str]
Set of device names. Set of device names.
""" """
return {str(result.name) for result in self._result_entries} return {str(result.name) for result in self._result_entries}

View file

@@ -5,9 +5,27 @@
from __future__ import annotations from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from pydantic import BaseModel from pydantic import BaseModel
from anta.custom_types import TestStatus
class AntaTestStatus(str, Enum):
"""Test status Enum for the TestResult.
NOTE: This could be updated to StrEnum when Python 3.11 is the minimum supported version in ANTA.
"""
UNSET = "unset"
SUCCESS = "success"
FAILURE = "failure"
ERROR = "error"
SKIPPED = "skipped"
def __str__(self) -> str:
"""Override the __str__ method to return the value of the Enum, mimicking the behavior of StrEnum."""
return self.value
class TestResult(BaseModel): class TestResult(BaseModel):
@@ -15,13 +33,20 @@ class TestResult(BaseModel):
Attributes Attributes
---------- ----------
name: Device name where the test has run. name : str
test: Test name runs on the device. Name of the device where the test was run.
categories: List of categories the TestResult belongs to, by default the AntaTest categories. test : str
description: TestResult description, by default the AntaTest description. Name of the test run on the device.
result: Result of the test. Can be one of "unset", "success", "failure", "error" or "skipped". categories : list[str]
messages: Message to report after the test if any. List of categories the TestResult belongs to. Defaults to the AntaTest categories.
custom_field: Custom field to store a string for flexibility in integrating with ANTA description : str
Description of the TestResult. Defaults to the AntaTest description.
result : AntaTestStatus
Result of the test. Must be one of the AntaTestStatus Enum values: unset, success, failure, error or skipped.
messages : list[str]
Messages to report after the test, if any.
custom_field : str | None
Custom field to store a string for flexibility in integrating with ANTA.
""" """
@@ -29,57 +54,63 @@ class TestResult(BaseModel):
test: str test: str
categories: list[str] categories: list[str]
description: str description: str
result: TestStatus = "unset" result: AntaTestStatus = AntaTestStatus.UNSET
messages: list[str] = [] messages: list[str] = []
custom_field: str | None = None custom_field: str | None = None
def is_success(self, message: str | None = None) -> None: def is_success(self, message: str | None = None) -> None:
"""Set status to success. """Set status to success.
Args: Parameters
---- ----------
message: Optional message related to the test message
Optional message related to the test.
""" """
self._set_status("success", message) self._set_status(AntaTestStatus.SUCCESS, message)
def is_failure(self, message: str | None = None) -> None: def is_failure(self, message: str | None = None) -> None:
"""Set status to failure. """Set status to failure.
Args: Parameters
---- ----------
message: Optional message related to the test message
Optional message related to the test.
""" """
self._set_status("failure", message) self._set_status(AntaTestStatus.FAILURE, message)
def is_skipped(self, message: str | None = None) -> None: def is_skipped(self, message: str | None = None) -> None:
"""Set status to skipped. """Set status to skipped.
Args: Parameters
---- ----------
message: Optional message related to the test message
Optional message related to the test.
""" """
self._set_status("skipped", message) self._set_status(AntaTestStatus.SKIPPED, message)
def is_error(self, message: str | None = None) -> None: def is_error(self, message: str | None = None) -> None:
"""Set status to error. """Set status to error.
Args: Parameters
---- ----------
message: Optional message related to the test message
Optional message related to the test.
""" """
self._set_status("error", message) self._set_status(AntaTestStatus.ERROR, message)
def _set_status(self, status: TestStatus, message: str | None = None) -> None: def _set_status(self, status: AntaTestStatus, message: str | None = None) -> None:
"""Set status and insert optional message. """Set status and insert optional message.
Args: Parameters
---- ----------
status: status of the test status
message: optional message Status of the test.
message
Optional message.
""" """
self.result = status self.result = status
@@ -89,3 +120,42 @@ class TestResult(BaseModel):
def __str__(self) -> str: def __str__(self) -> str:
"""Return a human readable string of this TestResult.""" """Return a human readable string of this TestResult."""
return f"Test '{self.test}' (on '{self.name}'): Result '{self.result}'\nMessages: {self.messages}" return f"Test '{self.test}' (on '{self.name}'): Result '{self.result}'\nMessages: {self.messages}"
# Pylint does not treat dataclasses differently: https://github.com/pylint-dev/pylint/issues/9058
# pylint: disable=too-many-instance-attributes
@dataclass
class DeviceStats:
"""Device statistics for a run of tests."""
tests_success_count: int = 0
tests_skipped_count: int = 0
tests_failure_count: int = 0
tests_error_count: int = 0
tests_unset_count: int = 0
tests_failure: set[str] = field(default_factory=set)
categories_failed: set[str] = field(default_factory=set)
categories_skipped: set[str] = field(default_factory=set)
@dataclass
class CategoryStats:
"""Category statistics for a run of tests."""
tests_success_count: int = 0
tests_skipped_count: int = 0
tests_failure_count: int = 0
tests_error_count: int = 0
tests_unset_count: int = 0
@dataclass
class TestStats:
"""Test statistics for a run of tests."""
devices_success_count: int = 0
devices_skipped_count: int = 0
devices_failure_count: int = 0
devices_error_count: int = 0
devices_unset_count: int = 0
devices_failure: set[str] = field(default_factory=set)
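Once a run has populated a `ResultManager`, the stats dataclasses above can be read directly; a small sketch (assumes `manager` is a populated `ResultManager`):

```python
for device, stats in manager.device_stats.items():
    print(f"{device}: {stats.tests_failure_count} failed, {stats.tests_error_count} errored")
    print(f"  failed categories: {sorted(stats.categories_failed)}")
```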

View file

@@ -40,7 +40,8 @@ def adjust_rlimit_nofile() -> tuple[int, int]:
Returns Returns
------- -------
tuple[int, int]: The new soft and hard limits for open file descriptors. tuple[int, int]
The new soft and hard limits for open file descriptors.
""" """
try: try:
nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE)) nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE))
@@ -50,7 +51,7 @@ def adjust_rlimit_nofile() -> tuple[int, int]:
limits = resource.getrlimit(resource.RLIMIT_NOFILE) limits = resource.getrlimit(resource.RLIMIT_NOFILE)
logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1]) logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1])
nofile = nofile if limits[1] > nofile else limits[1] nofile = min(limits[1], nofile)
logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile) logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile)
resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1])) resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1]))
return resource.getrlimit(resource.RLIMIT_NOFILE) return resource.getrlimit(resource.RLIMIT_NOFILE)
@@ -59,9 +60,10 @@ def adjust_rlimit_nofile() -> tuple[int, int]:
def log_cache_statistics(devices: list[AntaDevice]) -> None: def log_cache_statistics(devices: list[AntaDevice]) -> None:
"""Log cache statistics for each device in the inventory. """Log cache statistics for each device in the inventory.
Args: Parameters
---- ----------
devices: List of devices in the inventory. devices
List of devices in the inventory.
""" """
for device in devices: for device in devices:
if device.cache_statistics is not None: if device.cache_statistics is not None:
@@ -78,15 +80,21 @@ def log_cache_statistics(devices: list[AntaDevice]) -> None:
async def setup_inventory(inventory: AntaInventory, tags: set[str] | None, devices: set[str] | None, *, established_only: bool) -> AntaInventory | None: async def setup_inventory(inventory: AntaInventory, tags: set[str] | None, devices: set[str] | None, *, established_only: bool) -> AntaInventory | None:
"""Set up the inventory for the ANTA run. """Set up the inventory for the ANTA run.
Args: Parameters
---- ----------
inventory: AntaInventory object that includes the device(s). inventory
tags: Tags to filter devices from the inventory. AntaInventory object that includes the device(s).
devices: Devices on which to run tests. None means all devices. tags
Tags to filter devices from the inventory.
devices
Devices on which to run tests. None means all devices.
established_only
If True, only return devices where a connection is established.
Returns Returns
------- -------
AntaInventory | None: The filtered inventory or None if there are no devices to run tests on. AntaInventory | None
The filtered inventory or None if there are no devices to run tests on.
""" """
if len(inventory) == 0: if len(inventory) == 0:
logger.info("The inventory is empty, exiting") logger.info("The inventory is empty, exiting")
@@ -116,15 +124,20 @@
) -> defaultdict[AntaDevice, set[AntaTestDefinition]] | None: ) -> defaultdict[AntaDevice, set[AntaTestDefinition]] | None:
"""Prepare the tests to run. """Prepare the tests to run.
Args: Parameters
---- ----------
inventory: AntaInventory object that includes the device(s). inventory
catalog: AntaCatalog object that includes the list of tests. AntaInventory object that includes the device(s).
tests: Tests to run against devices. None means all tests. catalog
tags: Tags to filter devices from the inventory. AntaCatalog object that includes the list of tests.
tests
Tests to run against devices. None means all tests.
tags
Tags to filter devices from the inventory.
Returns Returns
------- -------
defaultdict[AntaDevice, set[AntaTestDefinition]] | None
A mapping of devices to the tests to run or None if there are no tests to run. A mapping of devices to the tests to run or None if there are no tests to run.
""" """
# Build indexes for the catalog. If `tests` is set, filter the indexes based on these tests # Build indexes for the catalog. If `tests` is set, filter the indexes based on these tests
@@ -133,21 +146,20 @@
# Using a set to avoid inserting duplicate tests # Using a set to avoid inserting duplicate tests
device_to_tests: defaultdict[AntaDevice, set[AntaTestDefinition]] = defaultdict(set) device_to_tests: defaultdict[AntaDevice, set[AntaTestDefinition]] = defaultdict(set)
# Create AntaTestRunner tuples from the tags # Create the device to tests mapping from the tags
for device in inventory.devices: for device in inventory.devices:
if tags: if tags:
# If there are CLI tags, only execute tests with matching tags if not any(tag in device.tags for tag in tags):
device_to_tests[device].update(catalog.get_tests_by_tags(tags)) # The device does not have any selected tag, skipping
continue
else: else:
# If there is no CLI tags, execute all tests that do not have any tags # If there is no CLI tags, execute all tests that do not have any tags
device_to_tests[device].update(catalog.tag_to_tests[None]) device_to_tests[device].update(catalog.tag_to_tests[None])
# Then add the tests with matching tags from device tags # Add the tests with matching tags from device tags
device_to_tests[device].update(catalog.get_tests_by_tags(device.tags)) device_to_tests[device].update(catalog.get_tests_by_tags(device.tags))
catalog.final_tests_count += len(device_to_tests[device]) if len(device_to_tests.values()) == 0:
if catalog.final_tests_count == 0:
msg = ( msg = (
f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs." f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs."
) )
@@ -157,15 +169,19 @@
return device_to_tests return device_to_tests
def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]]) -> list[Coroutine[Any, Any, TestResult]]: def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager) -> list[Coroutine[Any, Any, TestResult]]:
"""Get the coroutines for the ANTA run. """Get the coroutines for the ANTA run.
Args: Parameters
---- ----------
selected_tests: A mapping of devices to the tests to run. The selected tests are generated by the `prepare_tests` function. selected_tests
A mapping of devices to the tests to run. The selected tests are generated by the `prepare_tests` function.
manager
A ResultManager
Returns Returns
------- -------
list[Coroutine[Any, Any, TestResult]]
The list of coroutines to run. The list of coroutines to run.
""" """
coros = [] coros = []
@@ -173,13 +189,14 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio
for test in test_definitions: for test in test_definitions:
try: try:
test_instance = test.test(device=device, inputs=test.inputs) test_instance = test.test(device=device, inputs=test.inputs)
manager.add(test_instance.result)
coros.append(test_instance.test()) coros.append(test_instance.test())
except Exception as e: # noqa: PERF203, pylint: disable=broad-exception-caught except Exception as e: # noqa: PERF203, BLE001
# An AntaTest instance is potentially user-defined code. # An AntaTest instance is potentially user-defined code.
# We need to catch everything and exit gracefully with an error message. # We need to catch everything and exit gracefully with an error message.
message = "\n".join( message = "\n".join(
[ [
f"There is an error when creating test {test.test.module}.{test.test.__name__}.", f"There is an error when creating test {test.test.__module__}.{test.test.__name__}.",
f"If this is not a custom test implementation: {GITHUB_SUGGESTION}", f"If this is not a custom test implementation: {GITHUB_SUGGESTION}",
], ],
) )
@@ -199,22 +216,29 @@ async def main( # noqa: PLR0913
established_only: bool = True, established_only: bool = True,
dry_run: bool = False, dry_run: bool = False,
) -> None: ) -> None:
# pylint: disable=too-many-arguments
"""Run ANTA. """Run ANTA.
Use this as an entrypoint to the test framework in your script. Use this as an entrypoint to the test framework in your script.
ResultManager object gets updated with the test results. ResultManager object gets updated with the test results.
Args: Parameters
---- ----------
manager: ResultManager object to populate with the test results. manager
inventory: AntaInventory object that includes the device(s). ResultManager object to populate with the test results.
catalog: AntaCatalog object that includes the list of tests. inventory
devices: Devices on which to run tests. None means all devices. These may come from the `--device / -d` CLI option in NRFU. AntaInventory object that includes the device(s).
tests: Tests to run against devices. None means all tests. These may come from the `--test / -t` CLI option in NRFU. catalog
tags: Tags to filter devices from the inventory. These may come from the `--tags` CLI option in NRFU. AntaCatalog object that includes the list of tests.
established_only: Include only established device(s). devices
dry_run: Build the list of coroutine to run and stop before test execution. Devices on which to run tests. None means all devices. These may come from the `--device / -d` CLI option in NRFU.
tests
Tests to run against devices. None means all tests. These may come from the `--test / -t` CLI option in NRFU.
tags
Tags to filter devices from the inventory. These may come from the `--tags` CLI option in NRFU.
established_only
Include only established device(s).
dry_run
Build the list of coroutine to run and stop before test execution.
""" """
# Adjust the maximum number of open file descriptors for the ANTA process # Adjust the maximum number of open file descriptors for the ANTA process
limits = adjust_rlimit_nofile() limits = adjust_rlimit_nofile()
@@ -233,25 +257,26 @@ async def main( # noqa: PLR0913
selected_tests = prepare_tests(selected_inventory, catalog, tests, tags) selected_tests = prepare_tests(selected_inventory, catalog, tests, tags)
if selected_tests is None: if selected_tests is None:
return return
final_tests_count = sum(len(tests) for tests in selected_tests.values())
run_info = ( run_info = (
"--- ANTA NRFU Run Information ---\n" "--- ANTA NRFU Run Information ---\n"
f"Number of devices: {len(inventory)} ({len(selected_inventory)} established)\n" f"Number of devices: {len(inventory)} ({len(selected_inventory)} established)\n"
f"Total number of selected tests: {catalog.final_tests_count}\n" f"Total number of selected tests: {final_tests_count}\n"
f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n" f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n"
"---------------------------------" "---------------------------------"
) )
logger.info(run_info) logger.info(run_info)
if catalog.final_tests_count > limits[0]: if final_tests_count > limits[0]:
logger.warning( logger.warning(
"The number of concurrent tests is higher than the open file descriptors limit for this ANTA process.\n" "The number of concurrent tests is higher than the open file descriptors limit for this ANTA process.\n"
"Errors may occur while running the tests.\n" "Errors may occur while running the tests.\n"
"Please consult the ANTA FAQ." "Please consult the ANTA FAQ."
) )
coroutines = get_coroutines(selected_tests) coroutines = get_coroutines(selected_tests, manager)
if dry_run: if dry_run:
logger.info("Dry-run mode, exiting before running the tests.") logger.info("Dry-run mode, exiting before running the tests.")
@@ -263,8 +288,6 @@ async def main( # noqa: PLR0913
AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coroutines)) AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coroutines))
with Catchtime(logger=logger, message="Running ANTA tests"): with Catchtime(logger=logger, message="Running ANTA tests"):
test_results = await asyncio.gather(*coroutines) await asyncio.gather(*coroutines)
for r in test_results:
manager.add(r)
log_cache_statistics(selected_inventory.devices) log_cache_statistics(selected_inventory.devices)
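A minimal sketch of calling the runner from a script (inventory and catalog parsing shown with assumed helper signatures):

```python
import asyncio

from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.runner import main

inventory = AntaInventory.parse(filename="inventory.yml", username="admin", password="admin")  # assumed signature
catalog = AntaCatalog.parse(filename="tests.yml")  # assumed signature
manager = ResultManager()

asyncio.run(main(manager, inventory, catalog))
print(manager.get_status())
```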

View file

@@ -13,7 +13,7 @@ from typing import TYPE_CHECKING, Any, ClassVar
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from anta.custom_types import BfdInterval, BfdMultiplier from anta.custom_types import BfdInterval, BfdMultiplier, BfdProtocol
from anta.models import AntaCommand, AntaTest from anta.models import AntaCommand, AntaTest
from anta.tools import get_value from anta.tools import get_value
@@ -45,7 +45,7 @@ class VerifyBFDSpecificPeers(AntaTest):
name = "VerifyBFDSpecificPeers" name = "VerifyBFDSpecificPeers"
description = "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF." description = "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF."
categories: ClassVar[list[str]] = ["bfd"] categories: ClassVar[list[str]] = ["bfd"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers", revision=4)] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers", revision=1)]
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyBFDSpecificPeers test.""" """Input model for the VerifyBFDSpecificPeers test."""
@@ -126,7 +126,7 @@ class VerifyBFDPeersIntervals(AntaTest):
name = "VerifyBFDPeersIntervals" name = "VerifyBFDPeersIntervals"
description = "Verifies the timers of the IPv4 BFD peers in the specified VRF." description = "Verifies the timers of the IPv4 BFD peers in the specified VRF."
categories: ClassVar[list[str]] = ["bfd"] categories: ClassVar[list[str]] = ["bfd"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers detail", revision=4)] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers detail", revision=1)]
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyBFDPeersIntervals test.""" """Input model for the VerifyBFDPeersIntervals test."""
@@ -157,34 +157,34 @@ class VerifyBFDPeersIntervals(AntaTest):
for bfd_peers in self.inputs.bfd_peers: for bfd_peers in self.inputs.bfd_peers:
peer = str(bfd_peers.peer_address) peer = str(bfd_peers.peer_address)
vrf = bfd_peers.vrf vrf = bfd_peers.vrf
tx_interval = bfd_peers.tx_interval
# Converting milliseconds intervals into actual value rx_interval = bfd_peers.rx_interval
tx_interval = bfd_peers.tx_interval * 1000
rx_interval = bfd_peers.rx_interval * 1000
multiplier = bfd_peers.multiplier multiplier = bfd_peers.multiplier
# Check if BFD peer configured
bfd_output = get_value( bfd_output = get_value(
self.instance_commands[0].json_output, self.instance_commands[0].json_output,
f"vrfs..{vrf}..ipv4Neighbors..{peer}..peerStats..", f"vrfs..{vrf}..ipv4Neighbors..{peer}..peerStats..",
separator="..", separator="..",
) )
# Check if BFD peer configured
if not bfd_output: if not bfd_output:
failures[peer] = {vrf: "Not Configured"} failures[peer] = {vrf: "Not Configured"}
continue continue
# Convert interval timer(s) into milliseconds to be consistent with the inputs.
bfd_details = bfd_output.get("peerStatsDetail", {}) bfd_details = bfd_output.get("peerStatsDetail", {})
intervals_ok = ( op_tx_interval = bfd_details.get("operTxInterval") // 1000
bfd_details.get("operTxInterval") == tx_interval and bfd_details.get("operRxInterval") == rx_interval and bfd_details.get("detectMult") == multiplier op_rx_interval = bfd_details.get("operRxInterval") // 1000
) detect_multiplier = bfd_details.get("detectMult")
intervals_ok = op_tx_interval == tx_interval and op_rx_interval == rx_interval and detect_multiplier == multiplier
# Check timers of BFD peer # Check timers of BFD peer
if not intervals_ok: if not intervals_ok:
failures[peer] = { failures[peer] = {
vrf: { vrf: {
"tx_interval": bfd_details.get("operTxInterval"), "tx_interval": op_tx_interval,
"rx_interval": bfd_details.get("operRxInterval"), "rx_interval": op_rx_interval,
"multiplier": bfd_details.get("detectMult"), "multiplier": detect_multiplier,
} }
} }
@@ -285,3 +285,79 @@ class VerifyBFDPeersHealth(AntaTest):
if up_failures: if up_failures:
up_failures_str = "\n".join(up_failures) up_failures_str = "\n".join(up_failures)
self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}") self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}")
class VerifyBFDPeersRegProtocols(AntaTest):
"""Verifies that IPv4 BFD peer(s) have the specified protocol(s) registered.
Expected Results
----------------
* Success: The test will pass if IPv4 BFD peers are registered with the specified protocol(s).
* Failure: The test will fail if IPv4 BFD peers are not found or the specified protocol(s) are not registered for the BFD peer(s).
Examples
--------
```yaml
anta.tests.bfd:
- VerifyBFDPeersRegProtocols:
bfd_peers:
- peer_address: 192.0.255.7
vrf: default
protocols:
- bgp
```
"""
name = "VerifyBFDPeersRegProtocols"
description = "Verifies that IPv4 BFD peer(s) have the specified protocol(s) registered."
categories: ClassVar[list[str]] = ["bfd"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers detail", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifyBFDPeersRegProtocols test."""
bfd_peers: list[BFDPeer]
"""List of IPv4 BFD peers."""
class BFDPeer(BaseModel):
"""Model for an IPv4 BFD peer."""
peer_address: IPv4Address
"""IPv4 address of a BFD peer."""
vrf: str = "default"
"""Optional VRF for BFD peer. If not provided, it defaults to `default`."""
protocols: list[BfdProtocol]
"""List of protocols to be verified."""
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyBFDPeersRegProtocols."""
# Initialize failure messages
failures: dict[Any, Any] = {}
# Iterating over BFD peers, extract the parameters and command output
for bfd_peer in self.inputs.bfd_peers:
peer = str(bfd_peer.peer_address)
vrf = bfd_peer.vrf
protocols = bfd_peer.protocols
bfd_output = get_value(
self.instance_commands[0].json_output,
f"vrfs..{vrf}..ipv4Neighbors..{peer}..peerStats..",
separator="..",
)
# Check if BFD peer configured
if not bfd_output:
failures[peer] = {vrf: "Not Configured"}
continue
# Check registered protocols
difference = set(protocols) - set(get_value(bfd_output, "peerStatsDetail.apps"))
if difference:
failures[peer] = {vrf: sorted(difference)}
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following BFD peers are not configured or have non-registered protocol(s):\n{failures}")

View file

@@ -33,16 +33,24 @@ class VerifyReachability(AntaTest):
- source: Management0 - source: Management0
destination: 1.1.1.1 destination: 1.1.1.1
vrf: MGMT vrf: MGMT
df_bit: True
size: 100
- source: Management0 - source: Management0
destination: 8.8.8.8 destination: 8.8.8.8
vrf: MGMT vrf: MGMT
df_bit: True
size: 100
``` ```
""" """
name = "VerifyReachability" name = "VerifyReachability"
description = "Test the network reachability to one or many destination IP(s)." description = "Test the network reachability to one or many destination IP(s)."
categories: ClassVar[list[str]] = ["connectivity"] categories: ClassVar[list[str]] = ["connectivity"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="ping vrf {vrf} {destination} source {source} repeat {repeat}", revision=1)] # The <space> between '{size}' and '{df_bit}' is omitted to compensate for the df_bit default value,
# i.e. if df-bit is left disabled, a redundant space would otherwise be added to the command
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [
AntaTemplate(template="ping vrf {vrf} {destination} source {source} size {size}{df_bit} repeat {repeat}", revision=1)
]
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyReachability test.""" """Input model for the VerifyReachability test."""
@@ -61,15 +69,27 @@ class VerifyReachability(AntaTest):
"""VRF context. Defaults to `default`.""" """VRF context. Defaults to `default`."""
repeat: int = 2 repeat: int = 2
"""Number of ping repetition. Defaults to 2.""" """Number of ping repetition. Defaults to 2."""
size: int = 100
"""Specify datagram size. Defaults to 100."""
df_bit: bool = False
"""Enable do not fragment bit in IP header. Defaults to False."""
def render(self, template: AntaTemplate) -> list[AntaCommand]: def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each host in the input list.""" """Render the template for each host in the input list."""
return [template.render(destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat) for host in self.inputs.hosts] commands = []
for host in self.inputs.hosts:
# Enable the do-not-fragment bit in the IP header if requested, otherwise leave it disabled.
# The leading <space> compensates for the change in the AntaTemplate.
df_bit = " df-bit" if host.df_bit else ""
command = template.render(destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat, size=host.size, df_bit=df_bit)
commands.append(command)
return commands
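An illustrative sketch of how the rendered command strings come out, showing why the template has no space between `{size}` and `{df_bit}`:

```python
template = "ping vrf {vrf} {destination} source {source} size {size}{df_bit} repeat {repeat}"

for df_bit in (True, False):
    suffix = " df-bit" if df_bit else ""
    print(template.format(vrf="MGMT", destination="1.1.1.1", source="Management0", size=100, df_bit=suffix, repeat=2))
# ping vrf MGMT 1.1.1.1 source Management0 size 100 df-bit repeat 2
# ping vrf MGMT 1.1.1.1 source Management0 size 100 repeat 2
```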
@AntaTest.anta_test @AntaTest.anta_test
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifyReachability.""" """Main test function for VerifyReachability."""
failures = [] failures = []
for command in self.instance_commands: for command in self.instance_commands:
src = command.params.source src = command.params.source
dst = command.params.destination dst = command.params.destination

View file

@@ -196,4 +196,4 @@ class VerifyFieldNotice72Resolution(AntaTest):
self.result.is_success("FN72 is mitigated") self.result.is_success("FN72 is mitigated")
return return
# We should never hit this point # We should never hit this point
self.result.is_error("Error in running test - FixedSystemvrm1 not found") self.result.is_failure("Error in running test - Component FixedSystemvrm1 not found in 'show version'")

196
anta/tests/flow_tracking.py Normal file
View file

@@ -0,0 +1,196 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Module related to the flow tracking tests."""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from typing import ClassVar
from pydantic import BaseModel
from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools import get_failed_logs
def validate_record_export(record_export: dict[str, str], tracker_info: dict[str, str]) -> str:
"""
Validate the record export configuration against the tracker info.
Parameters
----------
record_export
The expected record export configuration.
tracker_info
The actual tracker info from the command output.
Returns
-------
str
A failure message if the record export configuration does not match, otherwise an empty string.
"""
failed_log = ""
actual_export = {"inactive timeout": tracker_info.get("inactiveTimeout"), "interval": tracker_info.get("activeInterval")}
expected_export = {"inactive timeout": record_export.get("on_inactive_timeout"), "interval": record_export.get("on_interval")}
if actual_export != expected_export:
failed_log = get_failed_logs(expected_export, actual_export)
return failed_log
def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, str]) -> str:
"""
Validate the exporter configurations against the tracker info.
Parameters
----------
exporters
The list of expected exporter configurations.
tracker_info
The actual tracker info from the command output.
Returns
-------
str
Failure message if any exporter configuration does not match.
"""
failed_log = ""
for exporter in exporters:
exporter_name = exporter["name"]
actual_exporter_info = tracker_info["exporters"].get(exporter_name)
if not actual_exporter_info:
failed_log += f"\nExporter `{exporter_name}` is not configured."
continue
expected_exporter_data = {"local interface": exporter["local_interface"], "template interval": exporter["template_interval"]}
actual_exporter_data = {"local interface": actual_exporter_info["localIntf"], "template interval": actual_exporter_info["templateInterval"]}
if expected_exporter_data != actual_exporter_data:
failed_msg = get_failed_logs(expected_exporter_data, actual_exporter_data)
failed_log += f"\nExporter `{exporter_name}`: {failed_msg}"
return failed_log
class VerifyHardwareFlowTrackerStatus(AntaTest):
"""
Verifies if hardware flow tracking is running and an input tracker is active.
This test optionally verifies the tracker interval/timeout and exporter configuration.
Expected Results
----------------
* Success: The test will pass if hardware flow tracking is running and an input tracker is active.
* Failure: The test will fail if hardware flow tracking is not running, an input tracker is not active,
or the tracker interval/timeout and exporter configuration does not match the expected values.
Examples
--------
```yaml
anta.tests.flow_tracking:
- VerifyFlowTrackingHardware:
trackers:
- name: FLOW-TRACKER
record_export:
on_inactive_timeout: 70000
on_interval: 300000
exporters:
- name: CV-TELEMETRY
local_interface: Loopback0
template_interval: 3600000
```
"""
name = "VerifyHardwareFlowTrackerStatus"
description = (
"Verifies if hardware flow tracking is running and an input tracker is active. Optionally verifies the tracker interval/timeout and exporter configuration."
)
categories: ClassVar[list[str]] = ["flow tracking"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show flow tracking hardware tracker {name}", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifyHardwareFlowTrackerStatus test."""
trackers: list[FlowTracker]
"""List of flow trackers to verify."""
class FlowTracker(BaseModel):
"""Detail of a flow tracker."""
name: str
"""Name of the flow tracker."""
record_export: RecordExport | None = None
"""Record export configuration for the flow tracker."""
exporters: list[Exporter] | None = None
"""List of exporters for the flow tracker."""
class RecordExport(BaseModel):
"""Record export configuration."""
on_inactive_timeout: int
"""Timeout in milliseconds for exporting records when inactive."""
on_interval: int
"""Interval in milliseconds for exporting records."""
class Exporter(BaseModel):
"""Detail of an exporter."""
name: str
"""Name of the exporter."""
local_interface: str
"""Local interface used by the exporter."""
template_interval: int
"""Template interval in milliseconds for the exporter."""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each hardware tracker."""
return [template.render(name=tracker.name) for tracker in self.inputs.trackers]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyHardwareFlowTrackerStatus."""
self.result.is_success()
for command, tracker_input in zip(self.instance_commands, self.inputs.trackers):
hardware_tracker_name = command.params.name
record_export = tracker_input.record_export.model_dump() if tracker_input.record_export else None
exporters = [exporter.model_dump() for exporter in tracker_input.exporters] if tracker_input.exporters else None
command_output = command.json_output
# Check if hardware flow tracking is configured
if not command_output.get("running"):
self.result.is_failure("Hardware flow tracking is not running.")
return
# Check if the input hardware tracker is configured
tracker_info = command_output["trackers"].get(hardware_tracker_name)
if not tracker_info:
self.result.is_failure(f"Hardware flow tracker `{hardware_tracker_name}` is not configured.")
continue
# Check if the input hardware tracker is active
if not tracker_info.get("active"):
self.result.is_failure(f"Hardware flow tracker `{hardware_tracker_name}` is not active.")
continue
# Check the input hardware tracker timeouts
failure_msg = ""
if record_export:
record_export_failure = validate_record_export(record_export, tracker_info)
if record_export_failure:
failure_msg += record_export_failure
# Check the input hardware tracker exporters' configuration
if exporters:
exporters_failure = validate_exporters(exporters, tracker_info)
if exporters_failure:
failure_msg += exporters_failure
if failure_msg:
self.result.is_failure(f"{hardware_tracker_name}: {failure_msg}\n")

View file

@@ -15,7 +15,7 @@ from pydantic import BaseModel, Field
from pydantic_extra_types.mac_address import MacAddress from pydantic_extra_types.mac_address import MacAddress
from anta import GITHUB_SUGGESTION from anta import GITHUB_SUGGESTION
from anta.custom_types import EthernetInterface, Interface, Percent, PositiveInteger from anta.custom_types import EthernetInterface, Interface, Percent, PortChannelInterface, PositiveInteger
from anta.decorators import skip_on_platforms from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools import custom_division, get_failed_logs, get_item, get_value from anta.tools import custom_division, get_failed_logs, get_item, get_value
@@ -71,7 +71,7 @@ class VerifyInterfaceUtilization(AntaTest):
if ((duplex := (interface := interfaces["interfaces"][intf]).get("duplex", None)) is not None and duplex != duplex_full) or ( if ((duplex := (interface := interfaces["interfaces"][intf]).get("duplex", None)) is not None and duplex != duplex_full) or (
(members := interface.get("memberInterfaces", None)) is not None and any(stats["duplex"] != duplex_full for stats in members.values()) (members := interface.get("memberInterfaces", None)) is not None and any(stats["duplex"] != duplex_full for stats in members.values())
): ):
self.result.is_error(f"Interface {intf} or one of its member interfaces is not Full-Duplex. VerifyInterfaceUtilization has not been implemented.") self.result.is_failure(f"Interface {intf} or one of its member interfaces is not Full-Duplex. VerifyInterfaceUtilization has not been implemented.")
return return
if (bandwidth := interfaces["interfaces"][intf]["bandwidth"]) == 0: if (bandwidth := interfaces["interfaces"][intf]["bandwidth"]) == 0:
@@ -705,7 +705,7 @@ class VerifyInterfaceIPv4(AntaTest):
input_interface_detail = interface input_interface_detail = interface
break break
else: else:
self.result.is_error(f"Could not find `{intf}` in the input interfaces. {GITHUB_SUGGESTION}") self.result.is_failure(f"Could not find `{intf}` in the input interfaces. {GITHUB_SUGGESTION}")
continue continue
input_primary_ip = str(input_interface_detail.primary_ip) input_primary_ip = str(input_interface_detail.primary_ip)
@ -883,3 +883,107 @@ class VerifyInterfacesSpeed(AntaTest):
output["speed"] = f"{custom_division(output['speed'], BPS_GBPS_CONVERSIONS)}Gbps" output["speed"] = f"{custom_division(output['speed'], BPS_GBPS_CONVERSIONS)}Gbps"
failed_log = get_failed_logs(expected_interface_output, actual_interface_output) failed_log = get_failed_logs(expected_interface_output, actual_interface_output)
self.result.is_failure(f"For interface {intf}:{failed_log}\n") self.result.is_failure(f"For interface {intf}:{failed_log}\n")
class VerifyLACPInterfacesStatus(AntaTest):
"""Verifies the Link Aggregation Control Protocol (LACP) status of the provided interfaces.
- Verifies that the interface is a member of the LACP port channel.
- Ensures that the synchronization is established.
- Ensures the interfaces are in the correct state for collecting and distributing traffic.
- Validates that LACP settings, such as timeouts, are correctly configured (i.e., the long timeout mode, also known as "slow" mode, is the default setting).
Expected Results
----------------
* Success: The test will pass if the provided interfaces are bundled in the port channel and all specified parameters are correct.
* Failure: The test will fail if any interface is not bundled in the port channel or any of the specified parameters is incorrect.
Examples
--------
```yaml
anta.tests.interfaces:
- VerifyLACPInterfacesStatus:
interfaces:
- name: Ethernet1
portchannel: Port-Channel100
```
"""
name = "VerifyLACPInterfacesStatus"
description = "Verifies the Link Aggregation Control Protocol(LACP) status of the provided interfaces."
categories: ClassVar[list[str]] = ["interfaces"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show lacp interface {interface}", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifyLACPInterfacesStatus test."""
interfaces: list[LACPInterface]
"""List of LACP member interface."""
class LACPInterface(BaseModel):
"""Model for an LACP member interface."""
name: EthernetInterface
"""Ethernet interface to validate."""
portchannel: PortChannelInterface
"""Port Channel in which the interface is bundled."""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each interface in the input list."""
return [template.render(interface=interface.name) for interface in self.inputs.interfaces]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyLACPInterfacesStatus."""
self.result.is_success()
# Member port verification parameters.
member_port_details = ["activity", "aggregation", "synchronization", "collecting", "distributing", "timeout"]
# Iterating over command output for different interfaces
for command, input_entry in zip(self.instance_commands, self.inputs.interfaces):
interface = input_entry.name
portchannel = input_entry.portchannel
# Verify if a PortChannel is configured with the provided interface
if not (interface_details := get_value(command.json_output, f"portChannels.{portchannel}.interfaces.{interface}")):
self.result.is_failure(f"Interface '{interface}' is not configured to be a member of LACP '{portchannel}'.")
continue
# Verify the interface is bundled in port channel.
actor_port_status = interface_details.get("actorPortStatus")
if actor_port_status != "bundled":
message = f"For Interface {interface}:\nExpected `bundled` as the local port status, but found `{actor_port_status}` instead.\n"
self.result.is_failure(message)
continue
# Collecting actor and partner port details
actor_port_details = interface_details.get("actorPortState", {})
partner_port_details = interface_details.get("partnerPortState", {})
# Collecting actual interface details
actual_interface_output = {
"actor_port_details": {param: actor_port_details.get(param, "NotFound") for param in member_port_details},
"partner_port_details": {param: partner_port_details.get(param, "NotFound") for param in member_port_details},
}
# Forming expected interface details
expected_details = {param: param != "timeout" for param in member_port_details}
expected_interface_output = {"actor_port_details": expected_details, "partner_port_details": expected_details}
# Forming failure message
if actual_interface_output != expected_interface_output:
message = f"For Interface {interface}:\n"
actor_port_failed_log = get_failed_logs(
expected_interface_output.get("actor_port_details", {}), actual_interface_output.get("actor_port_details", {})
)
partner_port_failed_log = get_failed_logs(
expected_interface_output.get("partner_port_details", {}), actual_interface_output.get("partner_port_details", {})
)
if actor_port_failed_log:
message += f"Actor port details:{actor_port_failed_log}\n"
if partner_port_failed_log:
message += f"Partner port details:{partner_port_failed_log}\n"
self.result.is_failure(message)
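# Illustrative sketch only, assuming this fragment of the "show lacp interface" reply;
# the key names mirror the lookups above and the values depict a healthy, bundled
# member port (all states True except "timeout", i.e. the slow/long LACP timeout).
EXAMPLE_LACP_INTERFACE_OUTPUT = {
    "portChannels": {
        "Port-Channel100": {
            "interfaces": {
                "Ethernet1": {
                    "actorPortStatus": "bundled",
                    "actorPortState": {"activity": True, "aggregation": True, "synchronization": True, "collecting": True, "distributing": True, "timeout": False},
                    "partnerPortState": {"activity": True, "aggregation": True, "synchronization": True, "collecting": True, "distributing": True, "timeout": False},
                },
            },
        },
    },
}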


@ -25,14 +25,17 @@ if TYPE_CHECKING:
def _get_logging_states(logger: logging.Logger, command_output: str) -> str: def _get_logging_states(logger: logging.Logger, command_output: str) -> str:
"""Parse `show logging` output and gets operational logging states used in the tests in this module. """Parse `show logging` output and gets operational logging states used in the tests in this module.
Args: Parameters
---- ----------
logger: The logger object. logger
command_output: The `show logging` output. The logger object.
command_output
The `show logging` output.
Returns Returns
------- -------
str: The operational logging states. str
The operational logging states.
""" """
log_states = command_output.partition("\n\nExternal configuration:")[0] log_states = command_output.partition("\n\nExternal configuration:")[0]
@ -97,13 +100,13 @@ class VerifyLoggingSourceIntf(AntaTest):
``` ```
""" """
name = "VerifyLoggingSourceInt" name = "VerifyLoggingSourceIntf"
description = "Verifies logging source-interface for a specified VRF." description = "Verifies logging source-interface for a specified VRF."
categories: ClassVar[list[str]] = ["logging"] categories: ClassVar[list[str]] = ["logging"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging", ofmt="text")] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging", ofmt="text")]
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyLoggingSourceInt test.""" """Input model for the VerifyLoggingSourceIntf test."""
interface: str interface: str
"""Source-interface to use as source IP of log messages.""" """Source-interface to use as source IP of log messages."""
@ -112,7 +115,7 @@ class VerifyLoggingSourceIntf(AntaTest):
@AntaTest.anta_test @AntaTest.anta_test
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifyLoggingSourceInt.""" """Main test function for VerifyLoggingSourceIntf."""
output = self.instance_commands[0].text_output output = self.instance_commands[0].text_output
pattern = rf"Logging source-interface '{self.inputs.interface}'.*VRF {self.inputs.vrf}" pattern = rf"Logging source-interface '{self.inputs.interface}'.*VRF {self.inputs.vrf}"
if re.search(pattern, _get_logging_states(self.logger, output)): if re.search(pattern, _get_logging_states(self.logger, output)):
@ -268,7 +271,7 @@ class VerifyLoggingTimestamp(AntaTest):
""" """
name = "VerifyLoggingTimestamp" name = "VerifyLoggingTimestamp"
description = "Verifies if logs are generated with the riate timestamp." description = "Verifies if logs are generated with the appropriate timestamp."
categories: ClassVar[list[str]] = ["logging"] categories: ClassVar[list[str]] = ["logging"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ commands: ClassVar[list[AntaCommand | AntaTemplate]] = [
AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation", ofmt="text"), AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation", ofmt="text"),
@ -279,7 +282,7 @@ class VerifyLoggingTimestamp(AntaTest):
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifyLoggingTimestamp.""" """Main test function for VerifyLoggingTimestamp."""
log_pattern = r"ANTA VerifyLoggingTimestamp validation" log_pattern = r"ANTA VerifyLoggingTimestamp validation"
timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}" timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}[+-]\d{2}:\d{2}"
output = self.instance_commands[1].text_output output = self.instance_commands[1].text_output
lines = output.strip().split("\n")[::-1] lines = output.strip().split("\n")[::-1]
last_line_with_pattern = "" last_line_with_pattern = ""


@ -123,10 +123,7 @@ class VerifyMlagConfigSanity(AntaTest):
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifyMlagConfigSanity.""" """Main test function for VerifyMlagConfigSanity."""
command_output = self.instance_commands[0].json_output command_output = self.instance_commands[0].json_output
if (mlag_status := get_value(command_output, "mlagActive")) is None: if command_output["mlagActive"] is False:
self.result.is_error(message="Incorrect JSON response - 'mlagActive' state was not found")
return
if mlag_status is False:
self.result.is_skipped("MLAG is disabled") self.result.is_skipped("MLAG is disabled")
return return
keys_to_verify = ["globalConfiguration", "interfaceConfiguration"] keys_to_verify = ["globalConfiguration", "interfaceConfiguration"]


@ -8,33 +8,47 @@
from __future__ import annotations from __future__ import annotations
from ipaddress import IPv4Address, IPv4Network, IPv6Address from ipaddress import IPv4Address, IPv4Network, IPv6Address
from typing import Any, ClassVar from typing import TYPE_CHECKING, Any, ClassVar
from pydantic import BaseModel, Field, PositiveInt, model_validator from pydantic import BaseModel, Field, PositiveInt, model_validator
from pydantic.v1.utils import deep_update from pydantic.v1.utils import deep_update
from pydantic_extra_types.mac_address import MacAddress from pydantic_extra_types.mac_address import MacAddress
from anta.custom_types import Afi, MultiProtocolCaps, Safi, Vni from anta.custom_types import Afi, BgpDropStats, BgpUpdateError, MultiProtocolCaps, Safi, Vni
from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools import get_item, get_value from anta.tools import get_item, get_value
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
def _add_bgp_failures(failures: dict[tuple[str, str | None], dict[str, Any]], afi: Afi, safi: Safi | None, vrf: str, issue: str | dict[str, Any]) -> None: def _add_bgp_failures(failures: dict[tuple[str, str | None], dict[str, Any]], afi: Afi, safi: Safi | None, vrf: str, issue: str | dict[str, Any]) -> None:
"""Add a BGP failure entry to the given `failures` dictionary. """Add a BGP failure entry to the given `failures` dictionary.
Note: This function modifies `failures` in-place. Note: This function modifies `failures` in-place.
Args: Parameters
---- ----------
failures: The dictionary to which the failure will be added. failures
afi: The address family identifier. The dictionary to which the failure will be added.
vrf: The VRF name. afi
safi: The subsequent address family identifier. The address family identifier.
issue: A description of the issue. Can be of any type. vrf
The VRF name.
safi
The subsequent address family identifier.
issue
A description of the issue. Can be of any type.
Example: Example
------- -------
The `failures` dictionary will have the following structure: The `failures` dictionary will have the following structure:
```
{ {
('afi1', 'safi1'): { ('afi1', 'safi1'): {
'afi': 'afi1', 'afi': 'afi1',
@ -51,6 +65,7 @@ def _add_bgp_failures(failures: dict[tuple[str, str | None], dict[str, Any]], af
} }
} }
} }
```
""" """
key = (afi, safi) key = (afi, safi)
@ -63,23 +78,29 @@ def _add_bgp_failures(failures: dict[tuple[str, str | None], dict[str, Any]], af
def _check_peer_issues(peer_data: dict[str, Any] | None) -> dict[str, Any]: def _check_peer_issues(peer_data: dict[str, Any] | None) -> dict[str, Any]:
"""Check for issues in BGP peer data. """Check for issues in BGP peer data.
Args: Parameters
---- ----------
peer_data: The BGP peer data dictionary nested in the `show bgp <afi> <safi> summary` command. peer_data
The BGP peer data dictionary nested in the `show bgp <afi> <safi> summary` command.
Returns Returns
------- -------
dict: Dictionary with keys indicating issues or an empty dictionary if no issues. dict
Dictionary with keys indicating issues or an empty dictionary if no issues.
Raises Raises
------ ------
ValueError: If any of the required keys ("peerState", "inMsgQueue", "outMsgQueue") are missing in `peer_data`, i.e. invalid BGP peer data. ValueError
If any of the required keys ("peerState", "inMsgQueue", "outMsgQueue") are missing in `peer_data`, i.e. invalid BGP peer data.
Example: Example
------- -------
This can for instance return
```
{"peerNotFound": True} {"peerNotFound": True}
{"peerState": "Idle", "inMsgQueue": 2, "outMsgQueue": 0} {"peerState": "Idle", "inMsgQueue": 2, "outMsgQueue": 0}
{} {}
```
""" """
if peer_data is None: if peer_data is None:
@ -104,17 +125,23 @@ def _add_bgp_routes_failure(
It identifies any missing routes as well as any routes that are invalid or inactive. The results are returned in a dictionary. It identifies any missing routes as well as any routes that are invalid or inactive. The results are returned in a dictionary.
Args: Parameters
---- ----------
bgp_routes: The list of expected routes. bgp_routes
bgp_output: The BGP output from the device. The list of expected routes.
peer: The IP address of the BGP peer. bgp_output
vrf: The name of the VRF for which the routes need to be verified. The BGP output from the device.
route_type: The type of BGP routes. Defaults to 'advertised_routes'. peer
The IP address of the BGP peer.
vrf
The name of the VRF for which the routes need to be verified.
route_type
The type of BGP routes. Defaults to 'advertised_routes'.
Returns Returns
------- -------
dict[str, dict[str, dict[str, dict[str, list[str]]]]]: A dictionary containing the missing routes and invalid or inactive routes. dict[str, dict[str, dict[str, dict[str, list[str]]]]]
A dictionary containing the missing routes and invalid or inactive routes.
""" """
# Prepare the failure routes dictionary # Prepare the failure routes dictionary
@ -123,7 +150,7 @@ def _add_bgp_routes_failure(
# Iterate over the expected BGP routes # Iterate over the expected BGP routes
for route in bgp_routes: for route in bgp_routes:
str_route = str(route) str_route = str(route)
failure = {"bgp_peers": {peer: {vrf: {route_type: {str_route: Any}}}}} failure: dict[str, Any] = {"bgp_peers": {peer: {vrf: {route_type: {}}}}}
# Check if the route is missing in the BGP output # Check if the route is missing in the BGP output
if str_route not in bgp_output: if str_route not in bgp_output:
@ -216,7 +243,7 @@ class VerifyBGPPeerCount(AntaTest):
"""Number of expected BGP peer(s).""" """Number of expected BGP peer(s)."""
@model_validator(mode="after") @model_validator(mode="after")
def validate_inputs(self: BaseModel) -> BaseModel: def validate_inputs(self) -> Self:
"""Validate the inputs provided to the BgpAfi class. """Validate the inputs provided to the BgpAfi class.
If afi is either ipv4 or ipv6, safi must be provided. If afi is either ipv4 or ipv6, safi must be provided.
@ -356,7 +383,7 @@ class VerifyBGPPeersHealth(AntaTest):
""" """
@model_validator(mode="after") @model_validator(mode="after")
def validate_inputs(self: BaseModel) -> BaseModel: def validate_inputs(self) -> Self:
"""Validate the inputs provided to the BgpAfi class. """Validate the inputs provided to the BgpAfi class.
If afi is either ipv4 or ipv6, safi must be provided. If afi is either ipv4 or ipv6, safi must be provided.
@ -503,7 +530,7 @@ class VerifyBGPSpecificPeers(AntaTest):
"""List of BGP IPv4 or IPv6 peer.""" """List of BGP IPv4 or IPv6 peer."""
@model_validator(mode="after") @model_validator(mode="after")
def validate_inputs(self: BaseModel) -> BaseModel: def validate_inputs(self) -> Self:
"""Validate the inputs provided to the BgpAfi class. """Validate the inputs provided to the BgpAfi class.
If afi is either ipv4 or ipv6, safi must be provided and vrf must NOT be all. If afi is either ipv4 or ipv6, safi must be provided and vrf must NOT be all.
@ -685,6 +712,8 @@ class VerifyBGPExchangedRoutes(AntaTest):
class VerifyBGPPeerMPCaps(AntaTest): class VerifyBGPPeerMPCaps(AntaTest):
"""Verifies the multiprotocol capabilities of a BGP peer in a specified VRF. """Verifies the multiprotocol capabilities of a BGP peer in a specified VRF.
Supports `strict: True` to verify that only the specified capabilities are configured, requiring an exact match.
Expected Results Expected Results
---------------- ----------------
* Success: The test will pass if the BGP peer's multiprotocol capabilities are advertised, received, and enabled in the specified VRF. * Success: The test will pass if the BGP peer's multiprotocol capabilities are advertised, received, and enabled in the specified VRF.
@ -699,6 +728,7 @@ class VerifyBGPPeerMPCaps(AntaTest):
bgp_peers: bgp_peers:
- peer_address: 172.30.11.1 - peer_address: 172.30.11.1
vrf: default vrf: default
strict: False
capabilities: capabilities:
- ipv4Unicast - ipv4Unicast
``` ```
@ -722,6 +752,8 @@ class VerifyBGPPeerMPCaps(AntaTest):
"""IPv4 address of a BGP peer.""" """IPv4 address of a BGP peer."""
vrf: str = "default" vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`.""" """Optional VRF for BGP peer. If not provided, it defaults to `default`."""
strict: bool = False
"""If True, requires exact matching of provided capabilities. Defaults to False."""
capabilities: list[MultiProtocolCaps] capabilities: list[MultiProtocolCaps]
"""List of multiprotocol capabilities to be verified.""" """List of multiprotocol capabilities to be verified."""
@ -730,14 +762,14 @@ class VerifyBGPPeerMPCaps(AntaTest):
"""Main test function for VerifyBGPPeerMPCaps.""" """Main test function for VerifyBGPPeerMPCaps."""
failures: dict[str, Any] = {"bgp_peers": {}} failures: dict[str, Any] = {"bgp_peers": {}}
# Iterate over each bgp peer # Iterate over each bgp peer.
for bgp_peer in self.inputs.bgp_peers: for bgp_peer in self.inputs.bgp_peers:
peer = str(bgp_peer.peer_address) peer = str(bgp_peer.peer_address)
vrf = bgp_peer.vrf vrf = bgp_peer.vrf
capabilities = bgp_peer.capabilities capabilities = bgp_peer.capabilities
failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}}
# Check if BGP output exists # Check if BGP output exists.
if ( if (
not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList"))
or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None
@ -746,8 +778,17 @@ class VerifyBGPPeerMPCaps(AntaTest):
failures = deep_update(failures, failure) failures = deep_update(failures, failure)
continue continue
# Check each capability # Fetching the capabilities output.
bgp_output = get_value(bgp_output, "neighborCapabilities.multiprotocolCaps") bgp_output = get_value(bgp_output, "neighborCapabilities.multiprotocolCaps")
if bgp_peer.strict and sorted(capabilities) != sorted(bgp_output):
failure["bgp_peers"][peer][vrf] = {
"status": f"Expected only `{', '.join(capabilities)}` capabilities should be listed but found `{', '.join(bgp_output)}` instead."
}
failures = deep_update(failures, failure)
continue
# Check each capability
for capability in capabilities: for capability in capabilities:
capability_output = bgp_output.get(capability) capability_output = bgp_output.get(capability)
@ -1226,3 +1267,364 @@ class VerifyBGPTimers(AntaTest):
self.result.is_success() self.result.is_success()
else: else:
self.result.is_failure(f"Following BGP peers are not configured or hold and keep-alive timers are not correct:\n{failures}") self.result.is_failure(f"Following BGP peers are not configured or hold and keep-alive timers are not correct:\n{failures}")
class VerifyBGPPeerDropStats(AntaTest):
"""Verifies BGP NLRI drop statistics for the provided BGP IPv4 peer(s).
By default, all drop statistics counters will be checked for any non-zero values.
An optional list of specific drop statistics can be provided for granular testing.
Expected Results
----------------
* Success: The test will pass if the BGP peer's drop statistic(s) are zero.
* Failure: The test will fail if the BGP peer's drop statistic(s) are non-zero/Not Found or peer is not configured.
Examples
--------
```yaml
anta.tests.routing:
bgp:
- VerifyBGPPeerDropStats:
bgp_peers:
- peer_address: 172.30.11.1
vrf: default
drop_stats:
- inDropAsloop
- prefixEvpnDroppedUnsupportedRouteType
```
"""
name = "VerifyBGPPeerDropStats"
description = "Verifies the NLRI drop statistics of a BGP IPv4 peer(s)."
categories: ClassVar[list[str]] = ["bgp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)]
class Input(AntaTest.Input):
"""Input model for the VerifyBGPPeerDropStats test."""
bgp_peers: list[BgpPeer]
"""List of BGP peers"""
class BgpPeer(BaseModel):
"""Model for a BGP peer."""
peer_address: IPv4Address
"""IPv4 address of a BGP peer."""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
drop_stats: list[BgpDropStats] | None = None
"""Optional list of drop statistics to be verified. If not provided, test will verifies all the drop statistics."""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each BGP peer in the input list."""
return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyBGPPeerDropStats."""
failures: dict[Any, Any] = {}
for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers):
peer = command.params.peer
vrf = command.params.vrf
drop_statistics = input_entry.drop_stats
# Verify BGP peer
if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None:
failures[peer] = {vrf: "Not configured"}
continue
# Get the BGP peer's drop statistics output
drop_stats_output = peer_detail.get("dropStats", {})
# If no specific drop statistics were provided, check all of them
if not drop_statistics:
drop_statistics = drop_stats_output
# Verify BGP peer's drop stats
drop_stats_not_ok = {
drop_stat: drop_stats_output.get(drop_stat, "Not Found") for drop_stat in drop_statistics if drop_stats_output.get(drop_stat, "Not Found")
}
if any(drop_stats_not_ok):
failures[peer] = {vrf: drop_stats_not_ok}
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n{failures}")
class VerifyBGPPeerUpdateErrors(AntaTest):
"""Verifies BGP update error counters for the provided BGP IPv4 peer(s).
By default, all update error counters will be checked for any non-zero values.
An optional list of specific update error counters can be provided for granular testing.
Note: For "disabledAfiSafi" error counter field, checking that it's not "None" versus 0.
Expected Results
----------------
* Success: The test will pass if the BGP peer's update error counter(s) are zero/None.
* Failure: The test will fail if the BGP peer's update error counter(s) are non-zero/not None/Not Found or
peer is not configured.
Examples
--------
```yaml
anta.tests.routing:
bgp:
- VerifyBGPPeerUpdateErrors:
bgp_peers:
- peer_address: 172.30.11.1
vrf: default
update_errors:
- inUpdErrWithdraw
```
"""
name = "VerifyBGPPeerUpdateErrors"
description = "Verifies the update error counters of a BGP IPv4 peer."
categories: ClassVar[list[str]] = ["bgp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)]
class Input(AntaTest.Input):
"""Input model for the VerifyBGPPeerUpdateErrors test."""
bgp_peers: list[BgpPeer]
"""List of BGP peers"""
class BgpPeer(BaseModel):
"""Model for a BGP peer."""
peer_address: IPv4Address
"""IPv4 address of a BGP peer."""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
update_errors: list[BgpUpdateError] | None = None
"""Optional list of update error counters to be verified. If not provided, test will verifies all the update error counters."""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each BGP peer in the input list."""
return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyBGPPeerUpdateErrors."""
failures: dict[Any, Any] = {}
for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers):
peer = command.params.peer
vrf = command.params.vrf
update_error_counters = input_entry.update_errors
# Verify BGP peer.
if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None:
failures[peer] = {vrf: "Not configured"}
continue
# Getting the BGP peer's error counters output.
error_counters_output = peer_detail.get("peerInUpdateErrors", {})
# If no specific update error counters were provided, check all of them.
if not update_error_counters:
update_error_counters = error_counters_output
# Verify the error counters.
error_counters_not_ok = {
("disabledAfiSafi" if error_counter == "disabledAfiSafi" else error_counter): value
for error_counter in update_error_counters
if (value := error_counters_output.get(error_counter, "Not Found")) != "None" and value != 0
}
if error_counters_not_ok:
failures[peer] = {vrf: error_counters_not_ok}
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following BGP peers are not configured or have non-zero update error counters:\n{failures}")
class VerifyBgpRouteMaps(AntaTest):
"""Verifies BGP inbound and outbound route-maps of BGP IPv4 peer(s).
Expected Results
----------------
* Success: The test will pass if the correct route maps are applied in the correct direction (inbound or outbound) for IPv4 BGP peers in the specified VRF.
* Failure: The test will fail if BGP peers are not configured or any neighbor has an incorrect or missing route map in either the inbound or outbound direction.
Examples
--------
```yaml
anta.tests.routing:
bgp:
- VerifyBgpRouteMaps:
bgp_peers:
- peer_address: 172.30.11.1
vrf: default
inbound_route_map: RM-MLAG-PEER-IN
outbound_route_map: RM-MLAG-PEER-OUT
```
"""
name = "VerifyBgpRouteMaps"
description = "Verifies BGP inbound and outbound route-maps of BGP IPv4 peer(s)."
categories: ClassVar[list[str]] = ["bgp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)]
class Input(AntaTest.Input):
"""Input model for the VerifyBgpRouteMaps test."""
bgp_peers: list[BgpPeer]
"""List of BGP peers"""
class BgpPeer(BaseModel):
"""Model for a BGP peer."""
peer_address: IPv4Address
"""IPv4 address of a BGP peer."""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
inbound_route_map: str | None = None
"""Inbound route map applied, defaults to None."""
outbound_route_map: str | None = None
"""Outbound route map applied, defaults to None."""
@model_validator(mode="after")
def validate_inputs(self) -> Self:
"""Validate the inputs provided to the BgpPeer class.
At least one of 'inbound' or 'outbound' route-map must be provided.
"""
if not (self.inbound_route_map or self.outbound_route_map):
msg = "At least one of 'inbound_route_map' or 'outbound_route_map' must be provided."
raise ValueError(msg)
return self
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each BGP peer in the input list."""
return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyBgpRouteMaps."""
failures: dict[Any, Any] = {}
for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers):
peer = str(input_entry.peer_address)
vrf = input_entry.vrf
inbound_route_map = input_entry.inbound_route_map
outbound_route_map = input_entry.outbound_route_map
failure: dict[Any, Any] = {vrf: {}}
# Verify BGP peer.
if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None:
failures[peer] = {vrf: "Not configured"}
continue
# Verify Inbound route-map
if inbound_route_map and (inbound_map := peer_detail.get("routeMapInbound", "Not Configured")) != inbound_route_map:
failure[vrf].update({"Inbound route-map": inbound_map})
# Verify Outbound route-map
if outbound_route_map and (outbound_map := peer_detail.get("routeMapOutbound", "Not Configured")) != outbound_route_map:
failure[vrf].update({"Outbound route-map": outbound_map})
if failure[vrf]:
failures[peer] = failure
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(
f"The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n{failures}"
)
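# Illustrative sketch only: an assumed `failures` dictionary when the inbound
# route-map differs from the expected one and no outbound route-map is applied
# (the route-map name below is hypothetical).
EXAMPLE_ROUTE_MAP_FAILURES = {
    "172.30.11.1": {"default": {"Inbound route-map": "RM-DEFAULT-IN", "Outbound route-map": "Not Configured"}},
}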
class VerifyBGPPeerRouteLimit(AntaTest):
"""Verifies the maximum routes and optionally verifies the maximum routes warning limit for the provided BGP IPv4 peer(s).
Expected Results
----------------
* Success: The test will pass if the BGP peer's maximum routes and, if provided, the maximum routes warning limit are equal to the given limits.
* Failure: The test will fail if the BGP peer's maximum routes do not match the given limit, or if the maximum routes warning limit is provided
and does not match the given limit, or if the peer is not configured.
Examples
--------
```yaml
anta.tests.routing:
bgp:
- VerifyBGPPeerRouteLimit:
bgp_peers:
- peer_address: 172.30.11.1
vrf: default
maximum_routes: 12000
warning_limit: 10000
```
"""
name = "VerifyBGPPeerRouteLimit"
description = "Verifies maximum routes and maximum routes warning limit for the provided BGP IPv4 peer(s)."
categories: ClassVar[list[str]] = ["bgp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)]
class Input(AntaTest.Input):
"""Input model for the VerifyBGPPeerRouteLimit test."""
bgp_peers: list[BgpPeer]
"""List of BGP peers"""
class BgpPeer(BaseModel):
"""Model for a BGP peer."""
peer_address: IPv4Address
"""IPv4 address of a BGP peer."""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
maximum_routes: int = Field(ge=0, le=4294967294)
"""The maximum allowable number of BGP routes, `0` means unlimited."""
warning_limit: int = Field(default=0, ge=0, le=4294967294)
"""Optional maximum routes warning limit. If not provided, it defaults to `0` meaning no warning limit."""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each BGP peer in the input list."""
return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyBGPPeerRouteLimit."""
failures: dict[Any, Any] = {}
for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers):
peer = str(input_entry.peer_address)
vrf = input_entry.vrf
maximum_routes = input_entry.maximum_routes
warning_limit = input_entry.warning_limit
failure: dict[Any, Any] = {}
# Verify BGP peer.
if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None:
failures[peer] = {vrf: "Not configured"}
continue
# Verify maximum routes configured.
if (actual_routes := peer_detail.get("maxTotalRoutes", "Not Found")) != maximum_routes:
failure["Maximum total routes"] = actual_routes
# Verify warning limit if given.
if warning_limit and (actual_warning_limit := peer_detail.get("totalRoutesWarnLimit", "Not Found")) != warning_limit:
failure["Warning limit"] = actual_warning_limit
# Update failures if any.
if failure:
failures[peer] = {vrf: failure}
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following BGP peer(s) are not configured or maximum routes and maximum routes warning limit is not correct:\n{failures}")


@ -7,13 +7,23 @@
# mypy: disable-error-code=attr-defined # mypy: disable-error-code=attr-defined
from __future__ import annotations from __future__ import annotations
from ipaddress import IPv4Address, ip_interface from functools import cache
from typing import ClassVar, Literal from ipaddress import IPv4Address, IPv4Interface
from typing import TYPE_CHECKING, ClassVar, Literal
from pydantic import model_validator from pydantic import model_validator
from anta.custom_types import PositiveInteger
from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.models import AntaCommand, AntaTemplate, AntaTest
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
class VerifyRoutingProtocolModel(AntaTest): class VerifyRoutingProtocolModel(AntaTest):
"""Verifies the configured routing protocol model is the one we expect. """Verifies the configured routing protocol model is the one we expect.
@ -83,13 +93,13 @@ class VerifyRoutingTableSize(AntaTest):
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyRoutingTableSize test.""" """Input model for the VerifyRoutingTableSize test."""
minimum: int minimum: PositiveInteger
"""Expected minimum routing table size.""" """Expected minimum routing table size."""
maximum: int maximum: PositiveInteger
"""Expected maximum routing table size.""" """Expected maximum routing table size."""
@model_validator(mode="after") # type: ignore[misc] @model_validator(mode="after")
def check_min_max(self) -> AntaTest.Input: def check_min_max(self) -> Self:
"""Validate that maximum is greater than minimum.""" """Validate that maximum is greater than minimum."""
if self.minimum > self.maximum: if self.minimum > self.maximum:
msg = f"Minimum {self.minimum} is greater than maximum {self.maximum}" msg = f"Minimum {self.minimum} is greater than maximum {self.maximum}"
@ -131,7 +141,10 @@ class VerifyRoutingTableEntry(AntaTest):
name = "VerifyRoutingTableEntry" name = "VerifyRoutingTableEntry"
description = "Verifies that the provided routes are present in the routing table of a specified VRF." description = "Verifies that the provided routes are present in the routing table of a specified VRF."
categories: ClassVar[list[str]] = ["routing"] categories: ClassVar[list[str]] = ["routing"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip route vrf {vrf} {route}", revision=4)] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [
AntaTemplate(template="show ip route vrf {vrf} {route}", revision=4),
AntaTemplate(template="show ip route vrf {vrf}", revision=4),
]
class Input(AntaTest.Input): class Input(AntaTest.Input):
"""Input model for the VerifyRoutingTableEntry test.""" """Input model for the VerifyRoutingTableEntry test."""
@ -140,20 +153,35 @@ class VerifyRoutingTableEntry(AntaTest):
"""VRF context. Defaults to `default` VRF.""" """VRF context. Defaults to `default` VRF."""
routes: list[IPv4Address] routes: list[IPv4Address]
"""List of routes to verify.""" """List of routes to verify."""
collect: Literal["one", "all"] = "one"
"""Route collect behavior: one=one route per command, all=all routes in vrf per command. Defaults to `one`"""
def render(self, template: AntaTemplate) -> list[AntaCommand]: def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render the template for each route in the input list.""" """Render the template for the input vrf."""
if template == VerifyRoutingTableEntry.commands[0] and self.inputs.collect == "one":
return [template.render(vrf=self.inputs.vrf, route=route) for route in self.inputs.routes] return [template.render(vrf=self.inputs.vrf, route=route) for route in self.inputs.routes]
if template == VerifyRoutingTableEntry.commands[1] and self.inputs.collect == "all":
return [template.render(vrf=self.inputs.vrf)]
return []
@staticmethod
@cache
def ip_interface_ip(route: str) -> IPv4Address:
"""Return the IP address of the provided ip route with mask."""
return IPv4Interface(route).ip
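# Illustrative usage (assumed values): the cached helper strips the prefix length,
# e.g. VerifyRoutingTableEntry.ip_interface_ip("10.1.0.1/32") == IPv4Address("10.1.0.1").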
@AntaTest.anta_test @AntaTest.anta_test
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifyRoutingTableEntry.""" """Main test function for VerifyRoutingTableEntry."""
missing_routes = [] commands_output_route_ips = set()
for command in self.instance_commands: for command in self.instance_commands:
vrf, route = command.params.vrf, command.params.route command_output_vrf = command.json_output["vrfs"][self.inputs.vrf]
if len(routes := command.json_output["vrfs"][vrf]["routes"]) == 0 or route != ip_interface(next(iter(routes))).ip: commands_output_route_ips |= {self.ip_interface_ip(route) for route in command_output_vrf["routes"]}
missing_routes.append(str(route))
missing_routes = [str(route) for route in self.inputs.routes if route not in commands_output_route_ips]
if not missing_routes: if not missing_routes:
self.result.is_success() self.result.is_success()


@ -20,13 +20,15 @@ from anta.tools import get_value
def _count_isis_neighbor(isis_neighbor_json: dict[str, Any]) -> int: def _count_isis_neighbor(isis_neighbor_json: dict[str, Any]) -> int:
"""Count the number of isis neighbors. """Count the number of isis neighbors.
Args Parameters
---- ----------
isis_neighbor_json: The JSON output of the `show isis neighbors` command. isis_neighbor_json
The JSON output of the `show isis neighbors` command.
Returns Returns
------- -------
int: The number of isis neighbors. int
The number of isis neighbors.
""" """
count = 0 count = 0
@ -39,13 +41,15 @@ def _count_isis_neighbor(isis_neighbor_json: dict[str, Any]) -> int:
def _get_not_full_isis_neighbors(isis_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]: def _get_not_full_isis_neighbors(isis_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]:
"""Return the isis neighbors whose adjacency state is not `up`. """Return the isis neighbors whose adjacency state is not `up`.
Args Parameters
---- ----------
isis_neighbor_json: The JSON output of the `show isis neighbors` command. isis_neighbor_json
The JSON output of the `show isis neighbors` command.
Returns Returns
------- -------
list[dict[str, Any]]: A list of isis neighbors whose adjacency state is not `UP`. list[dict[str, Any]]
A list of isis neighbors whose adjacency state is not `UP`.
""" """
return [ return [
@ -66,14 +70,17 @@ def _get_not_full_isis_neighbors(isis_neighbor_json: dict[str, Any]) -> list[dic
def _get_full_isis_neighbors(isis_neighbor_json: dict[str, Any], neighbor_state: Literal["up", "down"] = "up") -> list[dict[str, Any]]: def _get_full_isis_neighbors(isis_neighbor_json: dict[str, Any], neighbor_state: Literal["up", "down"] = "up") -> list[dict[str, Any]]:
"""Return the isis neighbors whose adjacency state is `up`. """Return the isis neighbors whose adjacency state is `up`.
Args Parameters
---- ----------
isis_neighbor_json: The JSON output of the `show isis neighbors` command. isis_neighbor_json
neighbor_state: Value of the neihbor state we are looking for. Default up The JSON output of the `show isis neighbors` command.
neighbor_state
Value of the neihbor state we are looking for. Defaults to `up`.
Returns Returns
------- -------
list[dict[str, Any]]: A list of isis neighbors whose adjacency state is not `UP`. list[dict[str, Any]]
A list of isis neighbors whose adjacency state is not `UP`.
""" """
return [ return [
@ -597,10 +604,6 @@ class VerifyISISSegmentRoutingTunnels(AntaTest):
This method performs the main test logic for verifying ISIS Segment Routing tunnels. This method performs the main test logic for verifying ISIS Segment Routing tunnels.
It checks the command output, initiates defaults, and performs various checks on the tunnels. It checks the command output, initiates defaults, and performs various checks on the tunnels.
Returns
-------
None
""" """
command_output = self.instance_commands[0].json_output command_output = self.instance_commands[0].json_output
self.result.is_success() self.result.is_success()
@ -638,13 +641,17 @@ class VerifyISISSegmentRoutingTunnels(AntaTest):
""" """
Check if the tunnel type specified in `via_input` matches any of the tunnel types in `eos_entry`. Check if the tunnel type specified in `via_input` matches any of the tunnel types in `eos_entry`.
Args: Parameters
via_input (VerifyISISSegmentRoutingTunnels.Input.Entry.Vias): The input tunnel type to check. ----------
eos_entry (dict[str, Any]): The EOS entry containing the tunnel types. via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias
The input tunnel type to check.
eos_entry : dict[str, Any]
The EOS entry containing the tunnel types.
Returns Returns
------- -------
bool: True if the tunnel type matches any of the tunnel types in `eos_entry`, False otherwise. bool
True if the tunnel type matches any of the tunnel types in `eos_entry`, False otherwise.
""" """
if via_input.type is not None: if via_input.type is not None:
return any( return any(
@ -662,13 +669,17 @@ class VerifyISISSegmentRoutingTunnels(AntaTest):
""" """
Check if the tunnel nexthop matches the given input. Check if the tunnel nexthop matches the given input.
Args: Parameters
via_input (VerifyISISSegmentRoutingTunnels.Input.Entry.Vias): The input via object. ----------
eos_entry (dict[str, Any]): The EOS entry dictionary. via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias
The input via object.
eos_entry : dict[str, Any]
The EOS entry dictionary.
Returns Returns
------- -------
bool: True if the tunnel nexthop matches, False otherwise. bool
True if the tunnel nexthop matches, False otherwise.
""" """
if via_input.nexthop is not None: if via_input.nexthop is not None:
return any( return any(
@ -686,13 +697,17 @@ class VerifyISISSegmentRoutingTunnels(AntaTest):
""" """
Check if the tunnel interface exists in the given EOS entry. Check if the tunnel interface exists in the given EOS entry.
Args: Parameters
via_input (VerifyISISSegmentRoutingTunnels.Input.Entry.Vias): The input via object. ----------
eos_entry (dict[str, Any]): The EOS entry dictionary. via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias
The input via object.
eos_entry : dict[str, Any]
The EOS entry dictionary.
Returns Returns
------- -------
bool: True if the tunnel interface exists, False otherwise. bool
True if the tunnel interface exists, False otherwise.
""" """
if via_input.interface is not None: if via_input.interface is not None:
return any( return any(
@ -710,13 +725,17 @@ class VerifyISISSegmentRoutingTunnels(AntaTest):
""" """
Check if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias. Check if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias.
Args: Parameters
via_input (VerifyISISSegmentRoutingTunnels.Input.Entry.Vias): The input vias to check. ----------
eos_entry (dict[str, Any]): The EOS entry to compare against. via_input : VerifyISISSegmentRoutingTunnels.Input.Entry.Vias
The input vias to check.
eos_entry : dict[str, Any])
The EOS entry to compare against.
Returns Returns
------- -------
bool: True if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias, False otherwise. bool
True if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias, False otherwise.
""" """
if via_input.tunnel_id is not None: if via_input.tunnel_id is not None:
return any( return any(


@ -18,13 +18,15 @@ if TYPE_CHECKING:
def _count_ospf_neighbor(ospf_neighbor_json: dict[str, Any]) -> int: def _count_ospf_neighbor(ospf_neighbor_json: dict[str, Any]) -> int:
"""Count the number of OSPF neighbors. """Count the number of OSPF neighbors.
Args: Parameters
---- ----------
ospf_neighbor_json: The JSON output of the `show ip ospf neighbor` command. ospf_neighbor_json
The JSON output of the `show ip ospf neighbor` command.
Returns Returns
------- -------
int: The number of OSPF neighbors. int
The number of OSPF neighbors.
""" """
count = 0 count = 0
@ -37,13 +39,15 @@ def _count_ospf_neighbor(ospf_neighbor_json: dict[str, Any]) -> int:
def _get_not_full_ospf_neighbors(ospf_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]: def _get_not_full_ospf_neighbors(ospf_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]:
"""Return the OSPF neighbors whose adjacency state is not `full`. """Return the OSPF neighbors whose adjacency state is not `full`.
Args: Parameters
---- ----------
ospf_neighbor_json: The JSON output of the `show ip ospf neighbor` command. ospf_neighbor_json
The JSON output of the `show ip ospf neighbor` command.
Returns Returns
------- -------
list[dict[str, Any]]: A list of OSPF neighbors whose adjacency state is not `full`. list[dict[str, Any]]
A list of OSPF neighbors whose adjacency state is not `full`.
""" """
return [ return [
@ -63,13 +67,15 @@ def _get_not_full_ospf_neighbors(ospf_neighbor_json: dict[str, Any]) -> list[dic
def _get_ospf_max_lsa_info(ospf_process_json: dict[str, Any]) -> list[dict[str, Any]]: def _get_ospf_max_lsa_info(ospf_process_json: dict[str, Any]) -> list[dict[str, Any]]:
"""Return information about OSPF instances and their LSAs. """Return information about OSPF instances and their LSAs.
Args: Parameters
---- ----------
ospf_process_json: OSPF process information in JSON format. ospf_process_json
OSPF process information in JSON format.
Returns Returns
------- -------
list[dict[str, Any]]: A list of dictionaries containing OSPF LSAs information. list[dict[str, Any]]
A list of dictionaries containing OSPF LSAs information.
""" """
return [ return [


@ -9,7 +9,7 @@ from __future__ import annotations
# mypy: disable-error-code=attr-defined # mypy: disable-error-code=attr-defined
from datetime import datetime, timezone from datetime import datetime, timezone
from ipaddress import IPv4Address from ipaddress import IPv4Address
from typing import ClassVar from typing import TYPE_CHECKING, ClassVar, get_args
from pydantic import BaseModel, Field, model_validator from pydantic import BaseModel, Field, model_validator
@ -17,6 +17,14 @@ from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, PositiveInteger
from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools import get_failed_logs, get_item, get_value from anta.tools import get_failed_logs, get_item, get_value
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
class VerifySSHStatus(AntaTest): class VerifySSHStatus(AntaTest):
"""Verifies if the SSHD agent is disabled in the default VRF. """Verifies if the SSHD agent is disabled in the default VRF.
@ -47,9 +55,9 @@ class VerifySSHStatus(AntaTest):
try: try:
line = next(line for line in command_output.split("\n") if line.startswith("SSHD status")) line = next(line for line in command_output.split("\n") if line.startswith("SSHD status"))
except StopIteration: except StopIteration:
self.result.is_error("Could not find SSH status in returned output.") self.result.is_failure("Could not find SSH status in returned output.")
return return
status = line.split("is ")[1] status = line.split()[-1]
if status == "disabled": if status == "disabled":
self.result.is_success() self.result.is_success()
@ -416,19 +424,19 @@ class VerifyAPISSLCertificate(AntaTest):
"""The encryption algorithm key size of the certificate.""" """The encryption algorithm key size of the certificate."""
@model_validator(mode="after") @model_validator(mode="after")
def validate_inputs(self: BaseModel) -> BaseModel: def validate_inputs(self) -> Self:
"""Validate the key size provided to the APISSLCertificates class. """Validate the key size provided to the APISSLCertificates class.
If encryption_algorithm is RSA then key_size should be in {2048, 3072, 4096}. If encryption_algorithm is RSA then key_size should be in {2048, 3072, 4096}.
If encryption_algorithm is ECDSA then key_size should be in {256, 384, 521}. If encryption_algorithm is ECDSA then key_size should be in {256, 384, 521}.
""" """
if self.encryption_algorithm == "RSA" and self.key_size not in RsaKeySize.__args__: if self.encryption_algorithm == "RSA" and self.key_size not in get_args(RsaKeySize):
msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for RSA encryption. Allowed sizes are {RsaKeySize.__args__}." msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for RSA encryption. Allowed sizes are {get_args(RsaKeySize)}."
raise ValueError(msg) raise ValueError(msg)
if self.encryption_algorithm == "ECDSA" and self.key_size not in EcdsaKeySize.__args__: if self.encryption_algorithm == "ECDSA" and self.key_size not in get_args(EcdsaKeySize):
msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for ECDSA encryption. Allowed sizes are {EcdsaKeySize.__args__}." msg = f"`{self.certificate_name}` key size {self.key_size} is invalid for ECDSA encryption. Allowed sizes are {get_args(EcdsaKeySize)}."
raise ValueError(msg) raise ValueError(msg)
return self return self
@ -820,3 +828,37 @@ class VerifySpecificIPSecConn(AntaTest):
self.result.is_failure( self.result.is_failure(
f"IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` for peer `{peer}` is not found." f"IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` for peer `{peer}` is not found."
) )
class VerifyHardwareEntropy(AntaTest):
"""
Verifies hardware entropy generation is enabled on device.
Expected Results
----------------
* Success: The test will pass if hardware entropy generation is enabled.
* Failure: The test will fail if hardware entropy generation is not enabled.
Examples
--------
```yaml
anta.tests.security:
- VerifyHardwareEntropy:
```
"""
name = "VerifyHardwareEntropy"
description = "Verifies hardware entropy generation is enabled on device."
categories: ClassVar[list[str]] = ["security"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management security")]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyHardwareEntropy."""
command_output = self.instance_commands[0].json_output
# Check if hardware entropy generation is enabled.
if not command_output.get("hardwareEntropyEnabled"):
self.result.is_failure("Hardware entropy generation is disabled.")
else:
self.result.is_success()
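# Illustrative sketch only: an assumed, minimal "show management security" reply;
# the test passes on the value below and fails when the flag is False or absent.
EXAMPLE_MANAGEMENT_SECURITY_OUTPUT = {"hardwareEntropyEnabled": True}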


@ -7,10 +7,11 @@
# mypy: disable-error-code=attr-defined # mypy: disable-error-code=attr-defined
from __future__ import annotations from __future__ import annotations
from typing import TYPE_CHECKING, ClassVar from typing import TYPE_CHECKING, ClassVar, get_args
from anta.custom_types import PositiveInteger from anta.custom_types import PositiveInteger, SnmpErrorCounter, SnmpPdu
from anta.models import AntaCommand, AntaTest from anta.models import AntaCommand, AntaTest
from anta.tools import get_value
if TYPE_CHECKING: if TYPE_CHECKING:
from anta.models import AntaTemplate from anta.models import AntaTemplate
@ -183,8 +184,12 @@ class VerifySnmpLocation(AntaTest):
@AntaTest.anta_test @AntaTest.anta_test
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifySnmpLocation.""" """Main test function for VerifySnmpLocation."""
location = self.instance_commands[0].json_output["location"]["location"] # Verifies the SNMP location is configured.
if not (location := get_value(self.instance_commands[0].json_output, "location.location")):
self.result.is_failure("SNMP location is not configured.")
return
# Verifies the expected SNMP location.
if location != self.inputs.location: if location != self.inputs.location:
self.result.is_failure(f"Expected `{self.inputs.location}` as the location, but found `{location}` instead.") self.result.is_failure(f"Expected `{self.inputs.location}` as the location, but found `{location}` instead.")
else: else:
@ -222,9 +227,126 @@ class VerifySnmpContact(AntaTest):
@AntaTest.anta_test @AntaTest.anta_test
def test(self) -> None: def test(self) -> None:
"""Main test function for VerifySnmpContact.""" """Main test function for VerifySnmpContact."""
contact = self.instance_commands[0].json_output["contact"]["contact"] # Verifies the SNMP contact is configured.
if not (contact := get_value(self.instance_commands[0].json_output, "contact.contact")):
self.result.is_failure("SNMP contact is not configured.")
return
# Verifies the expected SNMP contact.
if contact != self.inputs.contact: if contact != self.inputs.contact:
self.result.is_failure(f"Expected `{self.inputs.contact}` as the contact, but found `{contact}` instead.") self.result.is_failure(f"Expected `{self.inputs.contact}` as the contact, but found `{contact}` instead.")
else: else:
self.result.is_success() self.result.is_success()
class VerifySnmpPDUCounters(AntaTest):
"""Verifies the SNMP PDU counters.
By default, all SNMP PDU counters will be checked for any non-zero values.
An optional list of specific SNMP PDU(s) can be provided for granular testing.
Expected Results
----------------
* Success: The test will pass if the SNMP PDU counter(s) are greater than zero.
* Failure: The test will fail if the SNMP PDU counter(s) are zero/None/Not Found.
Examples
--------
```yaml
anta.tests.snmp:
- VerifySnmpPDUCounters:
pdus:
- outTrapPdus
- inGetNextPdus
```
"""
name = "VerifySnmpPDUCounters"
description = "Verifies the SNMP PDU counters."
categories: ClassVar[list[str]] = ["snmp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifySnmpPDUCounters test."""
pdus: list[SnmpPdu] | None = None
"""Optional list of SNMP PDU counters to be verified. If not provided, test will verifies all PDU counters."""
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifySnmpPDUCounters."""
snmp_pdus = self.inputs.pdus
command_output = self.instance_commands[0].json_output
# Verify SNMP PDU counters.
if not (pdu_counters := get_value(command_output, "counters")):
self.result.is_failure("SNMP counters not found.")
return
# If no specific SNMP PDUs were provided, check all PDU counters.
if not snmp_pdus:
snmp_pdus = list(get_args(SnmpPdu))
failures = {pdu: value for pdu in snmp_pdus if (value := pdu_counters.get(pdu, "Not Found")) == "Not Found" or value == 0}
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following SNMP PDU counters are not found or have zero PDU counters:\n{failures}")
class VerifySnmpErrorCounters(AntaTest):
"""Verifies the SNMP error counters.
By default, all error counters will be checked for any non-zero values.
An optional list of specific error counters can be provided for granular testing.
Expected Results
----------------
* Success: The test will pass if the SNMP error counter(s) are zero/None.
* Failure: The test will fail if the SNMP error counter(s) are non-zero, not found, or the SNMP counters are not configured.
Examples
--------
```yaml
anta.tests.snmp:
- VerifySnmpErrorCounters:
error_counters:
- inVersionErrs
- inBadCommunityNames
"""
name = "VerifySnmpErrorCounters"
description = "Verifies the SNMP error counters."
categories: ClassVar[list[str]] = ["snmp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifySnmpErrorCounters test."""
error_counters: list[SnmpErrorCounter] | None = None
"""Optional list of SNMP error counters to be verified. If not provided, test will verifies all error counters."""
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifySnmpErrorCounters."""
error_counters = self.inputs.error_counters
command_output = self.instance_commands[0].json_output
# Verify SNMP error counters.
if not (snmp_counters := get_value(command_output, "counters")):
self.result.is_failure("SNMP counters not found.")
return
# If SNMP error counters are not provided, check all error counters.
if not error_counters:
error_counters = list(get_args(SnmpErrorCounter))
error_counters_not_ok = {counter: value for counter in error_counters if (value := snmp_counters.get(counter))}
# Check if any failures
if not error_counters_not_ok:
self.result.is_success()
else:
self.result.is_failure(f"The following SNMP error counters are not found or have non-zero error counters:\n{error_counters_not_ok}")

View file

@ -7,7 +7,7 @@
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from typing import Any, ClassVar, Literal
from pydantic import Field
@ -259,3 +259,64 @@ class VerifySTPRootPriority(AntaTest):
self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}") self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}")
else: else:
self.result.is_success() self.result.is_success()
class VerifyStpTopologyChanges(AntaTest):
"""Verifies the number of changes across all interfaces in the Spanning Tree Protocol (STP) topology is below a threshold.
Expected Results
----------------
* Success: The test will pass if the total number of changes across all interfaces is less than the specified threshold.
* Failure: The test will fail if the total number of changes across all interfaces meets or exceeds the specified threshold,
indicating potential instability in the topology.
Examples
--------
```yaml
anta.tests.stp:
- VerifyStpTopologyChanges:
threshold: 10
```
"""
name = "VerifyStpTopologyChanges"
description = "Verifies the number of changes across all interfaces in the Spanning Tree Protocol (STP) topology is below a threshold."
categories: ClassVar[list[str]] = ["stp"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree topology status detail", revision=1)]
class Input(AntaTest.Input):
"""Input model for the VerifyStpTopologyChanges test."""
threshold: int
"""The threshold number of changes in the STP topology."""
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyStpTopologyChanges."""
failures: dict[str, Any] = {"topologies": {}}
command_output = self.instance_commands[0].json_output
stp_topologies = command_output.get("topologies", {})
# verifies all available topologies except the "NoStp" topology.
stp_topologies.pop("NoStp", None)
# Verify the STP topology(s).
if not stp_topologies:
self.result.is_failure("STP is not configured.")
return
# Verifies the number of changes across all interfaces
for topology, topology_details in stp_topologies.items():
interfaces = {
interface: {"Number of changes": num_of_changes}
for interface, details in topology_details.get("interfaces", {}).items()
if (num_of_changes := details.get("numChanges")) > self.inputs.threshold
}
if interfaces:
failures["topologies"][topology] = interfaces
if failures["topologies"]:
self.result.is_failure(f"The following STP topologies are not configured or number of changes not within the threshold:\n{failures}")
else:
self.result.is_success()

View file

@ -115,3 +115,42 @@ class VerifyStunClient(AntaTest):
if actual_stun_data != expected_stun_data:
failed_log = get_failed_logs(expected_stun_data, actual_stun_data)
self.result.is_failure(f"For STUN source `{source_address}:{source_port}`:{failed_log}")
class VerifyStunServer(AntaTest):
"""
Verifies the STUN server status is enabled and running.
Expected Results
----------------
* Success: The test will pass if the STUN server status is enabled and running.
* Failure: The test will fail if the STUN server is disabled or not running.
Examples
--------
```yaml
anta.tests.stun:
- VerifyStunServer:
```
"""
name = "VerifyStunServer"
description = "Verifies the STUN server status is enabled and running."
categories: ClassVar[list[str]] = ["stun"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show stun server status", revision=1)]
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyStunServer."""
command_output = self.instance_commands[0].json_output
status_disabled = not command_output.get("enabled")
not_running = command_output.get("pid") == 0
if status_disabled and not_running:
self.result.is_failure("STUN server status is disabled and not running.")
elif status_disabled:
self.result.is_failure("STUN server status is disabled.")
elif not_running:
self.result.is_failure("STUN server is not running.")
else:
self.result.is_success()

View file

@ -8,10 +8,14 @@
from __future__ import annotations
import re
from ipaddress import IPv4Address
from typing import TYPE_CHECKING, ClassVar
from pydantic import BaseModel, Field
from anta.custom_types import Hostname, PositiveInteger
from anta.models import AntaCommand, AntaTest
from anta.tools import get_failed_logs, get_value
if TYPE_CHECKING:
from anta.models import AntaTemplate
@ -85,9 +89,6 @@ class VerifyReloadCause(AntaTest):
def test(self) -> None:
"""Main test function for VerifyReloadCause."""
command_output = self.instance_commands[0].json_output
if "resetCauses" not in command_output:
self.result.is_error(message="No reload causes available")
return
if len(command_output["resetCauses"]) == 0: if len(command_output["resetCauses"]) == 0:
# No reload causes # No reload causes
self.result.is_success() self.result.is_success()
@ -299,3 +300,93 @@ class VerifyNTP(AntaTest):
else:
data = command_output.split("\n")[0]
self.result.is_failure(f"The device is not synchronized with the configured NTP server(s): '{data}'")
class VerifyNTPAssociations(AntaTest):
"""Verifies the Network Time Protocol (NTP) associations.
Expected Results
----------------
* Success: The test will pass if the Primary NTP server (marked as preferred) has the condition 'sys.peer' and
all other NTP servers have the condition 'candidate'.
* Failure: The test will fail if the Primary NTP server (marked as preferred) does not have the condition 'sys.peer' or
if any other NTP server does not have the condition 'candidate'.
Examples
--------
```yaml
anta.tests.system:
- VerifyNTPAssociations:
ntp_servers:
- server_address: 1.1.1.1
preferred: True
stratum: 1
- server_address: 2.2.2.2
stratum: 2
- server_address: 3.3.3.3
stratum: 2
```
"""
name = "VerifyNTPAssociations"
description = "Verifies the Network Time Protocol (NTP) associations."
categories: ClassVar[list[str]] = ["system"]
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ntp associations")]
class Input(AntaTest.Input):
"""Input model for the VerifyNTPAssociations test."""
ntp_servers: list[NTPServer]
"""List of NTP servers."""
class NTPServer(BaseModel):
"""Model for a NTP server."""
server_address: Hostname | IPv4Address
"""The NTP server address as an IPv4 address or hostname. The NTP server name defined in the running configuration
of the device may change during DNS resolution, which is not handled in ANTA. Please provide the DNS-resolved server name.
For example, 'ntp.example.com' in the configuration might resolve to 'ntp3.example.com' in the device output."""
preferred: bool = False
"""Optional preferred for NTP server. If not provided, it defaults to `False`."""
stratum: int = Field(ge=0, le=16)
"""NTP stratum level (0 to 15) where 0 is the reference clock and 16 indicates unsynchronized.
Values should be between 0 and 15 for valid synchronization and 16 represents an out-of-sync state."""
@AntaTest.anta_test
def test(self) -> None:
"""Main test function for VerifyNTPAssociations."""
failures: str = ""
if not (peer_details := get_value(self.instance_commands[0].json_output, "peers")):
self.result.is_failure("None of NTP peers are not configured.")
return
# Iterate over each NTP server.
for ntp_server in self.inputs.ntp_servers:
server_address = str(ntp_server.server_address)
preferred = ntp_server.preferred
stratum = ntp_server.stratum
# Check if NTP server details exists.
if (peer_detail := get_value(peer_details, server_address, separator="..")) is None:
failures += f"NTP peer {server_address} is not configured.\n"
continue
# Collecting the expected NTP peer details.
expected_peer_details = {"condition": "candidate", "stratum": stratum}
if preferred:
expected_peer_details["condition"] = "sys.peer"
# Collecting the actual NTP peer details.
actual_peer_details = {"condition": get_value(peer_detail, "condition"), "stratum": get_value(peer_detail, "stratumLevel")}
# Collecting failures logs if any.
failure_logs = get_failed_logs(expected_peer_details, actual_peer_details)
if failure_logs:
failures += f"For NTP peer {server_address}:{failure_logs}\n"
# Check if there are any failures.
if not failures:
self.result.is_success()
else:
self.result.is_failure(failures)

View file

@ -8,10 +8,13 @@ from __future__ import annotations
import cProfile
import os
import pstats
import re
from functools import wraps
from time import perf_counter
from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast
from anta.constants import ACRONYM_CATEGORIES
from anta.custom_types import REGEXP_PATH_MARKERS
from anta.logger import format_td
if TYPE_CHECKING:
@ -32,14 +35,17 @@ def get_failed_logs(expected_output: dict[Any, Any], actual_output: dict[Any, An
Returns the failed log or an empty string if there is no difference between the expected and actual output.
Parameters
----------
expected_output
Expected output of a test.
actual_output
Actual output of a test
Returns
-------
str
Failed log of a test.
"""
failed_logs = []
@ -65,18 +71,20 @@ def custom_division(numerator: float, denominator: float) -> int | float:
Parameters
----------
numerator
The numerator.
denominator
The denominator.
Returns
-------
Union[int, float]
The result of the division.
"""
result = numerator / denominator
return int(result) if result.is_integer() else result
# pylint: disable=too-many-arguments
def get_dict_superset(
list_of_dicts: list[dict[Any, Any]],
input_dict: dict[Any, Any],
@ -136,7 +144,6 @@ def get_dict_superset(
return default
# pylint: disable=too-many-arguments
def get_value(
dictionary: dict[Any, Any],
key: str,
@ -193,7 +200,6 @@ def get_value(
return value
# pylint: disable=too-many-arguments
def get_item(
list_of_dicts: list[dict[Any, Any]],
key: Any,
@ -302,13 +308,15 @@ def cprofile(sort_by: str = "cumtime") -> Callable[[F], F]:
profile is conditionally enabled based on the presence of ANTA_CPROFILE environment variable.
Expect to decorate an async function.
Parameters
----------
sort_by
The criterion to sort the profiling results. Default is 'cumtime'.
Returns
-------
Callable
The decorated function with conditional profiling.
"""
def decorator(func: F) -> F:
@ -318,13 +326,16 @@ def cprofile(sort_by: str = "cumtime") -> Callable[[F], F]:
If `ANTA_CPROFILE` is set, cProfile is enabled and dumps the stats to the file.
Parameters
----------
*args
Arbitrary positional arguments.
**kwargs
Arbitrary keyword arguments.
Returns
-------
Any
The result of the function call.
"""
cprofile_file = os.environ.get("ANTA_CPROFILE")
@ -346,3 +357,41 @@ def cprofile(sort_by: str = "cumtime") -> Callable[[F], F]:
return cast(F, wrapper)
return decorator
def safe_command(command: str) -> str:
"""Return a sanitized command.
Parameters
----------
command
The command to sanitize.
Returns
-------
str
The sanitized command.
"""
return re.sub(rf"{REGEXP_PATH_MARKERS}", "_", command)
def convert_categories(categories: list[str]) -> list[str]:
"""Convert categories for reports.
If the category is part of the defined acronyms, transform it to upper case;
otherwise, capitalize the first letter.
Parameters
----------
categories
A list of categories
Returns
-------
list[str]
The list of converted categories
"""
if isinstance(categories, list):
return [" ".join(word.upper() if word.lower() in ACRONYM_CATEGORIES else word.title() for word in category.split()) for category in categories]
msg = f"Wrong input type '{type(categories)}' for convert_categories."
raise TypeError(msg)
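A hedged usage sketch of the two helpers above; the sample outputs assume `ACRONYM_CATEGORIES` contains lowercase acronyms such as "bgp" and that `REGEXP_PATH_MARKERS` matches path-like characters such as "/":

```python
# Sketch only; exact results depend on ACRONYM_CATEGORIES and REGEXP_PATH_MARKERS.
from anta.tools import convert_categories, safe_command

# Acronym categories are upper-cased, every other word is title-cased.
print(convert_categories(["bgp", "field notices"]))  # -> ['BGP', 'Field Notices'] (if "bgp" is a known acronym)

# Characters matched by REGEXP_PATH_MARKERS are replaced with "_",
# which is handy for building filesystem-safe names from CLI commands.
print(safe_command("show interfaces Ethernet1/1"))   # -> e.g. "show interfaces Ethernet1_1"
```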

View file

@ -37,12 +37,17 @@ async def port_check_url(url: URL, timeout: int = 5) -> bool:
""" """
Open the port designated by the URL given the timeout in seconds. Open the port designated by the URL given the timeout in seconds.
If the port is available then return True; False otherwise.
Parameters Parameters
---------- ----------
url: The URL that provides the target system url
timeout: Time to await for the port to open in seconds The URL that provides the target system.
timeout
Time to await for the port to open in seconds.
Returns
-------
bool
If the port is available then return True; False otherwise.
""" """
port = url.port or socket.getservbyname(url.scheme) port = url.port or socket.getservbyname(url.scheme)
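For illustration, a hedged sketch of calling the coroutine documented above (the import path is assumed; the URL type follows the `URL` annotation in the signature):

```python
# Hedged sketch; the module location of port_check_url is assumed, not taken from the diff.
import asyncio

from httpx import URL
from asynceapi.aio_portcheck import port_check_url  # assumed import path

async def main() -> None:
    # True when the eAPI port answers within the timeout, False otherwise.
    reachable = await port_check_url(URL("https://10.0.0.1"), timeout=5)
    print("eAPI reachable" if reachable else "eAPI unreachable")

asyncio.run(main())
```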

View file

@ -52,8 +52,10 @@ class SessionConfig:
Parameters
----------
device
The associated device instance.
name
The name of the config session.
"""
self._device = device
self._cli = device.cli
@ -87,11 +89,15 @@ class SessionConfig:
Returns
-------
dict[str, Any]
Dictionary of native EOS eAPI response; see `status` method for
details.
Examples
--------
Return example:
```
{
"maxSavedSessions": 1,
"maxOpenSessions": 5,
@ -111,6 +117,7 @@ class SessionConfig:
}
}
}
```
"""
return await self._cli("show configuration sessions detail") # type: ignore[return-value] # json outformat returns dict[str, Any]
@ -126,13 +133,15 @@ class SessionConfig:
Returns
-------
dict[str, Any] | None
Dictionary instance of the session status. If the session does not exist,
then this method will return None.
The native eAPI results from JSON output, see example:
Examples
--------
The return is the native eAPI results from JSON output:
```
all results:
{
"maxSavedSessions": 1,
@ -153,14 +162,18 @@ class SessionConfig:
}
}
}
```
If the session name was 'jeremy1', then this method would return:
```
{
"instances": {},
"state": "pending",
"commitUser": "",
"description": ""
}
```
"""
res = await self.status_all()
return res["sessions"].get(self.name)
@ -174,13 +187,13 @@ class SessionConfig:
Parameters
----------
content
The text configuration CLI commands, as a list of strings, that
will be sent to the device. If the parameter is a string, and not
a list, then split the string across linebreaks. In either case
any empty lines will be discarded before they are send to the
device.
replace
When True, the content will replace the existing configuration
on the device.
"""
@ -212,6 +225,9 @@ class SessionConfig:
# configure session <name>
# commit
Parameters
----------
timer
If the timer is specified, format is "hh:mm:ss", then a commit timer is
started. A second commit action must be made to confirm the config
session before the timer expires; otherwise the config-session is
@ -242,6 +258,7 @@ class SessionConfig:
Returns
-------
str
Return a string in diff-patch format.
References
@ -258,17 +275,18 @@ class SessionConfig:
Parameters
----------
filename
The name of the configuration file. The caller is required to
specify the filesystem, for example, the
filename="flash:thisfile.cfg".
replace
When True, the contents of the file will completely replace the
session config for a load-replace behavior.
Raises
------
RuntimeError
If there are any issues with loading the configuration file then a
RuntimeError is raised with the error messages content.
"""
@ -278,7 +296,7 @@ class SessionConfig:
commands.append(f"copy {filename} session-config")
res: list[dict[str, Any]] = await self._cli(commands=commands) # type: ignore[assignment] # JSON outformat of multiple commands returns list[dict[str, Any]]
checks_re = re.compile(r"error|abort|invalid", flags=re.IGNORECASE)
messages = res[-1]["messages"]
if any(map(checks_re.search, messages)):
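Taken together, the session-config workflow documented in this file can be exercised roughly as follows; this is a hedged sketch, with the package name `asynceapi` and the exact method signatures assumed from the docstrings above:

```python
# Hedged sketch, not an excerpt from the repository.
import asyncio

from asynceapi import Device  # package/module name assumed

async def main() -> None:
    dev = Device(host="10.0.0.1", username="admin", password="admin")
    session = dev.config_session("anta-demo")      # SessionConfig bound to this device
    await session.push(["ntp server 10.0.0.254"])  # stage configuration lines in the session
    print(await session.diff())                    # diff-patch formatted string
    await session.commit(timer="00:05:00")         # commit with a confirm timer
    await dev.aclose()                             # httpx.AsyncClient cleanup

asyncio.run(main())
```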

View file

@ -54,7 +54,7 @@ class Device(httpx.AsyncClient):
EAPI_OFMT_OPTIONS = ("json", "text")
EAPI_DEFAULT_OFMT = "json"
def __init__(
self,
host: str | None = None,
username: str | None = None,
@ -71,20 +71,28 @@ class Device(httpx.AsyncClient):
Parameters
----------
host
The EOS target device, either hostname (DNS) or ipaddress.
username
The login user-name; requires the password parameter.
password
The login password; requires the username parameter.
proto
The protocol, http or https, to communicate eAPI with the device.
port
If not provided, the proto value is used to look up the associated
port (http=80, https=443). If provided, overrides the port used to
communicate with the device.
kwargs
Other named keyword arguments; some of them are used by this function
(cf. Other Parameters section below), the rest are passed as-is to the httpx.AsyncClient.
Other Parameters
----------------
base_url : str
If provided, the complete URL to the device eAPI endpoint.
auth :
If provided, used as the httpx authorization initializer value. If
not provided, then username+password is assumed by the Caller and
used to create a BasicAuth instance.
@ -111,11 +119,12 @@ class Device(httpx.AsyncClient):
Returns
-------
bool
True when the device eAPI is accessible, False otherwise.
"""
return await port_check_url(self.base_url)
async def cli( # noqa: PLR0913
self,
command: str | dict[str, Any] | None = None,
commands: Sequence[str | dict[str, Any]] | None = None,
@ -132,18 +141,18 @@ class Device(httpx.AsyncClient):
Parameters
----------
command
A single command to execute; results in a single output response.
commands
A list of commands to execute; results in a list of output responses.
ofmt
Either 'json' or 'text'; indicates the output format for the CLI commands.
version
By default the eAPI will use "version 1" for all API object models.
This driver will, by default, always set version to "latest" so
that the behavior matches the CLI of the device. The caller can
override the "latest" behavior by explicitly setting the version.
suppress_error
When not False, then if the execution of the command would-have
raised an EapiCommandError, rather than raising this exception this
routine will return the value None.
@ -152,13 +161,13 @@ class Device(httpx.AsyncClient):
EapiCommandError, now response would be set to None instead.
response = dev.cli(..., suppress_error=True)
auto_complete
Enables/disables the command auto-complete feature of the eAPI. Per the
documentation:
Allows users to use shorthand commands in eAPI calls. With this
parameter included a user can send 'sh ver' via eAPI to get the
output of 'show version'.
expand_aliases
Enables/disables the command use of User defined alias. Per the
documentation:
Allowed users to provide the expandAliases parameter to eAPI
@ -166,11 +175,12 @@ class Device(httpx.AsyncClient):
For example if an alias is configured as 'sv' for 'show version'
then an API call with sv and the expandAliases parameter will
return the output of show version.
req_id
A unique identifier that will be echoed back by the switch. May be a string or number.
Returns
-------
list[dict[str, Any] | str] | dict[str, Any] | str | None
One or List of output responses, per the description above.
"""
if not any((command, commands)):
@ -189,7 +199,7 @@ class Device(httpx.AsyncClient):
return None
raise
def _jsonrpc_command( # noqa: PLR0913
self,
commands: Sequence[str | dict[str, Any]] | None = None,
ofmt: str | None = None,
@ -199,7 +209,42 @@ class Device(httpx.AsyncClient):
expand_aliases: bool = False,
req_id: int | str | None = None,
) -> dict[str, Any]:
"""Create the JSON-RPC command dictionary object.
Parameters
----------
commands
A list of commands to execute; results in a list of output responses.
ofmt
Either 'json' or 'text'; indicates the output format for the CLI commands.
version
By default the eAPI will use "version 1" for all API object models.
This driver will, by default, always set version to "latest" so
that the behavior matches the CLI of the device. The caller can
override the "latest" behavior by explicitly setting the version.
auto_complete
Enables/disables the command auto-complete feature of the eAPI. Per the
documentation:
Allows users to use shorthand commands in eAPI calls. With this
parameter included a user can send 'sh ver' via eAPI to get the
output of 'show version'.
expand_aliases
Enables/disables the command use of User defined alias. Per the
documentation:
Allowed users to provide the expandAliases parameter to eAPI
calls. This allows users to use aliased commands via the API.
For example if an alias is configured as 'sv' for 'show version'
then an API call with sv and the expandAliases parameter will
return the output of show version.
req_id
A unique identifier that will be echoed back by the switch. May be a string or number.
Returns
-------
dict[str, Any]:
dict containing the JSON payload to run the command.
"""
cmd: dict[str, Any] = {
"jsonrpc": "2.0",
"method": "runCmds",
@ -224,7 +269,7 @@ class Device(httpx.AsyncClient):
Parameters
----------
jsonrpc
The JSON-RPC as created by the `meth`:_jsonrpc_command().
Raises
@ -234,6 +279,7 @@ class Device(httpx.AsyncClient):
Returns
-------
list[dict[str, Any] | str]
The list of command results; either dict or text depending on the
JSON-RPC format parameter.
"""
@ -271,21 +317,27 @@ class Device(httpx.AsyncClient):
len_data = len(cmd_data)
err_at = len_data - 1
err_msg = err_data["message"]
failed_cmd = commands[err_at]
raise EapiCommandError(
passed=[get_output(cmd_data[cmd_i]) for cmd_i, cmd in enumerate(commands[:err_at])],
failed=failed_cmd["cmd"] if isinstance(failed_cmd, dict) else failed_cmd,
errors=cmd_data[err_at]["errors"],
errmsg=err_msg,
not_exec=commands[err_at + 1 :],
)
def config_session(self, name: str) -> SessionConfig:
"""Return a SessionConfig instance bound to this device with the given session name.
Parameters
----------
name
The config-session name.
Returns
-------
SessionConfig
SessionConfig instance bound to this device with the given session name.
"""
return SessionConfig(self, name)
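For completeness, a hedged end-to-end sketch of the `Device` client documented above (the package name and the `check_connection()` helper are assumed; `cli()` and `config_session()` appear in this diff):

```python
# Hedged sketch, not an excerpt from the repository.
import asyncio

from asynceapi import Device  # package/module name assumed

async def main() -> None:
    dev = Device(host="10.0.0.1", username="admin", password="admin")
    if await dev.check_connection():               # method name assumed
        version = await dev.cli(command="show version")
        print(version["version"])                  # "version" key assumed from EOS JSON output
    await dev.aclose()

asyncio.run(main())
```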

View file

@ -24,7 +24,7 @@ class EapiCommandError(RuntimeError):
not_exec: a list of commands that were not executed
"""
def __init__(self, failed: str, errors: list[str], errmsg: str, passed: list[str | dict[str, Any]], not_exec: list[dict[str, Any]]) -> None:
"""Initialize for the EapiCommandError exception."""
self.failed = failed
self.errmsg = errmsg

View file

@ -6,10 +6,10 @@
# Arista Network Test Automation (ANTA) Framework
| **Code** | [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) [![Numpy](https://img.shields.io/badge/Docstring_format-numpy-blue)](https://numpydoc.readthedocs.io/en/latest/format.html) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=aristanetworks_anta&metric=alert_status&branch=main)](https://sonarcloud.io/summary/new_code?id=aristanetworks_anta) [![Coverage](https://img.shields.io/sonar/coverage/aristanetworks_anta/main?server=https%3A%2F%2Fsonarcloud.io&logo=sonarcloud&link=https%3A%2F%2Fsonarcloud.io%2Fsummary%2Foverall%3Fid%3Daristanetworks_anta)](https://sonarcloud.io/summary/overall?id=aristanetworks_anta) |
| :------------: | :-------|
| **License** | [![License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](https://github.com/aristanetworks/anta/blob/main/LICENSE) |
| **GitHub** | [![CI](https://github.com/aristanetworks/anta/actions/workflows/code-testing.yml/badge.svg)](https://github.com/aristanetworks/anta/actions/workflows/code-testing.yml) ![Commit](https://img.shields.io/github/last-commit/aristanetworks/anta) ![GitHub commit activity (branch)](https://img.shields.io/github/commit-activity/m/aristanetworks/anta) [![Github release](https://img.shields.io/github/release/aristanetworks/anta.svg)](https://github.com/aristanetworks/anta/releases/) [![Contributors](https://img.shields.io/github/contributors/aristanetworks/anta)](https://github.com/aristanetworks/anta/graphs/contributors) |
| **PyPi** | ![PyPi Version](https://img.shields.io/pypi/v/anta) ![Python Versions](https://img.shields.io/pypi/pyversions/anta) ![Python format](https://img.shields.io/pypi/format/anta) ![PyPI - Downloads](https://img.shields.io/pypi/dm/anta) |
ANTA is a Python framework that automates tests for Arista devices.
@ -39,7 +39,9 @@ If you plan to use ANTA only as a CLI tool you can use `pipx` to install it.
[`pipx`](https://pipx.pypa.io/stable/) is a tool to install and run python applications in isolated environments. Refer to `pipx` instructions to install on your system.
`pipx` installs ANTA in an isolated python environment and makes it available globally.
<!-- markdownlint-disable no-emphasis-as-heading -->
**This is not recommended if you plan to contribute to ANTA**
<!-- markdownlint-enable no-emphasis-as-heading -->
```bash
# Install ANTA CLI with pipx

View file

@ -11,11 +11,11 @@ ANTA is a Python library that can be used in user applications. This section des
## [AntaDevice](../api/device.md#anta.device.AntaDevice) Abstract Class
A device is represented in ANTA as an instance of a subclass of the [AntaDevice](../api/device.md#anta.device.AntaDevice) abstract class.
There are a few abstract methods that need to be implemented by child classes:
- The [collect()](../api/device.md#anta.device.AntaDevice.collect) coroutine is in charge of collecting outputs of [AntaCommand](../api/models.md#anta.models.AntaCommand) instances.
- The [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutine is in charge of updating attributes of the [AntaDevice](../api/device.md#anta.device.AntaDevice) instance. These attributes are used by [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) to filter out unreachable devices or by [AntaTest](../api/models.md#anta.models.AntaTest) to skip devices based on their hardware models.
The [copy()](../api/device.md#anta.device.AntaDevice.copy) coroutine is used to copy files to and from the device. It does not need to be implemented if tests are not using it.
@ -24,7 +24,7 @@ The [copy()](../api/device.md#anta.device.AntaDevice.copy) coroutine is used to
The [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) class is an implementation of [AntaDevice](../api/device.md#anta.device.AntaDevice) for Arista EOS.
It uses the [aio-eapi](https://github.com/jeremyschulman/aio-eapi) eAPI client and the [AsyncSSH](https://github.com/ronf/asyncssh) library.
- The [_collect()](../api/device.md#anta.device.AsyncEOSDevice._collect) coroutine collects [AntaCommand](../api/models.md#anta.models.AntaCommand) outputs using eAPI.
- The [refresh()](../api/device.md#anta.device.AsyncEOSDevice.refresh) coroutine tries to open a TCP connection on the eAPI port and update the `is_online` attribute accordingly. If the TCP connection succeeds, it sends a `show version` command to gather the hardware model of the device and updates the `established` and `hw_model` attributes.
- The [copy()](../api/device.md#anta.device.AsyncEOSDevice.copy) coroutine copies files to and from the device using the SCP protocol.
@ -32,282 +32,26 @@ It uses the [aio-eapi](https://github.com/jeremyschulman/aio-eapi) eAPI client a
The [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) class is a subclass of the standard Python type [dict](https://docs.python.org/3/library/stdtypes.html#dict). The keys of this dictionary are the device names, the values are [AntaDevice](../api/device.md#anta.device.AntaDevice) instances.
[AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) provides methods to interact with the ANTA inventory:
- The [add_device()](../api/inventory.md#anta.inventory.AntaInventory.add_device) method adds an [AntaDevice](../api/device.md#anta.device.AntaDevice) instance to the inventory. Adding an entry to [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) with a key different from the device name is not allowed.
- The [get_inventory()](../api/inventory.md#anta.inventory.AntaInventory.get_inventory) returns a new [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) instance with filtered out devices based on the method inputs.
- The [connect_inventory()](../api/inventory.md#anta.inventory.AntaInventory.connect_inventory) coroutine will execute the [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutines of all the devices in the inventory.
- The [parse()](../api/inventory.md#anta.inventory.AntaInventory.parse) static method creates an [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) instance from a YAML file and returns it. The devices are [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) instances.
## Examples
### Parse an ANTA inventory file
```python
--8<-- "parse_anta_inventory_file.py"
Example
"""
import asyncio
from anta.inventory import AntaInventory
async def main(inv: AntaInventory) -> None:
"""
Take an AntaInventory and:
1. try to connect to every device in the inventory
2. print a message for every device connection status
"""
await inv.connect_inventory()
for device in inv.values():
if device.established:
print(f"Device {device.name} is online")
else:
print(f"Could not connect to device {device.name}")
if __name__ == "__main__":
# Create the AntaInventory instance
inventory = AntaInventory.parse(
filename="inv.yml",
username="arista",
password="@rista123",
)
# Run the main coroutine
res = asyncio.run(main(inventory))
```
!!! note "How to create your inventory file"
Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files.
### Run EOS commands
```python
"""
Example
"""
# This is needed to run the script for python < 3.10 for typing annotations
from __future__ import annotations
import asyncio
from pprint import pprint
from anta.inventory import AntaInventory
from anta.models import AntaCommand
async def main(inv: AntaInventory, commands: list[str]) -> dict[str, list[AntaCommand]]:
"""
Take an AntaInventory and a list of commands as string and:
1. try to connect to every device in the inventory
2. collect the results of the commands from each device
Returns:
a dictionary where key is the device name and the value is the list of AntaCommand ran towards the device
"""
await inv.connect_inventory()
# Make a list of coroutine to run commands towards each connected device
coros = []
# dict to keep track of the commands per device
result_dict = {}
for name, device in inv.get_inventory(established_only=True).items():
anta_commands = [AntaCommand(command=command, ofmt="json") for command in commands]
result_dict[name] = anta_commands
coros.append(device.collect_commands(anta_commands))
# Run the coroutines
await asyncio.gather(*coros)
return result_dict
if __name__ == "__main__":
# Create the AntaInventory instance
inventory = AntaInventory.parse(
filename="inv.yml",
username="arista",
password="@rista123",
)
# Create a list of commands with json output
commands = ["show version", "show ip bgp summary"]
# Run the main asyncio entry point
res = asyncio.run(main(inventory, commands))
pprint(res)
```
## Use tests from ANTA
All the test classes inherit from the same abstract Base Class AntaTest. The Class definition indicates which commands are required for the test and the user should focus only on writing the `test` function with optional keywords argument. The instance of the class upon creation instantiates a TestResult object that can be accessed later on to check the status of the test ([unset, skipped, success, failure, error]).
### Test structure
All tests are built on a class named `AntaTest` which provides a complete toolset for a test:
- Object creation
- Test definition
- TestResult definition
- Abstracted method to collect data
This approach means each time you create a test it will be based on this `AntaTest` class. Besides that, you will have to provide some elements:
- `name`: Name of the test
- `description`: A human readable description of your test
- `categories`: a list of categories to sort test.
- `commands`: a list of command to run. This list _must_ be a list of `AntaCommand` which is described in the next part of this document.
Here is an example of a hardware test related to device temperature:
```python
--8<-- "run_eos_commands.py"
import logging
from typing import Any, Dict, List, Optional, cast
from anta.models import AntaTest, AntaCommand
class VerifyTemperature(AntaTest):
"""
Verifies device temparture is currently OK.
"""
# The test name
name = "VerifyTemperature"
# A small description of the test, usually the first line of the class docstring
description = "Verifies device temparture is currently OK"
# The category of the test, usually the module name
categories = ["hardware"]
# The command(s) used for the test. Could be a template instead
commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
# Decorator
@AntaTest.anta_test
# abstract method that must be defined by the child Test class
def test(self) -> None:
"""Run VerifyTemperature validation"""
command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
if temperature_status == "temperatureOk":
self.result.is_success()
else:
self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
```
When you run the test, object will automatically call its `anta.models.AntaTest.collect()` method to get device output for each command if no pre-collected data was given to the test. This method does a loop to call `anta.inventory.models.InventoryDevice.collect()` methods which is in charge of managing device connection and how to get data.
??? info "run test offline"
You can also pass eos data directly to your test if you want to validate data collected in a different workflow. An example is provided below just for information:
```python
test = VerifyTemperature(device, eos_data=test_data["eos_data"])
asyncio.run(test.test())
```
The `test` function is always the same and __must__ be defined with the `@AntaTest.anta_test` decorator. This function takes at least one argument which is a `anta.inventory.models.InventoryDevice` object.
In some cases a test would rely on some additional inputs from the user, for instance the number of expected peers or some expected numbers. All parameters __must__ come with a default value and the test function __should__ validate the parameters values (at this stage this is the only place where validation can be done but there are future plans to make this better).
```python
class VerifyTemperature(AntaTest):
...
@AntaTest.anta_test
def test(self) -> None:
pass
class VerifyTransceiversManufacturers(AntaTest):
...
@AntaTest.anta_test
def test(self, manufacturers: Optional[List[str]] = None) -> None:
# validate the manufactures parameter
pass
```
The test itself does not return any value, but the result is directly available from your AntaTest object and exposes a `anta.result_manager.models.TestResult` object with result, name of the test and optional messages:
- `name` (str): Device name where the test has run.
- `test` (str): Test name runs on the device.
- `categories` (List[str]): List of categories the TestResult belongs to, by default the AntaTest categories.
- `description` (str): TestResult description, by default the AntaTest description.
- `results` (str): Result of the test. Can be one of ["unset", "success", "failure", "error", "skipped"].
- `message` (str, optional): Message to report after the test if any.
- `custom_field` (str, optional): Custom field to store a string for flexibility in integrating with ANTA
```python
from anta.tests.hardware import VerifyTemperature
test = VerifyTemperature(device, eos_data=test_data["eos_data"])
asyncio.run(test.test())
assert test.result.result == "success"
```
### Classes for commands
To make it easier to get data, ANTA defines 2 different classes to manage commands to send to devices:
#### [AntaCommand](../api/models.md#anta.models.AntaCommand) Class
Represent a command with following information:
- Command to run
- Output format expected
- eAPI version
- Output of the command
Usage example:
```python
from anta.models import AntaCommand
cmd1 = AntaCommand(command="show zerotouch")
cmd2 = AntaCommand(command="show running-config diffs", ofmt="text")
```
!!! tip "Command revision and version"
* Most of EOS commands return a JSON structure according to a model (some commands may not be modeled hence the necessity to use `text` outformat sometimes.
* The model can change across time (adding feature, ... ) and when the model is changed in a non backward-compatible way, the __revision__ number is bumped. The initial model starts with __revision__ 1.
* A __revision__ applies to a particular CLI command whereas a __version__ is global to an eAPI call. The __version__ is internally translated to a specific __revision__ for each CLI command in the RPC call. The currently supported __version__ values are `1` and `latest`.
* A __revision takes precedence over a version__ (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned)
* By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using by default `version=1` in eAPI calls.
By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test remains always valid from the day it was created. For some commands, you may also want to run them with a different revision or version.
For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`:
```
# revision 1 as later revision introduce additional nesting for type
commands = [AntaCommand(command="show bfd peers", revision=1)]
```
#### [AntaTemplate](../api/models.md#anta.models.AntaTemplate) Class
Because some command can require more dynamic than just a command with no parameter provided by user, ANTA supports command template: you define a template in your test class and user provide parameters when creating test object.
```python
class RunArbitraryTemplateCommand(AntaTest):
"""
Run an EOS command and return result
Based on AntaTest to build relevant output for pytest
"""
name = "Run aributrary EOS command"
description = "To be used only with anta debug commands"
template = AntaTemplate(template="show interfaces {ifd}")
categories = ["debug"]
@AntaTest.anta_test
def test(self) -> None:
errdisabled_interfaces = [interface for interface, value in response["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"]
...
params = [{"ifd": "Ethernet2"}, {"ifd": "Ethernet49/1"}]
run_command1 = RunArbitraryTemplateCommand(device_anta, params)
```
In this example, test waits for interfaces to check from user setup and will only check for interfaces in `params`

View file

@ -10,7 +10,7 @@ ANTA is a streamlined Python framework designed for efficient interaction with n
By default, ANTA utilizes [aiocache](https://github.com/aio-libs/aiocache)'s memory cache backend, also called [`SimpleMemoryCache`](https://aiocache.aio-libs.org/en/v0.12.2/caches.html#simplememorycache). This library aims for simplicity and supports asynchronous operations to go along with Python `asyncio` used in ANTA. By default, ANTA utilizes [aiocache](https://github.com/aio-libs/aiocache)'s memory cache backend, also called [`SimpleMemoryCache`](https://aiocache.aio-libs.org/en/v0.12.2/caches.html#simplememorycache). This library aims for simplicity and supports asynchronous operations to go along with Python `asyncio` used in ANTA.
The `_init_cache()` method of the [AntaDevice](../advanced_usages/as-python-lib.md#antadevice-abstract-class) abstract class initializes the cache. Child classes can override this method to tweak the cache configuration: The `_init_cache()` method of the [AntaDevice](../api/device.md#anta.device.AntaDevice) abstract class initializes the cache. Child classes can override this method to tweak the cache configuration:
```python ```python
def _init_cache(self) -> None: def _init_cache(self) -> None:
@ -29,7 +29,7 @@ The cache is initialized per `AntaDevice` and uses the following cache key desig
`<device_name>:<uid>` `<device_name>:<uid>`
The `uid` is an attribute of [AntaCommand](../advanced_usages/as-python-lib.md#antacommand-class), which is a unique identifier generated from the command, version, revision and output format. The `uid` is an attribute of [AntaCommand](../api/models.md#anta.models.AntaCommand), which is a unique identifier generated from the command, version, revision and output format.
Each UID has its own asyncio lock. This design allows coroutines that need to access the cache for different UIDs to do so concurrently. The locks are managed by the `self.cache_locks` dictionary. Each UID has its own asyncio lock. This design allows coroutines that need to access the cache for different UIDs to do so concurrently. The locks are managed by the `self.cache_locks` dictionary.
@ -44,10 +44,13 @@ Caching is enabled by default in ANTA following the previous configuration and m
There might be scenarios where caching is not wanted. You can disable caching in multiple ways in ANTA: There might be scenarios where caching is not wanted. You can disable caching in multiple ways in ANTA:
1. Caching can be disabled globally, for **ALL** commands on **ALL** devices, using the `--disable-cache` global flag when invoking anta at the [CLI](../cli/overview.md#invoking-anta-cli): 1. Caching can be disabled globally, for **ALL** commands on **ALL** devices, using the `--disable-cache` global flag when invoking anta at the [CLI](../cli/overview.md#invoking-anta-cli):
```bash ```bash
anta --disable-cache --username arista --password arista nrfu table anta --disable-cache --username arista --password arista nrfu table
``` ```
2. Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` when defining the ANTA [Inventory](../usage-inventory-catalog.md#create-an-inventory-file) file:
2. Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` when defining the ANTA [Inventory](../usage-inventory-catalog.md#device-inventory) file:
```yaml ```yaml
anta_inventory: anta_inventory:
hosts: hosts:
@ -69,9 +72,10 @@ There might be scenarios where caching is not wanted. You can disable caching in
end: 172.22.22.19 end: 172.22.22.19
disable_cache: True disable_cache: True
``` ```
This approach effectively disables caching for **ALL** commands sent to devices targeted by the `disable_cache` key. This approach effectively disables caching for **ALL** commands sent to devices targeted by the `disable_cache` key.
3. For tests developers, caching can be disabled for a specific [`AntaCommand`](../advanced_usages/as-python-lib.md#antacommand-class) or [`AntaTemplate`](../advanced_usages/as-python-lib.md#antatemplate-class) by setting the `use_cache` attribute to `False`. That means the command output will always be collected on the device and therefore, never use caching. 3. For tests developers, caching can be disabled for a specific [`AntaCommand`](../api/models.md#anta.models.AntaCommand) or [`AntaTemplate`](../api/models.md#anta.models.AntaTemplate) by setting the `use_cache` attribute to `False`. That means the command output will always be collected on the device and therefore, never use caching.
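    For example, a test developer can opt a single command out of caching directly when declaring it (a minimal sketch; `show clock` is only an illustrative command):

    ```python
    from anta.models import AntaCommand

    # This command will always be collected from the device and never served from the cache
    command = AntaCommand(command="show clock", ofmt="text", use_cache=False)
    ```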
### Disable caching in a child class of `AntaDevice` ### Disable caching in a child class of `AntaDevice`
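A minimal sketch of such a child class, assuming the simplest approach is to override `_init_cache()` so that `self.cache` and `self.cache_locks` are never populated (the remaining abstract methods of `AntaDevice` are omitted here):

```python
from anta.device import AntaDevice


class NoCacheDevice(AntaDevice):
    """Device implementation that never caches command outputs (abstract methods omitted)."""

    def _init_cache(self) -> None:
        # Leave the cache objects unset so collect() always queries the device
        self.cache = None
        self.cache_locks = None
```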

View file

@ -4,7 +4,7 @@
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
!!! info "" !!! info
This documentation applies for both creating tests in ANTA or creating your own test package. This documentation applies for both creating tests in ANTA or creating your own test package.
ANTA is not only a Python library with a CLI and a collection of built-in tests, it is also a framework you can extend by building your own tests. ANTA is not only a Python library with a CLI and a collection of built-in tests, it is also a framework you can extend by building your own tests.
@ -64,16 +64,13 @@ Full AntaTest API documentation is available in the [API documentation section](
- `name` (`str`): Name of the test. Used during reporting. - `name` (`str`): Name of the test. Used during reporting.
- `description` (`str`): A human readable description of your test. - `description` (`str`): A human readable description of your test.
- `categories` (`list[str]`): A list of categories in which the test belongs. - `categories` (`list[str]`): A list of categories in which the test belongs.
- `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of command to collect from devices. This list __must__ be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later. - `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of command to collect from devices. This list **must** be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later.
!!! info !!! info
All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation.
### Instance Attributes ### Instance Attributes
!!! info
You can access an instance attribute in your code using the `self` reference. E.g. you can access the test input values using `self.inputs`.
::: anta.models.AntaTest ::: anta.models.AntaTest
options: options:
show_docstring_attributes: true show_docstring_attributes: true
@ -87,7 +84,6 @@ Full AntaTest API documentation is available in the [API documentation section](
show_root_toc_entry: false show_root_toc_entry: false
heading_level: 10 heading_level: 10
!!! note "Logger object" !!! note "Logger object"
ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information.
@ -140,8 +136,8 @@ Full `ResultOverwrite` model documentation is available in [API documentation se
### Methods ### Methods
- [test(self) -> None](../api/models.md#anta.models.AntaTest.test): This is an abstract method that __must__ be implemented. It contains the test logic that can access the collected command outputs using the `instance_commands` instance attribute, access the test inputs using the `inputs` instance attribute and __must__ set the `result` instance attribute accordingly. It must be implemented using the `AntaTest.anta_test` decorator that provides logging and will collect commands before executing the `test()` method. - [test(self) -> None](../api/models.md#anta.models.AntaTest.test): This is an abstract method that **must** be implemented. It contains the test logic that can access the collected command outputs using the `instance_commands` instance attribute, access the test inputs using the `inputs` instance attribute and **must** set the `result` instance attribute accordingly. It must be implemented using the `AntaTest.anta_test` decorator that provides logging and will collect commands before executing the `test()` method.
- [render(self, template: AntaTemplate) -> list[AntaCommand]](../api/models.md#anta.models.AntaTest.render): This method only needs to be implemented if [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances are present in the `commands` class attribute. It will be called for every [AntaTemplate](../api/models.md#anta.models.AntaTemplate) occurrence and __must__ return a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) using the [AntaTemplate.render()](../api/models.md#anta.models.AntaTemplate.render) method. It can access test inputs using the `inputs` instance attribute. - [render(self, template: AntaTemplate) -> list[AntaCommand]](../api/models.md#anta.models.AntaTest.render): This method only needs to be implemented if [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances are present in the `commands` class attribute. It will be called for every [AntaTemplate](../api/models.md#anta.models.AntaTemplate) occurrence and **must** return a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) using the [AntaTemplate.render()](../api/models.md#anta.models.AntaTemplate.render) method. It can access test inputs using the `inputs` instance attribute.
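As an illustration, a `render()` implementation typically expands the template once per value provided in the test inputs. The sketch below is a method fragment from a test class; the `vlan_ids` input and the `{vlan_id}` template field are hypothetical:

```python
def render(self, template: AntaTemplate) -> list[AntaCommand]:
    # Build one AntaCommand per VLAN ID provided by the user in the test inputs
    return [template.render(vlan_id=vlan_id) for vlan_id in self.inputs.vlan_ids]
```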
## Test execution ## Test execution
@ -199,6 +195,22 @@ class <YourTestName>(AntaTest):
] ]
``` ```
!!! tip "Command revision and version"
    * Most EOS commands return a JSON structure according to a model (some commands may not be modeled, hence the need to sometimes use the `text` outformat).
    * The model can change over time (adding features, ...) and when the model is changed in a non-backward-compatible way, the **revision** number is bumped. The initial model starts with **revision** 1.
* A **revision** applies to a particular CLI command whereas a **version** is global to an eAPI call. The **version** is internally translated to a specific **revision** for each CLI command in the RPC call. The currently supported **version** values are `1` and `latest`.
* A **revision takes precedence over a version** (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned)
    * By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using `version=1` by default in eAPI calls.
By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test always remains valid from the day it was created. For some commands, you may also want to run them with a different revision or version.
For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`:
```python
# revision 1 as later revisions introduce additional nesting for type
commands = [AntaCommand(command="show bfd peers", revision=1)]
```
### Inputs definition ### Inputs definition
If the user needs to provide inputs for your test, you need to define a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that defines the schema of the test inputs: If the user needs to provide inputs for your test, you need to define a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that defines the schema of the test inputs:
@ -261,6 +273,7 @@ class <YourTestName>(AntaTest):
``` ```
The logic usually includes the following different stages: The logic usually includes the following different stages:
1. Parse the command outputs using the `self.instance_commands` instance attribute. 1. Parse the command outputs using the `self.instance_commands` instance attribute.
2. If needed, access the test inputs using the `self.inputs` instance attribute and write your conditional logic. 2. If needed, access the test inputs using the `self.inputs` instance attribute and write your conditional logic.
3. Set the `result` instance attribute to reflect the test result by either calling `self.result.is_success()` or `self.result.is_failure("<FAILURE REASON>")`. Sometimes, setting the test result to `skipped` using `self.result.is_skipped("<SKIPPED REASON>")` can make sense (e.g. testing the OSPF neighbor states but no neighbor was found). However, you should not need to catch any exception and set the test result to `error` since the error handling is done by the framework, see below. 3. Set the `result` instance attribute to reflect the test result by either calling `self.result.is_success()` or `self.result.is_failure("<FAILURE REASON>")`. Sometimes, setting the test result to `skipped` using `self.result.is_skipped("<SKIPPED REASON>")` can make sense (e.g. testing the OSPF neighbor states but no neighbor was found). However, you should not need to catch any exception and set the test result to `error` since the error handling is done by the framework, see below.
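The sketch below illustrates these three stages for a hypothetical uptime check (the `minimum` input and the `upTime` key are assumptions made for this example):

```python
@AntaTest.anta_test
def test(self) -> None:
    # 1. Parse the collected command output
    command_output = self.instance_commands[0].json_output
    # 2. Use the test inputs to drive the conditional logic
    if command_output["upTime"] >= self.inputs.minimum:
        # 3. Set the result accordingly
        self.result.is_success()
    else:
        self.result.is_failure(f"Uptime is {command_output['upTime']} seconds, expected at least {self.inputs.minimum}")
```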
@ -341,6 +354,7 @@ anta_custom.dc_project:
- VerifyFeatureX: - VerifyFeatureX:
minimum: 1 minimum: 1
``` ```
And now you can run your NRFU tests with the CLI: And now you can run your NRFU tests with the CLI:
```bash ```bash

View file

@ -5,6 +5,7 @@
--> -->
### ::: anta.catalog.AntaCatalog ### ::: anta.catalog.AntaCatalog
options: options:
filters: ["!^_[^_]", "!__str__"] filters: ["!^_[^_]", "!__str__"]

13
docs/api/csv_reporter.md Normal file
View file

@ -0,0 +1,13 @@
---
anta_title: CSV Reporter
---
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
::: anta.reporter.csv_reporter
options:
show_root_heading: false
show_root_toc_entry: false

View file

@ -6,20 +6,20 @@
# AntaDevice base class # AntaDevice base class
## UML representation ![AntaDevice UML model](../imgs/uml/anta.device.AntaDevice.jpeg)
![](../imgs/uml/anta.device.AntaDevice.jpeg) ## ::: anta.device.AntaDevice
### ::: anta.device.AntaDevice
options: options:
filters: ["!^_[^_]", "!__(eq|rich_repr)__"] filters: ["!^_[^_]", "!__(eq|rich_repr)__", "_collect"]
# Async EOS device class # Async EOS device class
## UML representation ![AsyncEOSDevice UML model](../imgs/uml/anta.device.AsyncEOSDevice.jpeg)
![](../imgs/uml/anta.device.AsyncEOSDevice.jpeg) <!-- _collect must be last to be kept -->
## ::: anta.device.AsyncEOSDevice
### ::: anta.device.AsyncEOSDevice
options: options:
filters: ["!^_[^_]", "!__(eq|rich_repr)__"] filters: ["!^_[^_]", "!__(eq|rich_repr)__", "_collect"]

View file

@ -5,6 +5,7 @@
--> -->
### ::: anta.inventory.AntaInventory ### ::: anta.inventory.AntaInventory
options: options:
filters: ["!^_[^_]", "!__str__"] filters: ["!^_[^_]", "!__str__"]

13
docs/api/md_reporter.md Normal file
View file

@ -0,0 +1,13 @@
---
anta_title: Markdown Reporter
---
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
::: anta.reporter.md_reporter
options:
show_root_heading: false
show_root_toc_entry: false

View file

@ -6,20 +6,18 @@
# Test definition # Test definition
## UML Diagram ![AntaTest UML model](../imgs/uml/anta.models.AntaTest.jpeg)
![](../imgs/uml/anta.models.AntaTest.jpeg) ## ::: anta.models.AntaTest
### ::: anta.models.AntaTest
options: options:
filters: ["!^_[^_]", "!__init_subclass__", "!update_progress"] filters: ["!^_[^_]", "!__init_subclass__", "!update_progress"]
# Command definition # Command definition
## UML Diagram ![AntaCommand UML model](../imgs/uml/anta.models.AntaCommand.jpeg)
![](../imgs/uml/anta.models.AntaCommand.jpeg) ## ::: anta.models.AntaCommand
### ::: anta.models.AntaCommand
!!! warning !!! warning
CLI commands are protected to avoid execution of critical commands such as `reload` or `write erase`. CLI commands are protected to avoid execution of critical commands such as `reload` or `write erase`.
@ -30,8 +28,6 @@
# Template definition # Template definition
## UML Diagram ![AntaTemplate UML model](../imgs/uml/anta.models.AntaTemplate.jpeg)
![](../imgs/uml/anta.models.AntaTemplate.jpeg) ## ::: anta.models.AntaTemplate
### ::: anta.models.AntaTemplate

View file

@ -4,4 +4,7 @@
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
### ::: anta.reporter.ReportTable ::: anta.reporter
options:
show_root_heading: false
show_root_toc_entry: false

View file

@ -6,10 +6,9 @@
# Result Manager definition # Result Manager definition
## UML Diagram ![ResultManager UML model](../imgs/uml/anta.result_manager.ResultManager.jpeg)
![](../imgs/uml/anta.result_manager.ResultManager.jpeg) ## ::: anta.result_manager.ResultManager
### ::: anta.result_manager.ResultManager
options: options:
filters: ["!^_[^_]", "!^__len__"] filters: ["!^_[^_]", "!^__len__"]

View file

@ -6,10 +6,9 @@
# Test Result model # Test Result model
## UML Diagram ![TestResult UML model](../imgs/uml/anta.result_manager.models.TestResult.jpeg)
![](../imgs/uml/anta.result_manager.models.TestResult.jpeg) ## ::: anta.result_manager.models.TestResult
### ::: anta.result_manager.models.TestResult
options: options:
filters: ["!^_[^_]", "!__str__"] filters: ["!^_[^_]", "!__str__"]

View file

@ -5,5 +5,6 @@
--> -->
### ::: anta.runner ### ::: anta.runner
options: options:
filters: ["!^_[^_]", "!__str__"] filters: ["!^_[^_]", "!__str__"]

View file

@ -0,0 +1,20 @@
---
anta_title: ANTA catalog for flow tracking tests
---
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
::: anta.tests.flow_tracking
options:
show_root_heading: false
show_root_toc_entry: false
show_bases: false
merge_init_into_class: false
anta_hide_test_module_description: true
show_labels: true
filters:
- "!test"
- "!render"

View file

@ -1,11 +1,12 @@
---
anta_title: ANTA Tests Landing Page
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# ANTA Tests Landing Page
This section describes all the available tests provided by the ANTA package. This section describes all the available tests provided by the ANTA package.
## Available Tests ## Available Tests
@ -17,7 +18,8 @@ Here are the tests that we currently provide:
- [BFD](tests.bfd.md) - [BFD](tests.bfd.md)
- [Configuration](tests.configuration.md) - [Configuration](tests.configuration.md)
- [Connectivity](tests.connectivity.md) - [Connectivity](tests.connectivity.md)
- [Field Notice](tests.field_notices.md) - [Field Notices](tests.field_notices.md)
- [Flow Tracking](tests.flow_tracking.md)
- [GreenT](tests.greent.md) - [GreenT](tests.greent.md)
- [Hardware](tests.hardware.md) - [Hardware](tests.hardware.md)
- [Interfaces](tests.interfaces.md) - [Interfaces](tests.interfaces.md)
@ -30,6 +32,7 @@ Here are the tests that we currently provide:
- [Router Path Selection](tests.path_selection.md) - [Router Path Selection](tests.path_selection.md)
- [Routing Generic](tests.routing.generic.md) - [Routing Generic](tests.routing.generic.md)
- [Routing BGP](tests.routing.bgp.md) - [Routing BGP](tests.routing.bgp.md)
- [Routing ISIS](tests.routing.isis.md)
- [Routing OSPF](tests.routing.ospf.md) - [Routing OSPF](tests.routing.ospf.md)
- [Security](tests.security.md) - [Security](tests.security.md)
- [Services](tests.services.md) - [Services](tests.services.md)

View file

@ -18,3 +18,4 @@ anta_title: ANTA catalog for BGP tests
filters: filters:
- "!test" - "!test"
- "!render" - "!render"
- "!^_[^_]"

View file

@ -18,3 +18,4 @@ anta_title: ANTA catalog for IS-IS tests
filters: filters:
- "!test" - "!test"
- "!render" - "!render"
- "!^_[^_]"

View file

@ -18,3 +18,4 @@ anta_title: ANTA catalog for OSPF tests
filters: filters:
- "!test" - "!test"
- "!render" - "!render"
- "!^_[^_]"

View file

@ -5,6 +5,7 @@
--> -->
### ::: anta.custom_types ### ::: anta.custom_types
options: options:
show_if_no_docstring: true show_if_no_docstring: true
show_root_full_path: true show_root_full_path: true

View file

@ -1,11 +1,12 @@
---
anta_title: ANTA check commands
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# ANTA check commands
The ANTA check command allows you to execute some checks on the ANTA input files. The ANTA check command allows you to execute some checks on the ANTA input files.
Only checking the catalog is currently supported. Only checking the catalog is currently supported.
@ -27,10 +28,12 @@ Commands:
```bash ```bash
Usage: anta check catalog [OPTIONS] Usage: anta check catalog [OPTIONS]
Check that the catalog is valid Check that the catalog is valid.
Options: Options:
-c, --catalog FILE Path to the test catalog YAML file [env var: -c, --catalog FILE Path to the test catalog file [env var:
ANTA_CATALOG; required] ANTA_CATALOG; required]
--catalog-format [yaml|json] Format of the catalog file, either 'yaml' or
'json' [env var: ANTA_CATALOG_FORMAT]
--help Show this message and exit. --help Show this message and exit.
``` ```
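For instance, validating a YAML catalog could look like this (the file path is illustrative):

```bash
anta check catalog --catalog ./catalog.yml
```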

View file

@ -1,11 +1,12 @@
---
anta_title: ANTA debug commands
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# ANTA debug commands
The ANTA CLI includes a set of debugging tools, making it easier to build and test ANTA content. This functionality is accessed via the `debug` subcommand and offers the following options: The ANTA CLI includes a set of debugging tools, making it easier to build and test ANTA content. This functionality is accessed via the `debug` subcommand and offers the following options:
- Executing a command on a device from your inventory and retrieving the result. - Executing a command on a device from your inventory and retrieving the result.
@ -14,7 +15,7 @@ The ANTA CLI includes a set of debugging tools, making it easier to build and te
These tools are especially helpful in building the tests, as they give a visual access to the output received from the eAPI. They also facilitate the extraction of output content for use in unit tests, as described in our [contribution guide](../contribution.md). These tools are especially helpful in building the tests, as they give a visual access to the output received from the eAPI. They also facilitate the extraction of output content for use in unit tests, as described in our [contribution guide](../contribution.md).
!!! warning !!! warning
The `debug` tools require a device from your inventory. Thus, you MUST use a valid [ANTA Inventory](../usage-inventory-catalog.md#create-an-inventory-file). The `debug` tools require a device from your inventory. Thus, you must use a valid [ANTA Inventory](../usage-inventory-catalog.md#device-inventory).
## Executing an EOS command ## Executing an EOS command
@ -52,8 +53,6 @@ Options:
ANTA_DISABLE_CACHE] ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var: -i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required] ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--ofmt [json|text] EOS eAPI format to use. can be text or json --ofmt [json|text] EOS eAPI format to use. can be text or json
-v, --version [1|latest] EOS eAPI version -v, --version [1|latest] EOS eAPI version
-r, --revision INTEGER eAPI command revision -r, --revision INTEGER eAPI command revision
@ -97,8 +96,9 @@ Usage: anta debug run-template [OPTIONS] PARAMS...
Takes a list of arguments (keys followed by a value) to build a dictionary Takes a list of arguments (keys followed by a value) to build a dictionary
used as template parameters. used as template parameters.
Example: ------- anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' Example
vlan_id 1 -------
anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1
Options: Options:
-u, --username TEXT Username to connect to EOS [env var: -u, --username TEXT Username to connect to EOS [env var:
@ -125,8 +125,6 @@ Options:
ANTA_DISABLE_CACHE] ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var: -i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required] ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--ofmt [json|text] EOS eAPI format to use. can be text or json --ofmt [json|text] EOS eAPI format to use. can be text or json
-v, --version [1|latest] EOS eAPI version -v, --version [1|latest] EOS eAPI version
-r, --revision INTEGER eAPI command revision -r, --revision INTEGER eAPI command revision
@ -161,11 +159,12 @@ Run templated command 'show vlan {vlan_id}' with {'vlan_id': '10'} on DC1-LEAF1A
'sourceDetail': '' 'sourceDetail': ''
} }
``` ```
!!! warning
If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters.
### Example of multiple arguments ### Example of multiple arguments
!!! warning
If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters.
```bash ```bash
anta -log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 --device DC1-SPINE1     anta -log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 --device DC1-SPINE1    
> {'dst': '8.8.8.8', 'src': 'Loopback0'} > {'dst': '8.8.8.8', 'src': 'Loopback0'}

View file

@ -1,14 +1,16 @@
---
anta_title: Executing Commands on Devices
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Executing Commands on Devices
ANTA CLI provides a set of entrypoints to facilitate remote command execution on EOS devices. ANTA CLI provides a set of entrypoints to facilitate remote command execution on EOS devices.
### EXEC Command overview ## EXEC command overview
```bash ```bash
anta exec --help anta exec --help
Usage: anta exec [OPTIONS] COMMAND [ARGS]... Usage: anta exec [OPTIONS] COMMAND [ARGS]...
@ -133,6 +135,7 @@ json_format:
text_format: text_format:
- show bfd peers - show bfd peers
``` ```
### Example ### Example
```bash ```bash

View file

@ -1,14 +1,61 @@
---
anta_title: Retrieving Inventory Information
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Retrieving Inventory Information The ANTA CLI offers multiple commands to access data from your local inventory.
The ANTA CLI offers multiple entrypoints to access data from your local inventory. ## List devices in inventory
## Inventory used of examples This command will list all devices available in the inventory. Using the `--tags` option, you can filter this list to only include devices with specific tags (visit [this page](tag-management.md) to learn more about tags). The `--connected` option allows to display only the devices where a connection has been established.
### Command overview
```bash
Usage: anta get inventory [OPTIONS]
Show inventory loaded in ANTA.
Options:
-u, --username TEXT Username to connect to EOS [env var:
ANTA_USERNAME; required]
-p, --password TEXT Password to connect to EOS that must be
provided. It can be prompted using '--prompt'
option. [env var: ANTA_PASSWORD]
--enable-password TEXT Password to access EOS Privileged EXEC mode.
It can be prompted using '--prompt' option.
Requires '--enable' option. [env var:
ANTA_ENABLE_PASSWORD]
--enable Some commands may require EOS Privileged EXEC
mode. This option tries to access this mode
before sending a command to the device. [env
var: ANTA_ENABLE]
-P, --prompt Prompt for passwords if they are not
provided. [env var: ANTA_PROMPT]
--timeout FLOAT Global API timeout. This value will be used
for all devices. [env var: ANTA_TIMEOUT;
default: 30.0]
--insecure Disable SSH Host Key validation. [env var:
ANTA_INSECURE]
--disable-cache Disable cache globally. [env var:
ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--connected / --not-connected Display inventory after connection has been
created
--help Show this message and exit.
```
!!! tip
By default, `anta get inventory` only provides information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, use the `--connected` option.
### Example
Let's consider the following inventory: Let's consider the following inventory:
@ -65,123 +112,15 @@ anta_inventory:
tags: ["BL", "DC2"] tags: ["BL", "DC2"]
``` ```
## Obtaining all configured tags
As most of ANTA's commands accommodate tag filtering, this particular command is useful for enumerating all tags configured in the inventory. Running the `anta get tags` command will return a list of all tags that have been configured in the inventory.
### Command overview
```bash
Usage: anta get tags [OPTIONS]
Get list of configured tags in user inventory.
Options:
-u, --username TEXT Username to connect to EOS [env var: ANTA_USERNAME;
required]
-p, --password TEXT Password to connect to EOS that must be provided. It
can be prompted using '--prompt' option. [env var:
ANTA_PASSWORD]
--enable-password TEXT Password to access EOS Privileged EXEC mode. It can
be prompted using '--prompt' option. Requires '--
enable' option. [env var: ANTA_ENABLE_PASSWORD]
--enable Some commands may require EOS Privileged EXEC mode.
This option tries to access this mode before sending
a command to the device. [env var: ANTA_ENABLE]
-P, --prompt Prompt for passwords if they are not provided. [env
var: ANTA_PROMPT]
--timeout FLOAT Global API timeout. This value will be used for all
devices. [env var: ANTA_TIMEOUT; default: 30.0]
--insecure Disable SSH Host Key validation. [env var:
ANTA_INSECURE]
--disable-cache Disable cache globally. [env var:
ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--help Show this message and exit.
```
### Example
To get the list of all configured tags in the inventory, run the following command:
```bash
anta get tags
Tags found:
[
"BL",
"DC1",
"DC2",
"LEAF",
"SPINE"
]
* note that tag all has been added by anta
```
!!! note
Even if you haven't explicitly configured the `all` tag in the inventory, it is automatically added. This default tag allows to execute commands on all devices in the inventory when no tag is specified.
## List devices in inventory
This command will list all devices available in the inventory. Using the `--tags` option, you can filter this list to only include devices with specific tags. The `--connected` option allows to display only the devices where a connection has been established.
### Command overview
```bash
Usage: anta get inventory [OPTIONS]
Show inventory loaded in ANTA.
Options:
-u, --username TEXT Username to connect to EOS [env var:
ANTA_USERNAME; required]
-p, --password TEXT Password to connect to EOS that must be
provided. It can be prompted using '--prompt'
option. [env var: ANTA_PASSWORD]
--enable-password TEXT Password to access EOS Privileged EXEC mode.
It can be prompted using '--prompt' option.
Requires '--enable' option. [env var:
ANTA_ENABLE_PASSWORD]
--enable Some commands may require EOS Privileged EXEC
mode. This option tries to access this mode
before sending a command to the device. [env
var: ANTA_ENABLE]
-P, --prompt Prompt for passwords if they are not
provided. [env var: ANTA_PROMPT]
--timeout FLOAT Global API timeout. This value will be used
for all devices. [env var: ANTA_TIMEOUT;
default: 30.0]
--insecure Disable SSH Host Key validation. [env var:
ANTA_INSECURE]
--disable-cache Disable cache globally. [env var:
ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--connected / --not-connected Display inventory after connection has been
created
--help Show this message and exit.
```
!!! tip
In its default mode, `anta get inventory` provides only information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, please use the `--connected` option.
### Example
To retrieve a comprehensive list of all devices along with their details, execute the following command. It will provide all the data loaded into the ANTA inventory from your [inventory file](../usage-inventory-catalog.md). To retrieve a comprehensive list of all devices along with their details, execute the following command. It will provide all the data loaded into the ANTA inventory from your [inventory file](../usage-inventory-catalog.md).
```bash ```bash
anta get inventory --tags SPINE $ anta get inventory --tags SPINE
Current inventory content is: Current inventory content is:
{ {
'DC1-SPINE1': AsyncEOSDevice( 'DC1-SPINE1': AsyncEOSDevice(
name='DC1-SPINE1', name='DC1-SPINE1',
tags=['SPINE', 'DC1'], tags={'DC1-SPINE1', 'DC1', 'SPINE'},
hw_model=None, hw_model=None,
is_online=False, is_online=False,
established=False, established=False,
@ -189,13 +128,12 @@ Current inventory content is:
host='172.20.20.101', host='172.20.20.101',
eapi_port=443, eapi_port=443,
username='arista', username='arista',
enable=True, enable=False,
enable_password='arista',
insecure=False insecure=False
), ),
'DC1-SPINE2': AsyncEOSDevice( 'DC1-SPINE2': AsyncEOSDevice(
name='DC1-SPINE2', name='DC1-SPINE2',
tags=['SPINE', 'DC1'], tags={'DC1', 'SPINE', 'DC1-SPINE2'},
hw_model=None, hw_model=None,
is_online=False, is_online=False,
established=False, established=False,
@ -203,12 +141,12 @@ Current inventory content is:
host='172.20.20.102', host='172.20.20.102',
eapi_port=443, eapi_port=443,
username='arista', username='arista',
enable=True, enable=False,
insecure=False insecure=False
), ),
'DC2-SPINE1': AsyncEOSDevice( 'DC2-SPINE1': AsyncEOSDevice(
name='DC2-SPINE1', name='DC2-SPINE1',
tags=['SPINE', 'DC2'], tags={'DC2', 'DC2-SPINE1', 'SPINE'},
hw_model=None, hw_model=None,
is_online=False, is_online=False,
established=False, established=False,
@ -216,12 +154,12 @@ Current inventory content is:
host='172.20.20.201', host='172.20.20.201',
eapi_port=443, eapi_port=443,
username='arista', username='arista',
enable=True, enable=False,
insecure=False insecure=False
), ),
'DC2-SPINE2': AsyncEOSDevice( 'DC2-SPINE2': AsyncEOSDevice(
name='DC2-SPINE2', name='DC2-SPINE2',
tags=['SPINE', 'DC2'], tags={'DC2', 'DC2-SPINE2', 'SPINE'},
hw_model=None, hw_model=None,
is_online=False, is_online=False,
established=False, established=False,
@ -229,7 +167,7 @@ Current inventory content is:
host='172.20.20.202', host='172.20.20.202',
eapi_port=443, eapi_port=443,
username='arista', username='arista',
enable=True, enable=False,
insecure=False insecure=False
) )
} }

View file

@ -1,14 +1,15 @@
---
anta_title: Create an Inventory from Ansible inventory
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Create an Inventory from Ansible inventory
In large setups, it might be beneficial to construct your inventory based on your Ansible inventory. The `from-ansible` entrypoint of the `get` command enables the user to create an ANTA inventory from Ansible. In large setups, it might be beneficial to construct your inventory based on your Ansible inventory. The `from-ansible` entrypoint of the `get` command enables the user to create an ANTA inventory from Ansible.
### Command overview ## Command overview
```bash ```bash
$ anta get from-ansible --help $ anta get from-ansible --help
@ -35,7 +36,6 @@ Options:
`anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory. `anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory.
If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work." If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work."
The output is an inventory where the name of the container is added as a tag for each host: The output is an inventory where the name of the container is added as a tag for each host:
```yaml ```yaml
@ -54,8 +54,7 @@ anta_inventory:
By default, if user does not provide `--output` file, anta will save output to configured anta inventory (`anta --inventory`). If the output file has content, anta will ask user to overwrite when running in interactive console. This mechanism can be controlled by triggers in case of CI usage: `--overwrite` to force anta to overwrite file. If not set, anta will exit By default, if user does not provide `--output` file, anta will save output to configured anta inventory (`anta --inventory`). If the output file has content, anta will ask user to overwrite when running in interactive console. This mechanism can be controlled by triggers in case of CI usage: `--overwrite` to force anta to overwrite file. If not set, anta will exit
## Command output
### Command output
`host` value is coming from the `ansible_host` key in your inventory while `name` is the name you defined for your host. Below is an ansible inventory example used to generate previous inventory: `host` value is coming from the `ansible_host` key in your inventory while `name` is the name you defined for your host. Below is an ansible inventory example used to generate previous inventory:

View file

@ -1,17 +1,18 @@
---
anta_title: Create an Inventory from CloudVision
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Create an Inventory from CloudVision
In large setups, it might be beneficial to construct your inventory based on CloudVision. The `from-cvp` entrypoint of the `get` command enables the user to create an ANTA inventory from CloudVision. In large setups, it might be beneficial to construct your inventory based on CloudVision. The `from-cvp` entrypoint of the `get` command enables the user to create an ANTA inventory from CloudVision.
!!! info !!! info
The current implementation only works with on-premises CloudVision instances, not with CloudVision as a Service (CVaaS). The current implementation only works with on-premises CloudVision instances, not with CloudVision as a Service (CVaaS).
### Command overview ## Command overview
```bash ```bash
Usage: anta get from-cvp [OPTIONS] Usage: anta get from-cvp [OPTIONS]
@ -54,7 +55,7 @@ anta_inventory:
!!! warning !!! warning
The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option. The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option.
### Creating an inventory from multiple containers ## Creating an inventory from multiple containers
If you need to create an inventory from multiple containers, you can use a bash command and then manually concatenate files to create a single inventory file: If you need to create an inventory from multiple containers, you can use a bash command and then manually concatenate files to create a single inventory file:

View file

@ -1,17 +1,20 @@
---
anta_title: Execute Network Readiness For Use (NRFU) Testing
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Execute Network Readiness For Use (NRFU) Testing
ANTA provides a set of commands for performing NRFU tests on devices. These commands are under the `anta nrfu` namespace and offer multiple output format options: ANTA provides a set of commands for performing NRFU tests on devices. These commands are under the `anta nrfu` namespace and offer multiple output format options:
- [Text view](#performing-nrfu-with-text-rendering) - [Text report](#performing-nrfu-with-text-rendering)
- [Table view](#performing-nrfu-with-table-rendering) - [Table report](#performing-nrfu-with-table-rendering)
- [JSON view](#performing-nrfu-with-json-rendering) - [JSON report](#performing-nrfu-with-json-rendering)
- [Custom template view](#performing-nrfu-with-custom-reports) - [Custom template report](#performing-nrfu-with-custom-reports)
- [CSV report](#performing-nrfu-and-saving-results-in-a-csv-file)
- [Markdown report](#performing-nrfu-and-saving-results-in-a-markdown-file)
## NRFU Command overview ## NRFU Command overview
@ -28,16 +31,7 @@ All commands under the `anta nrfu` namespace require a catalog yaml file specifi
### Tag management ### Tag management
The `--tags` option can be used to target specific devices in your inventory and run only tests configured with these specific tags from your catalog. The default tag is set to `all` and is implicit. Expected behaviour is provided below: The `--tags` option can be used to target specific devices in your inventory and run only tests configured with these specific tags from your catalog. Refer to the [dedicated page](tag-management.md) for more information.
| Command | Description |
| ------- | ----------- |
| `none` | Run all tests on all devices according `tag` definition in your inventory and test catalog. And tests with no tag are executed on all devices|
| `--tags leaf` | Run all tests marked with `leaf` tag on all devices configured with `leaf` tag.<br/> All other tags are ignored |
| `--tags leaf,spine` | Run all tests marked with `leaf` tag on all devices configured with `leaf` tag.<br/>Run all tests marked with `spine` tag on all devices configured with `spine` tag.<br/> All other tags are ignored |
!!! info
[More examples](tag-management.md) available on this dedicated page.
### Device and test filtering ### Device and test filtering
@ -45,7 +39,7 @@ Options `--device` and `--test` can be used to target one or multiple devices an
### Hide results ### Hide results
Option `--hide` can be used to hide test results in the output based on their status. The option can be repeated. Example: `anta nrfu --hide error --hide skipped`. Option `--hide` can be used to hide test results in the output or report file based on their status. The option can be repeated. Example: `anta nrfu --hide error --hide skipped`.
## Performing NRFU with text rendering ## Performing NRFU with text rendering
@ -67,6 +61,7 @@ Options:
```bash ```bash
anta nrfu --device DC1-LEAF1A text anta nrfu --device DC1-LEAF1A text
``` ```
![anta nrfu text results](../imgs/anta-nrfu-text-output.png){ loading=lazy width="1600" } ![anta nrfu text results](../imgs/anta-nrfu-text-output.png){ loading=lazy width="1600" }
## Performing NRFU with table rendering ## Performing NRFU with table rendering
@ -92,6 +87,7 @@ The `--group-by` option show a summarized view of the test results per host or p
```bash ```bash
anta nrfu --tags LEAF table anta nrfu --tags LEAF table
``` ```
![anta nrfu table results](../imgs/anta-nrfu-table-output.png){ loading=lazy width="1600" } ![anta nrfu table results](../imgs/anta-nrfu-table-output.png){ loading=lazy width="1600" }
For larger setups, you can also group the results by host or test to get a summarized view: For larger setups, you can also group the results by host or test to get a summarized view:
@ -99,11 +95,13 @@ For larger setups, you can also group the results by host or test to get a summa
```bash ```bash
anta nrfu table --group-by device anta nrfu table --group-by device
``` ```
![anta nrfu table group_by_host_output](../imgs/anta-nrfu-table-group-by-host-output.png){ loading=lazy width="1600" } ![anta nrfu table group_by_host_output](../imgs/anta-nrfu-table-group-by-host-output.png){ loading=lazy width="1600" }
```bash ```bash
anta nrfu table --group-by test anta nrfu table --group-by test
``` ```
![anta nrfu table group_by_test_output](../imgs/anta-nrfu-table-group-by-test-output.png){ loading=lazy width="1600" } ![anta nrfu table group_by_test_output](../imgs/anta-nrfu-table-group-by-test-output.png){ loading=lazy width="1600" }
To get more specific information, it is possible to filter on a single device or a single test: To get more specific information, it is possible to filter on a single device or a single test:
@ -111,16 +109,18 @@ To get more specific information, it is possible to filter on a single device or
```bash ```bash
anta nrfu --device spine1 table anta nrfu --device spine1 table
``` ```
![anta nrfu table filter_host_output](../imgs/anta-nrfu-table-filter-host-output.png){ loading=lazy width="1600" } ![anta nrfu table filter_host_output](../imgs/anta-nrfu-table-filter-host-output.png){ loading=lazy width="1600" }
```bash ```bash
anta nrfu --test VerifyZeroTouch table anta nrfu --test VerifyZeroTouch table
``` ```
![anta nrfu table filter_test_output](../imgs/anta-nrfu-table-filter-test-output.png){ loading=lazy width="1600" } ![anta nrfu table filter_test_output](../imgs/anta-nrfu-table-filter-test-output.png){ loading=lazy width="1600" }
## Performing NRFU with JSON rendering ## Performing NRFU with JSON rendering
The JSON rendering command in NRFU testing is useful in generating a JSON output that can subsequently be passed on to another tool for reporting purposes. The JSON rendering command in NRFU testing will generate an output of all test results in JSON format.
### Command overview ### Command overview
@ -131,20 +131,66 @@ Usage: anta nrfu json [OPTIONS]
ANTA command to check network state with JSON result. ANTA command to check network state with JSON result.
Options: Options:
-o, --output FILE Path to save report as a file [env var: -o, --output FILE Path to save report as a JSON file [env var:
ANTA_NRFU_JSON_OUTPUT] ANTA_NRFU_JSON_OUTPUT]
--help Show this message and exit. --help Show this message and exit.
``` ```
The `--output` option allows you to save the JSON report as a file. The `--output` option allows you to save the JSON report as a file. If specified, no output will be displayed in the terminal. This is useful for further processing or integration with other tools.
### Example ### Example
```bash ```bash
anta nrfu --tags LEAF json anta nrfu --tags LEAF json
``` ```
![anta nrfu json results](../imgs/anta-nrfu-json-output.png){ loading=lazy width="1600" } ![anta nrfu json results](../imgs/anta-nrfu-json-output.png){ loading=lazy width="1600" }
## Performing NRFU and saving results in a CSV file
The `csv` command in NRFU testing is useful for generating a CSV file with all tests result. This file can be easily analyzed and filtered by operator for reporting purposes.
### Command overview
```bash
anta nrfu csv --help
Usage: anta nrfu csv [OPTIONS]
ANTA command to check network states with CSV result.
Options:
--csv-output FILE Path to save report as a CSV file [env var:
ANTA_NRFU_CSV_CSV_OUTPUT]
--help Show this message and exit.
```
### Example
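For instance, saving the report to a CSV file could look like this (the output path is illustrative):

```bash
anta nrfu --tags LEAF csv --csv-output ./anta_results.csv
```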
![anta nrfu csv results](../imgs/anta_nrfu_csv.png){ loading=lazy width="1600" }
## Performing NRFU and saving results in a Markdown file
The `md-report` command in NRFU testing generates a comprehensive Markdown report containing various sections, including detailed statistics for devices and test categories.
### Command overview
```bash
anta nrfu md-report --help
Usage: anta nrfu md-report [OPTIONS]
ANTA command to check network state with Markdown report.
Options:
--md-output FILE Path to save the report as a Markdown file [env var:
ANTA_NRFU_MD_REPORT_MD_OUTPUT; required]
--help Show this message and exit.
```
### Example
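For instance, generating the Markdown report could look like this (the output path is illustrative):

```bash
anta nrfu --tags LEAF md-report --md-output ./anta_results.md
```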
![anta nrfu md-report results](../imgs/anta-nrfu-md-report-output.png){ loading=lazy width="1600" }
## Performing NRFU with custom reports ## Performing NRFU with custom reports
ANTA offers a CLI option for creating custom reports. This leverages the Jinja2 template system, allowing you to tailor reports to your specific needs. ANTA offers a CLI option for creating custom reports. This leverages the Jinja2 template system, allowing you to tailor reports to your specific needs.
@ -164,6 +210,7 @@ Options:
ANTA_NRFU_TPL_REPORT_OUTPUT] ANTA_NRFU_TPL_REPORT_OUTPUT]
--help Show this message and exit. --help Show this message and exit.
``` ```
The `--template` option is used to specify the Jinja2 template file for generating the custom report. The `--template` option is used to specify the Jinja2 template file for generating the custom report.
The `--output` option allows you to choose the path where the final report will be saved. The `--output` option allows you to choose the path where the final report will be saved.
@ -173,6 +220,7 @@ The `--output` option allows you to choose the path where the final report will
```bash ```bash
anta nrfu --tags LEAF tpl-report --template ./custom_template.j2 anta nrfu --tags LEAF tpl-report --template ./custom_template.j2
``` ```
![anta nrfu tpl_results](../imgs/anta-nrfu-tpl-report-output.png){ loading=lazy width="1600" } ![anta nrfu tpl_results](../imgs/anta-nrfu-tpl-report-output.png){ loading=lazy width="1600" }
The template `./custom_template.j2` is a simple Jinja2 template: The template `./custom_template.j2` is a simple Jinja2 template:
@ -183,7 +231,7 @@ The template `./custom_template.j2` is a simple Jinja2 template:
{% endfor %} {% endfor %}
``` ```
The Jinja2 template has access to all `TestResult` elements and their values, as described in this [documentation](../api/result_manager_models.md#testresult-entry). The Jinja2 template has access to all `TestResult` elements and their values, as described in this [documentation](../api/result_manager_models.md#anta.result_manager.models.TestResult).
You can also save the report result to a file using the `--output` option: You can also save the report result to a file using the `--output` option:

View file

@ -1,11 +1,12 @@
---
anta_title: Overview of ANTA's Command-Line Interface (CLI)
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Overview of ANTA's Command-Line Interface (CLI)
ANTA provides a powerful Command-Line Interface (CLI) to perform a wide range of operations. This document provides a comprehensive overview of ANTA CLI usage and its commands. ANTA provides a powerful Command-Line Interface (CLI) to perform a wide range of operations. This document provides a comprehensive overview of ANTA CLI usage and its commands.
ANTA can also be used as a Python library, allowing you to build your own tools based on it. Visit this [page](../advanced_usages/as-python-lib.md) for more details. ANTA can also be used as a Python library, allowing you to build your own tools based on it. Visit this [page](../advanced_usages/as-python-lib.md) for more details.
@ -35,7 +36,7 @@ To set them as environment variables:
export ANTA_USERNAME=admin export ANTA_USERNAME=admin
export ANTA_PASSWORD=arista123 export ANTA_PASSWORD=arista123
export ANTA_INVENTORY=inventory.yml export ANTA_INVENTORY=inventory.yml
export ANTA_INVENTORY=tests.yml export ANTA_CATALOG=tests.yml
``` ```
Then, run the CLI without options: Then, run the CLI without options:

View file

@ -4,162 +4,250 @@
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Tag management ANTA commands can be used with a `--tags` option. This option **filters the inventory** with the specified tag(s) when running the command.
## Overview Tags can also be used to **restrict a specific test** to a set of devices when using `anta nrfu`.
Some of the ANTA commands like `anta nrfu` command come with a `--tags` option. ## Defining tags
For `nrfu`, this allows users to specify a set of tests, marked with a given tag, to be run on devices marked with the same tag. For instance, you can run tests dedicated to leaf devices on your leaf devices only and not on other devices. ### Device tags
Tags are string defined by the user and can be anything considered as a string by Python. A [default one](#default-tags) is present for all tests and devices. Device tags can be defined in the inventory:
The next table provides a short summary of the scope of tags using CLI ```yaml
anta_inventory:
| Command | Description |
| ------- | ----------- |
| `none` | Run all tests on all devices according `tag` definition in your inventory and test catalog. And tests with no tag are executed on all devices|
| `--tags leaf` | Run all tests marked with `leaf` tag on all devices configured with `leaf` tag.<br/> All other tags are ignored |
| `--tags leaf,spine` | Run all tests marked with `leaf` tag on all devices configured with `leaf` tag.<br/>Run all tests marked with `spine` tag on all devices configured with `spine` tag.<br/> All other tags are ignored |
## Inventory and Catalog for tests
All commands in this page are based on the following inventory and test catalog.
=== "Inventory"
```yaml
---
anta_inventory:
hosts: hosts:
- host: 192.168.0.10 - name: leaf1
name: spine01 host: leaf1.anta.arista.com
tags: ['fabric', 'spine'] tags: ["leaf"]
- host: 192.168.0.11 - name: leaf2
name: spine02 host: leaf2.anta.arista.com
tags: ['fabric', 'spine'] tags: ["leaf"]
- host: 192.168.0.12 - name: spine1
name: leaf01 host: spine1.anta.arista.com
tags: ['fabric', 'leaf'] tags: ["spine"]
- host: 192.168.0.13 ```
name: leaf02
tags: ['fabric', 'leaf']
- host: 192.168.0.14
name: leaf03
tags: ['fabric', 'leaf']
- host: 192.168.0.15
name: leaf04
tags: ['fabric', 'leaf'
```
=== "Test Catalog" Each device also has its own name automatically added as a tag:
```yaml ```bash
anta.tests.system: $ anta get inventory
Current inventory content is:
{
'leaf1': AsyncEOSDevice(
name='leaf1',
tags={'leaf', 'leaf1'}, <--
[...]
host='leaf1.anta.arista.com',
[...]
),
'leaf2': AsyncEOSDevice(
name='leaf2',
tags={'leaf', 'leaf2'}, <--
[...]
host='leaf2.anta.arista.com',
[...]
),
'spine1': AsyncEOSDevice(
name='spine1',
tags={'spine1', 'spine'}, <--
[...]
host='spine1.anta.arista.com',
[...]
)
}
```
### Test tags
Tags can be defined in the test catalog to restrict tests to tagged devices:
```yaml
anta.tests.system:
- VerifyUptime: - VerifyUptime:
minimum: 10 minimum: 10
filters: filters:
tags: ['fabric'] tags: ['spine']
- VerifyUptime:
minimum: 9
filters:
tags: ['leaf']
- VerifyReloadCause: - VerifyReloadCause:
tags: ['leaf', spine'] filters:
tags: ['spine', 'leaf']
- VerifyCoredump: - VerifyCoredump:
- VerifyAgentLogs: - VerifyAgentLogs:
- VerifyCPUUtilization: - VerifyCPUUtilization:
filters:
tags: ['spine', 'leaf']
- VerifyMemoryUtilization: - VerifyMemoryUtilization:
- VerifyFileSystemUtilization: - VerifyFileSystemUtilization:
- VerifyNTP: - VerifyNTP:
anta.tests.mlag: anta.tests.mlag:
- VerifyMlagStatus: - VerifyMlagStatus:
filters:
tags: ['leaf']
anta.tests.interfaces:
anta.tests.interfaces:
- VerifyL3MTU: - VerifyL3MTU:
mtu: 1500 mtu: 1500
filters: filters:
tags: ['demo'] tags: ['spine']
``` ```
## Default tags > A tag used to filter a test can also be a device name
By default, ANTA uses a default tag for both devices and tests. This default tag is `all` and it can be explicit if you want to make it visible in your inventory and also implicit since the framework injects this tag if it is not defined. !!! tip "Use different input values for a specific test"
Leverage tags to define different input values for a specific test. See the `VerifyUptime` example above.
So this command will run all tests from your catalog on all devices. With a mapping for `tags` defined in your inventory and catalog. If no `tags` configured, then tests are executed against all devices. ## Using tags
| Command | Description |
| ------- | ----------- |
| No `--tags` option | Run all tests on all devices according to the `tag` definitions in your inventory and test catalog.<br/> Tests without tags are executed on all devices. |
| `--tags leaf` | Run all tests marked with the `leaf` tag on all devices configured with the `leaf` tag.<br/> All other tests are ignored. |
| `--tags leaf,spine` | Run all tests marked with the `leaf` tag on all devices configured with the `leaf` tag.<br/>Run all tests marked with the `spine` tag on all devices configured with the `spine` tag.<br/> All other tests are ignored. |
### Examples
The following examples use the inventory and test catalog defined above.
#### No `--tags` option
Tests without tags are run on all devices.
Tests with tags will only run on devices with matching tags.
```bash ```bash
$ anta nrfu -c .personal/catalog-class.yml table --group-by device $ anta nrfu table --group-by device
╭────────────────────── Settings ──────────────────────╮ ╭────────────────────── Settings ──────────────────────╮
│ Running ANTA tests: │ │ - ANTA Inventory contains 3 devices (AsyncEOSDevice) │
│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │ │ - Tests catalog contains 11 tests │
│ - Tests catalog contains 10 tests │
╰──────────────────────────────────────────────────────╯ ╰──────────────────────────────────────────────────────╯
┏━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ --- ANTA NRFU Run Information ---
Number of devices: 3 (3 established)
Total number of selected tests: 27
---------------------------------
Summary per device
┏━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Device ┃ # of success ┃ # of skipped ┃ # of failure ┃ # of errors ┃ List of failed or error test cases ┃ ┃ Device ┃ # of success ┃ # of skipped ┃ # of failure ┃ # of errors ┃ List of failed or error test cases ┃
┡━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ ┡━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ spine01 │ 5 │ 1 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ │ leaf1 │ 9 │ 0 │ 0 │ 0 │ │
│ spine02 │ 5 │ 1 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ ├────────┼──────────────┼──────────────┼──────────────┼─────────────┼────────────────────────────────────┤
│ leaf01 │ 6 │ 0 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ │ leaf2 │ 7 │ 1 │ 1 │ 0 │ VerifyAgentLogs │
│ leaf02 │ 6 │ 0 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ ├────────┼──────────────┼──────────────┼──────────────┼─────────────┼────────────────────────────────────┤
│ leaf03 │ 6 │ 0 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ │ spine1 │ 9 │ 0 │ 0 │ 0 │ │
│ leaf04 │ 6 │ 0 │ 1 │ 0 │ ['VerifyCPUUtilization'] │ └────────┴──────────────┴──────────────┴──────────────┴─────────────┴────────────────────────────────────┘
└─────────┴──────────────┴──────────────┴──────────────┴─────────────┴────────────────────────────────────┘
``` ```
## Use a single tag in CLI #### Single tag
The most used approach is to use a single tag in your CLI to filter tests & devices configured with this one. With a tag specified, only tests matching this tag will be run on matching devices.
In such scenario, ANTA will run tests marked with `$tag` only on devices marked with `$tag`. All other tests and devices will be ignored
```bash ```bash
$ anta nrfu -c .personal/catalog-class.yml --tags leaf text $ anta nrfu --tags leaf text
╭────────────────────── Settings ──────────────────────╮ ╭────────────────────── Settings ──────────────────────╮
│ Running ANTA tests: │ │ - ANTA Inventory contains 3 devices (AsyncEOSDevice) │
│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │ │ - Tests catalog contains 11 tests │
│ - Tests catalog contains 10 tests │
╰──────────────────────────────────────────────────────╯ ╰──────────────────────────────────────────────────────╯
leaf01 :: VerifyUptime :: SUCCESS --- ANTA NRFU Run Information ---
leaf01 :: VerifyReloadCause :: SUCCESS Number of devices: 3 (2 established)
leaf01 :: VerifyCPUUtilization :: SUCCESS Total number of selected tests: 6
leaf02 :: VerifyUptime :: SUCCESS ---------------------------------
leaf02 :: VerifyReloadCause :: SUCCESS
leaf02 :: VerifyCPUUtilization :: SUCCESS leaf1 :: VerifyReloadCause :: SUCCESS
leaf03 :: VerifyUptime :: SUCCESS leaf1 :: VerifyUptime :: SUCCESS
leaf03 :: VerifyReloadCause :: SUCCESS leaf1 :: VerifyMlagStatus :: SUCCESS
leaf03 :: VerifyCPUUtilization :: SUCCESS leaf2 :: VerifyReloadCause :: SUCCESS
leaf04 :: VerifyUptime :: SUCCESS leaf2 :: VerifyUptime :: SUCCESS
leaf04 :: VerifyReloadCause :: SUCCESS leaf2 :: VerifyMlagStatus :: SKIPPED (MLAG is disabled)
leaf04 :: VerifyCPUUtilization :: SUCCESS
``` ```
In this case, only `leaf` devices defined in your [inventory](#inventory-and-catalog-for-tests) are used to run tests marked with `leaf` in your [test catalog](#inventory-and-catalog-for-tests) In this case, only `leaf` devices defined in the inventory are used to run tests marked with the `leaf` tag in the test catalog.
## Use multiple tags in CLI #### Multiple tags
A more advanced usage of the tag feature is to list multiple tags in your CLI using `--tags $tag1,$tag2` syntax. It is possible to use multiple tags using the `--tags tag1,tag2` syntax.
In such scenario, all devices marked with `$tag1` will be selected and ANTA will run tests with `$tag1`, then devices with `$tag2` will be selected and will be tested with tests marked with `$tag2`
```bash ```bash
anta nrfu -c .personal/catalog-class.yml --tags leaf,fabric text $ anta nrfu --tags leaf,spine text
╭────────────────────── Settings ──────────────────────╮
│ - ANTA Inventory contains 3 devices (AsyncEOSDevice) │
│ - Tests catalog contains 11 tests │
╰──────────────────────────────────────────────────────╯
spine01 :: VerifyUptime :: SUCCESS --- ANTA NRFU Run Information ---
spine02 :: VerifyUptime :: SUCCESS Number of devices: 3 (3 established)
leaf01 :: VerifyUptime :: SUCCESS Total number of selected tests: 15
leaf01 :: VerifyReloadCause :: SUCCESS ---------------------------------
leaf01 :: VerifyCPUUtilization :: SUCCESS
leaf02 :: VerifyUptime :: SUCCESS leaf1 :: VerifyReloadCause :: SUCCESS
leaf02 :: VerifyReloadCause :: SUCCESS leaf1 :: VerifyMlagStatus :: SUCCESS
leaf02 :: VerifyCPUUtilization :: SUCCESS leaf1 :: VerifyUptime :: SUCCESS
leaf03 :: VerifyUptime :: SUCCESS leaf1 :: VerifyL3MTU :: SUCCESS
leaf03 :: VerifyReloadCause :: SUCCESS leaf1 :: VerifyUptime :: SUCCESS
leaf03 :: VerifyCPUUtilization :: SUCCESS leaf2 :: VerifyReloadCause :: SUCCESS
leaf04 :: VerifyUptime :: SUCCESS leaf2 :: VerifyMlagStatus :: SKIPPED (MLAG is disabled)
leaf04 :: VerifyReloadCause :: SUCCESS leaf2 :: VerifyUptime :: SUCCESS
leaf04 :: VerifyCPUUtilization :: SUCCESS leaf2 :: VerifyL3MTU :: SUCCESS
leaf2 :: VerifyUptime :: SUCCESS
spine1 :: VerifyReloadCause :: SUCCESS
spine1 :: VerifyMlagStatus :: SUCCESS
spine1 :: VerifyUptime :: SUCCESS
spine1 :: VerifyL3MTU :: SUCCESS
spine1 :: VerifyUptime :: SUCCESS
```
## Obtaining all configured tags
As most ANTA commands accommodate tag filtering, this command is useful for enumerating all tags configured in the inventory. Running the `anta get tags` command will return a list of all tags configured in the inventory.
### Command overview
```bash
Usage: anta get tags [OPTIONS]
Get list of configured tags in user inventory.
Options:
-u, --username TEXT Username to connect to EOS [env var: ANTA_USERNAME;
required]
-p, --password TEXT Password to connect to EOS that must be provided. It
can be prompted using '--prompt' option. [env var:
ANTA_PASSWORD]
--enable-password TEXT Password to access EOS Privileged EXEC mode. It can
be prompted using '--prompt' option. Requires '--
enable' option. [env var: ANTA_ENABLE_PASSWORD]
--enable Some commands may require EOS Privileged EXEC mode.
This option tries to access this mode before sending
a command to the device. [env var: ANTA_ENABLE]
-P, --prompt Prompt for passwords if they are not provided. [env
var: ANTA_PROMPT]
--timeout FLOAT Global API timeout. This value will be used for all
devices. [env var: ANTA_TIMEOUT; default: 30.0]
--insecure Disable SSH Host Key validation. [env var:
ANTA_INSECURE]
--disable-cache Disable cache globally. [env var:
ANTA_DISABLE_CACHE]
-i, --inventory FILE Path to the inventory YAML file. [env var:
ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS]
--help Show this message and exit.
```
### Example
To get the list of all configured tags in the inventory, run the following command:
```bash
$ anta get tags
Tags found:
[
"leaf",
"leaf1",
"leaf2",
"spine",
"spine1"
]
``` ```

View file

@ -1,11 +1,12 @@
---
anta_title: How to contribute to ANTA
---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0 ~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# How to contribute to ANTA
The contribution model is based on a fork model. Don't push to aristanetworks/anta directly. Always create a branch in your forked repository and open a PR. The contribution model is based on a fork model. Don't push to aristanetworks/anta directly. Always create a branch in your forked repository and open a PR.
To help development, open your PR as soon as possible, even in draft mode. It helps others know what you are working on and avoids duplicate PRs. To help development, open your PR as soon as possible, even in draft mode. It helps others know what you are working on and avoids duplicate PRs.
@ -28,7 +29,7 @@ $ pip install -e .[dev,cli]
$ pip list -e $ pip list -e
Package Version Editable project location Package Version Editable project location
------- ------- ------------------------- ------- ------- -------------------------
anta 1.0.0 /mnt/lab/projects/anta anta 1.1.0 /mnt/lab/projects/anta
``` ```
Then, [`tox`](https://tox.wiki/) is configured with a few environments to run CI locally: Then, [`tox`](https://tox.wiki/) is configured with a few environments to run CI locally:
@ -39,10 +40,10 @@ default environments:
clean -> Erase previous coverage reports clean -> Erase previous coverage reports
lint -> Check the code style lint -> Check the code style
type -> Check typing type -> Check typing
py38 -> Run pytest with py38
py39 -> Run pytest with py39 py39 -> Run pytest with py39
py310 -> Run pytest with py310 py310 -> Run pytest with py310
py311 -> Run pytest with py311 py311 -> Run pytest with py311
py312 -> Run pytest with py312
report -> Generate coverage report report -> Generate coverage report
``` ```
@ -51,21 +52,22 @@ report -> Generate coverage report
```bash ```bash
tox -e lint tox -e lint
[...] [...]
lint: commands[0]> black --check --diff --color . lint: commands[0]> ruff check .
All done! ✨ 🍰 ✨ All checks passed!
104 files would be left unchanged. lint: commands[1]> ruff format . --check
lint: commands[1]> isort --check --diff --color . 158 files already formatted
Skipped 7 files lint: commands[2]> pylint anta
lint: commands[2]> flake8 --max-line-length=165 --config=/dev/null anta
lint: commands[3]> flake8 --max-line-length=165 --config=/dev/null tests
lint: commands[4]> pylint anta
-------------------------------------------------------------------- --------------------------------------------------------------------
Your code has been rated at 10.00/10 (previous run: 10.00/10, +0.00) Your code has been rated at 10.00/10 (previous run: 10.00/10, +0.00)
.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta lint: commands[3]> pylint tests
lint: OK (19.26=setup[5.83]+cmd[1.50,0.76,1.19,1.20,8.77] seconds)
congratulations :) (19.56 seconds) --------------------------------------------------------------------
Your code has been rated at 10.00/10 (previous run: 10.00/10, +0.00)
lint: OK (22.69=setup[2.19]+cmd[0.02,0.02,9.71,10.75] seconds)
congratulations :) (22.72 seconds)
``` ```
### Code Typing ### Code Typing
@ -75,10 +77,11 @@ tox -e type
[...] [...]
type: commands[0]> mypy --config-file=pyproject.toml anta type: commands[0]> mypy --config-file=pyproject.toml anta
Success: no issues found in 52 source files Success: no issues found in 68 source files
.pkg: _exit> python /Users/guillaumemulocher/.pyenv/versions/3.8.13/envs/anta/lib/python3.8/site-packages/pyproject_api/_backend.py True setuptools.build_meta type: commands[1]> mypy --config-file=pyproject.toml tests
type: OK (46.66=setup[24.20]+cmd[22.46] seconds) Success: no issues found in 82 source files
congratulations :) (47.01 seconds) type: OK (31.15=setup[14.62]+cmd[6.05,10.48] seconds)
congratulations :) (31.18 seconds)
``` ```
> NOTE: Typing is configured quite strictly, do not hesitate to reach out if you have any questions, struggles, nightmares. > NOTE: Typing is configured quite strictly, do not hesitate to reach out if you have any questions, struggles, nightmares.
@ -92,7 +95,7 @@ All submodule should have its own pytest section under `tests/units/anta_tests/<
### How to write a unit test for an AntaTest subclass ### How to write a unit test for an AntaTest subclass
The Python modules in the `tests/units/anta_tests` folder define test parameters for AntaTest subclasses unit tests. The Python modules in the `tests/units/anta_tests` folder define test parameters for AntaTest subclasses unit tests.
A generic test function is written for all unit tests in `tests.lib.anta` module. A generic test function is written for all unit tests in `tests.units.anta_tests` module.
The `pytest_generate_tests` function definition in `conftest.py` is called during test collection. The `pytest_generate_tests` function definition in `conftest.py` is called during test collection.
@ -102,21 +105,20 @@ See https://docs.pytest.org/en/7.3.x/how-to/parametrize.html#basic-pytest-genera
The `DATA` structure is a list of dictionaries used to parametrize the test. The list elements have the following keys: The `DATA` structure is a list of dictionaries used to parametrize the test. The list elements have the following keys:
- `name` (str): Test name as displayed by Pytest. - `name` (str): Test name as displayed by Pytest.
- `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime. - `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime.
- `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test. - `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test.
- `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`. - `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`.
- `expected` (dict): Expected test result structure, a dictionary containing a key - `expected` (dict): Expected test result structure, a dictionary containing a key
`result` containing one of the allowed status (`Literal['success', 'failure', 'unset', 'skipped', 'error']`) and optionally a key `messages` which is a list(str) and each message is expected to be a substring of one of the actual messages in the TestResult object. `result` containing one of the allowed status (`Literal['success', 'failure', 'unset', 'skipped', 'error']`) and optionally a key `messages` which is a list(str) and each message is expected to be a substring of one of the actual messages in the TestResult object.
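For orientation, a single `DATA` entry following the key layout above could look like the sketch below; the mocked `eos_data` payload and input values are purely illustrative and are not taken from the repository:

```python
# Illustrative only: one DATA entry exercising VerifyUptime with mocked EOS data.
from anta.tests.system import VerifyUptime

DATA = [
    {
        "name": "success",                     # test name displayed by pytest
        "test": VerifyUptime,                  # the AntaTest subclass under test
        "eos_data": [{"upTime": 1186689.15}],  # mocked EOS output (illustrative payload)
        "inputs": {"minimum": 30},             # instantiates the test inputs
        "expected": {"result": "success"},     # expected TestResult status
    },
]
```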
In order for your unit tests to be correctly collected, you need to import the generic test function even if not used in the Python module. In order for your unit tests to be correctly collected, you need to import the generic test function even if not used in the Python module.
Test example for `anta.tests.system.VerifyUptime` AntaTest. Test example for `anta.tests.system.VerifyUptime` AntaTest.
``` python ``` python
# Import the generic test function # Import the generic test function
from tests.lib.anta import test # noqa: F401 from tests.units.anta_tests import test
# Import your AntaTest # Import your AntaTest
from anta.tests.system import VerifyUptime from anta.tests.system import VerifyUptime
@ -157,20 +159,21 @@ pre-commit install
When running a commit or a pre-commit check: When running a commit or a pre-commit check:
``` bash ``` bash
echo "import foobaz" > test.py && git add test.py
pre-commit pre-commit
pylint...................................................................Failed trim trailing whitespace.................................................Passed
- hook id: pylint fix end of files.........................................................Passed
- exit code: 22 check for added large files..............................................Passed
check for merge conflicts................................................Passed
************* Module test Check and insert license on Python files.................................Passed
test.py:1:0: C0114: Missing module docstring (missing-module-docstring) Check and insert license on Markdown files...............................Passed
test.py:1:0: E0401: Unable to import 'foobaz' (import-error) Run Ruff linter..........................................................Passed
test.py:1:0: W0611: Unused import foobaz (unused-import) Run Ruff formatter.......................................................Passed
Check code style with pylint.............................................Passed
Checks for common misspellings in text files.............................Passed
Check typing with mypy...................................................Passed
Check Markdown files style...............................................Passed
``` ```
> NOTE: It could happen that pre-commit and tox disagree on something; in that case, please open an issue on GitHub so we can take a look. It is most probably a wrong configuration on our side.
## Configure MYPYPATH ## Configure MYPYPATH
In some cases, mypy can complain about not having `MYPYPATH` configured in your shell. It is especially the case when you update both an anta test and its unit test. So you can configure this environment variable with: In some cases, mypy can complain about not having `MYPYPATH` configured in your shell. It is especially the case when you update both an anta test and its unit test. So you can configure this environment variable with:
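For instance, assuming the command is run from the repository root (the exact value shown in the contribution guide may differ), a hypothetical setting would be:

```bash
# Hypothetical example: let mypy resolve both the anta package and the tests package
export MYPYPATH=.
```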
@ -229,4 +232,4 @@ muffet -c 2 --color=always http://127.0.0.1:8000 -e fonts.gstatic.com -b 8192
## Continuous Integration ## Continuous Integration
GitHub actions is used to test git pushes and pull requests. The workflows are defined in this [directory](https://github.com/aristanetworks/anta/tree/main/.github/workflows). We can view the results [here](https://github.com/aristanetworks/anta/actions). GitHub actions is used to test git pushes and pull requests. The workflows are defined in this [directory](https://github.com/aristanetworks/anta/tree/main/.github/workflows). The results can be viewed [here](https://github.com/aristanetworks/anta/actions).

View file

@ -1,5 +1,6 @@
--- ---
toc_depth: 2 toc_depth: 3
anta_title: Frequently Asked Questions (FAQ)
--- ---
<!-- <!--
~ Copyright (c) 2023-2024 Arista Networks, Inc. ~ Copyright (c) 2023-2024 Arista Networks, Inc.
@ -7,7 +8,7 @@ toc_depth: 2
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
<style> <style>
.md-typeset h2 { .md-typeset h3 {
visibility: hidden; visibility: hidden;
font-size: 0em; font-size: 0em;
height: 0em; height: 0em;
@ -21,9 +22,8 @@ toc_depth: 2
} }
</style> </style>
# Frequently Asked Questions (FAQ)
## A local OS error occurred while connecting to a device ## A local OS error occurred while connecting to a device
???+ faq "A local OS error occurred while connecting to a device" ???+ faq "A local OS error occurred while connecting to a device"
When running ANTA, you can receive `A local OS error occurred while connecting to <device>` errors. The underlying [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError) exception can have various reasons: `[Errno 24] Too many open files` or `[Errno 16] Device or resource busy`. When running ANTA, you can receive `A local OS error occurred while connecting to <device>` errors. The underlying [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError) exception can have various reasons: `[Errno 24] Too many open files` or `[Errno 16] Device or resource busy`.
@ -43,8 +43,8 @@ toc_depth: 2
The `user` is the one with which the ANTA process is started. The `user` is the one with which the ANTA process is started.
The `value` is the new hard limit. The maximum value depends on the system. A hard limit of 16384 should be sufficient for ANTA to run in most high scale scenarios. After creating this file, log out the current session and log in again. The `value` is the new hard limit. The maximum value depends on the system. A hard limit of 16384 should be sufficient for ANTA to run in most high scale scenarios. After creating this file, log out the current session and log in again.
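As an illustration only (the file name and user are placeholders, not taken from this guide), a limits.d drop-in raising the hard limit could be created like this:

```bash
# Placeholder user and file name; adjust to your system before applying
echo "alice hard nofile 16384" | sudo tee /etc/security/limits.d/10-anta.conf
```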
## `Timeout` error in the logs ## `Timeout` error in the logs
???+ faq "`Timeout` error in the logs" ???+ faq "`Timeout` error in the logs"
When running ANTA, you can receive `<Foo>Timeout` errors in the logs (could be ReadTimeout, WriteTimeout, ConnectTimeout or PoolTimeout). More details on the timeouts of the underlying library are available here: https://www.python-httpx.org/advanced/timeouts. When running ANTA, you can receive `<Foo>Timeout` errors in the logs (could be ReadTimeout, WriteTimeout, ConnectTimeout or PoolTimeout). More details on the timeouts of the underlying library are available here: https://www.python-httpx.org/advanced/timeouts.
@ -63,8 +63,8 @@ toc_depth: 2
The timeout is increased to 50s to allow ANTA to wait for API calls a little longer. The timeout is increased to 50s to allow ANTA to wait for API calls a little longer.
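As a hypothetical invocation, the documented `ANTA_TIMEOUT` environment variable (or the `--timeout` option) can carry that value:

```bash
# Raise the global API timeout to 50 seconds for this run (hypothetical invocation)
ANTA_TIMEOUT=50 anta nrfu table
```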
## `ImportError` related to `urllib3` ## `ImportError` related to `urllib3`
???+ faq "`ImportError` related to `urllib3` when running ANTA"
???+ faq "`ImportError` related to `urllib3` when running ANTA"
When running the `anta --help` command, some users might encounter the following error: When running the `anta --help` command, some users might encounter the following error:
@ -90,9 +90,9 @@ toc_depth: 2
As per the [urllib3 v2 migration guide](https://urllib3.readthedocs.io/en/latest/v2-migration-guide.html), the root cause of this error is an incompatibility with older OpenSSL versions. For example, users on RHEL7 might consider upgrading to RHEL8, which supports the required OpenSSL version. As per the [urllib3 v2 migration guide](https://urllib3.readthedocs.io/en/latest/v2-migration-guide.html), the root cause of this error is an incompatibility with older OpenSSL versions. For example, users on RHEL7 might consider upgrading to RHEL8, which supports the required OpenSSL version.
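A common stop-gap, not specific to ANTA and offered here only as an assumption about typical environments, is to pin `urllib3` below v2 until the platform OpenSSL can be upgraded:

```bash
# Temporary workaround on systems stuck with an old OpenSSL (use with care)
pip install "urllib3<2"
```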
##`AttributeError: module 'lib' has no attribute 'OpenSSL_add_all_algorithms'` ## `AttributeError: module 'lib' has no attribute 'OpenSSL_add_all_algorithms'`
???+ faq "`AttributeError: module 'lib' has no attribute 'OpenSSL_add_all_algorithms'` when running ANTA"
???+ faq "`AttributeError: module 'lib' has no attribute 'OpenSSL_add_all_algorithms'` when running ANTA"
When running the `anta` commands after installation, some users might encounter the following error: When running the `anta` commands after installation, some users might encounter the following error:
@ -111,8 +111,8 @@ toc_depth: 2
``` ```
## `__NSCFConstantString initialize` error on OSX ## `__NSCFConstantString initialize` error on OSX
???+ faq "`__NSCFConstantString initialize` error on OSX"
???+ faq "`__NSCFConstantString initialize` error on OSX"
This error occurs because of added security to restrict multithreading in macOS High Sierra and later versions of macOS. https://www.wefearchange.org/2018/11/forkmacos.rst.html This error occurs because of added security to restrict multithreading in macOS High Sierra and later versions of macOS. https://www.wefearchange.org/2018/11/forkmacos.rst.html
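The workaround described in the linked post is to relax that fork-safety check before running ANTA; it is shown here as an illustration rather than an official recommendation:

```bash
# macOS only: disable the Objective-C fork-safety check for this shell session
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
```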

View file

@ -4,8 +4,6 @@
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# Getting Started
This section shows how to use ANTA with basic configuration. All examples are based on Arista Test Drive (ATD) topology you can access by reaching out to your preferred SE. This section shows how to use ANTA with basic configuration. All examples are based on Arista Test Drive (ATD) topology you can access by reaching out to your preferred SE.
## Installation ## Installation
@ -72,7 +70,7 @@ anta_inventory:
tags: ['fabric', 'leaf'] tags: ['fabric', 'leaf']
``` ```
> You can read more details about how to build your inventory [here](usage-inventory-catalog.md#create-an-inventory-file) > You can read more details about how to build your inventory [here](usage-inventory-catalog.md#device-inventory)
## Test Catalog ## Test Catalog
@ -257,7 +255,7 @@ $ anta nrfu \
] ]
``` ```
You can find more information under the __usage__ section of the website You can find more information under the **usage** section of the website
### Basic usage in a Python script ### Basic usage in a Python script

Binary file not shown.



BIN
docs/imgs/anta_nrfu_csv.png Normal file

Binary file not shown.



View file

@ -16,6 +16,8 @@
{{app}} {{app}}
{% endblock %} {% endblock %}
{# Keeping this for future announcement if required
{% block announce %} {% block announce %}
ANTA code has moved to a new house in aristanetworks organization and so has the documentation. <strong>Please update your bookmark to use <a href="https://anta.arista.com">anta.arista.com<a/></strong> ANTA code has moved to a new house in aristanetworks organization and so has the documentation. <strong>Please update your bookmark to use <a href="https://anta.arista.com">anta.arista.com<a/></strong>
{% endblock %} {% endblock %}
#}

View file

@ -4,8 +4,6 @@
~ that can be found in the LICENSE file. ~ that can be found in the LICENSE file.
--> -->
# ANTA Requirements
## Python version ## Python version
Python 3 (`>=3.9`) is required: Python 3 (`>=3.9`) is required:
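A quick way to confirm the interpreter meets this requirement:

```bash
# Any CPython 3.9 or later release satisfies the requirement
python --version
```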
@ -21,7 +19,6 @@ This installation will deploy tests collection, scripts and all their Python req
The ANTA package and the cli require some packages that are not part of the Python standard library. They are indicated in the [pyproject.toml](https://github.com/aristanetworks/anta/blob/main/pyproject.toml) file, under dependencies. The ANTA package and the cli require some packages that are not part of the Python standard library. They are indicated in the [pyproject.toml](https://github.com/aristanetworks/anta/blob/main/pyproject.toml) file, under dependencies.
### Install library from Pypi server ### Install library from Pypi server
```bash ```bash
@ -31,14 +28,12 @@ pip install anta
!!! Warning !!! Warning
* This command alone **will not** install the ANTA CLI requirements. * This command alone **will not** install the ANTA CLI requirements.
* When using ANTA mode in [AVD](https://avd.arista.com) `eos_validate` role, (currently in preview), ensure you install the documented supported ANTA version for your AVD version.</br>
The latest documented version can be found at: https://avd.arista.com/stable/roles/eos_validate_state/ANTA-Preview.html
### Install ANTA CLI as an application with `pipx` ### Install ANTA CLI as an application with `pipx`
[`pipx`](https://pipx.pypa.io/stable/) is a tool to install and run python applications in isolated environments. If you plan to use ANTA only as a CLI tool you can use `pipx` to install it. `pipx` installs ANTA in an isolated python environment and makes it available globally. [`pipx`](https://pipx.pypa.io/stable/) is a tool to install and run python applications in isolated environments. If you plan to use ANTA only as a CLI tool you can use `pipx` to install it. `pipx` installs ANTA in an isolated python environment and makes it available globally.
``` ```bash
pipx install anta[cli] pipx install anta[cli]
``` ```
@ -46,7 +41,6 @@ pipx install anta[cli]
Please take the time to read through the installation instructions of `pipx` before getting started. Please take the time to read through the installation instructions of `pipx` before getting started.
### Install CLI from Pypi server ### Install CLI from Pypi server
Alternatively, pip install with `cli` extra is enough to install the ANTA CLI. Alternatively, pip install with `cli` extra is enough to install the ANTA CLI.
@ -57,7 +51,6 @@ pip install anta[cli]
### Install ANTA from github ### Install ANTA from github
```bash ```bash
pip install git+https://github.com/aristanetworks/anta.git pip install git+https://github.com/aristanetworks/anta.git
pip install git+https://github.com/aristanetworks/anta.git#egg=anta[cli] pip install git+https://github.com/aristanetworks/anta.git#egg=anta[cli]
@ -93,7 +86,7 @@ which anta
```bash ```bash
# Check ANTA version # Check ANTA version
anta --version anta --version
anta, version v1.0.0 anta, version v1.1.0
``` ```
## EOS Requirements ## EOS Requirements

View file

@ -24,6 +24,7 @@ from unittest.mock import patch
from rich.console import Console from rich.console import Console
from rich.logging import RichHandler from rich.logging import RichHandler
from rich.progress import Progress
from anta.cli.console import console from anta.cli.console import console
from anta.cli.nrfu.utils import anta_progress_bar from anta.cli.nrfu.utils import anta_progress_bar
@ -37,7 +38,7 @@ root.addHandler(r)
OUTPUT_DIR = pathlib.Path(__file__).parent.parent / "imgs" OUTPUT_DIR = pathlib.Path(__file__).parent.parent / "imgs"
def custom_progress_bar() -> None: def custom_progress_bar() -> Progress:
"""Set the console of progress_bar to main anta console. """Set the console of progress_bar to main anta console.
Caveat: this captures all steps of the progress bar. Caveat: this captures all steps of the progress bar.

View file

@ -29,8 +29,10 @@ Options:
ANTA_INVENTORY; required] ANTA_INVENTORY; required]
--tags TEXT List of tags using comma as separator: --tags TEXT List of tags using comma as separator:
tag1,tag2,tag3. [env var: ANTA_TAGS] tag1,tag2,tag3. [env var: ANTA_TAGS]
-c, --catalog FILE Path to the test catalog YAML file [env -c, --catalog FILE Path to the test catalog file [env var:
var: ANTA_CATALOG; required] ANTA_CATALOG; required]
--catalog-format [yaml|json] Format of the catalog file, either 'yaml' or
'json' [env var: ANTA_CATALOG_FORMAT]
-d, --device TEXT Run tests on a specific device. Can be -d, --device TEXT Run tests on a specific device. Can be
provided multiple times. provided multiple times.
-t, --test TEXT Run a specific test. Can be provided -t, --test TEXT Run a specific test. Can be provided
@ -41,7 +43,8 @@ Options:
or 1 if any test failed. [env var: or 1 if any test failed. [env var:
ANTA_NRFU_IGNORE_ERROR] ANTA_NRFU_IGNORE_ERROR]
--hide [success|failure|error|skipped] --hide [success|failure|error|skipped]
Group result by test or device. Hide results by type: success / failure /
error / skipped'.
--dry-run Run anta nrfu command but stop before --dry-run Run anta nrfu command but stop before
starting to execute the tests. Considers all starting to execute the tests. Considers all
devices as connected. [env var: devices as connected. [env var:
@ -49,7 +52,9 @@ Options:
--help Show this message and exit. --help Show this message and exit.
Commands: Commands:
json ANTA command to check network state with JSON result. csv ANTA command to check network state with CSV report.
table ANTA command to check network states with table result. json ANTA command to check network state with JSON results.
text ANTA command to check network states with text result. md-report ANTA command to check network state with Markdown report.
table ANTA command to check network state with table results.
text ANTA command to check network state with text results.
tpl-report ANTA command to check network state with templated report. tpl-report ANTA command to check network state with templated report.

View file

@ -126,36 +126,29 @@
line-height: 1em; line-height: 1em;
font-size: 1.3rem; font-size: 1.3rem;
margin: 1em 0; margin: 1em 0;
/* font-weight: 700; */
letter-spacing: -.01em; letter-spacing: -.01em;
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
text-transform: capitalize;
font-style: normal; font-style: normal;
font-weight: bold; font-weight: bold;
} }
.md-typeset h4 { .md-typeset h4 {
font-size: 0.9rem; font-size: 1.1rem;
margin: 1em 0; margin: 1em 0;
font-weight: 700; font-weight: 700;
letter-spacing: -.01em; letter-spacing: -.01em;
line-height: 1em; line-height: 1em;
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
font-style: italic; font-style: italic;
text-transform: capitalize;
} }
.md-typeset h5, .md-typeset h5,
.md-typeset h6 { .md-typeset h6 {
font-size: 0.9rem; font-size: 0.9rem;
margin: 1em 0; margin: 1em 0;
/* font-weight: 700; */
letter-spacing: -.01em; letter-spacing: -.01em;
/* line-height: 2em; */
color: var(--md-default-fg-color--light); color: var(--md-default-fg-color--light);
font-style: italic; font-style: italic;
text-transform: capitalize;
text-decoration: underline;
} }
.md-typeset table:not([class]) th { .md-typeset table:not([class]) th {
@ -163,17 +156,13 @@
padding: .6rem .8rem; padding: .6rem .8rem;
color: var(--md-default-fg-color); color: var(--md-default-fg-color);
vertical-align: top; vertical-align: top;
/* background-color: var(--md-accent-bg-color); */
text-align: left; text-align: left;
/* min-width: 100%; */
/* display: table; */
} }
.md-typeset table:not([class]) td { .md-typeset table:not([class]) td {
/* padding: .9375em 1.25em; */ /* padding: .9375em 1.25em; */
border-collapse: collapse; border-collapse: collapse;
vertical-align: center; vertical-align: center;
text-align: left; text-align: left;
/* border-bottom: 1px solid var(--md-default-fg-color--light); */
} }
.md-typeset code { .md-typeset code {
padding: 0 .2941176471em; padding: 0 .2941176471em;
@ -250,3 +239,7 @@ div.doc-contents {
padding-left: 25px; padding-left: 25px;
border-left: .05rem solid var(--md-typeset-table-color); border-left: .05rem solid var(--md-typeset-table-color);
} }
h5.doc-heading {
/* Avoid to capitalize h5 headers for mkdocstrings */
text-transform: none;
}

Some files were not shown because too many files have changed in this diff.