
Adding upstream version 0.13.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Author: Daniel Baumann, 2025-02-05 11:32:35 +01:00
Parent: c0ae77e0f6
Commit: ecf5ca3300
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
272 changed files with 33172 additions and 0 deletions

39 .devcontainer/devcontainer.json Normal file

@@ -0,0 +1,39 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
"name": "Python 3",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/python:1-3.11-bookworm",
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {},
"ghcr.io/devcontainers-contrib/features/direnv:1": {},
"ghcr.io/devcontainers-contrib/features/pre-commit:2": {}
},
// Configure tool-specific properties.
"customizations": {
"vscode": {
"settings": {},
"extensions": [
"ms-python.black-formatter",
"ms-python.isort",
"formulahendry.github-actions",
"matangover.mypy",
"ms-python.mypy-type-checker",
"ms-python.pylint",
"LittleFoxTeam.vscode-python-test-adapter",
"njqdev.vscode-python-typehint",
"hbenl.vscode-test-explorer"
]
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "bash .devcontainer/startup.sh"
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}

13 .devcontainer/startup.sh Normal file

@@ -0,0 +1,13 @@
#!/bin/sh
# echo "Configure direnv"
# echo "eval \"$(direnv hook bash)\"" >> ~/.bashrc
echo "Upgrading pip"
pip install --upgrade pip
echo "Installing ANTA package from git"
pip install -e .
echo "Installing development tools"
pip install -e ".[dev]"

93 .dockerignore Normal file

@@ -0,0 +1,93 @@
# Git
.git
.gitignore
.gitattributes
# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml
# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Virtual environment
.env
.venv/
venv/
# PyCharm
.idea
# Python mode for VIM
.ropeproject
**/.ropeproject
# Vim swap files
**/*.swp
# VS Code
.vscode/
tests/**
examples/**

41 .github/actions/rn-pr-labeler-action/action.yml vendored Normal file

@@ -0,0 +1,41 @@
name: "rn-pr-labeler"
author: "@gmuloc"
description: "Parse a conventional commit compliant PR title and add it as a label to the PR with the prefix 'rn: '"
inputs:
auto_create_label:
description: "Boolean to indicate if the label should be auto created"
required: false
default: false
runs:
using: "composite"
steps:
- name: 'Looking up existing "rn:" label'
run: |
echo "OLD_LABEL=$(gh pr view ${{ github.event.pull_request.number }} --json labels -q .labels[].name | grep 'rn: ')" >> $GITHUB_ENV
shell: bash
- name: 'Delete existing "rn:" label if found'
run: gh pr edit ${{ github.event.pull_request.number }} --remove-label "${{ env.OLD_LABEL }}"
shell: bash
if: ${{ env.OLD_LABEL }}
- name: Set Label
# Using toJSON to support ' and " in commit messages
# https://stackoverflow.com/questions/73363167/github-actions-how-to-escape-characters-in-commit-message
run: echo "LABEL=$(echo ${{ toJSON(github.event.pull_request.title) }} | cut -d ':' -f 1 | tr -d ' ')" >> $GITHUB_ENV
shell: bash
# an alternative to verifying if the target label already exists is to
# create the label with --force in the next step, it will keep on changing
# the color of the label though so it may not be desirable.
- name: Check if label exists
run: |
EXIST=$(gh label list -L 100 --search "rn:" --json name -q '.[] | select(.name=="rn: ${{ env.LABEL }}").name')
echo "EXIST=$EXIST" >> $GITHUB_ENV
shell: bash
- name: Create Label if auto-create and label does not exist already
run: |
gh label create "rn: ${{ env.LABEL }}"
shell: bash
if: ${{ inputs.auto_create_label && ! env.EXIST }}
- name: Labelling PR
run: |
gh pr edit ${{ github.event.pull_request.number }} --add-label "rn: ${{ env.LABEL }}"
shell: bash
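
The shell steps above derive the release-notes label from the PR title. As a rough illustration only (not part of the action), the same derivation in Python, assuming a conventional-commit-style title:

```python
# Illustrative sketch of the label derivation done by the action's shell steps:
# keep everything before the first ':' of the PR title, drop spaces, and
# prefix the result with 'rn: '.
def rn_label(pr_title: str) -> str:
    prefix = pr_title.split(":", 1)[0].replace(" ", "")
    return f"rn: {prefix}"

print(rn_label("feat(anta.cli): add new command"))  # rn: feat(anta.cli)
print(rn_label("fix!: correct a breaking defect"))  # rn: fix!
```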

435 .github/changelog.sh vendored Normal file

@@ -0,0 +1,435 @@
#!/usr/bin/env zsh
##############################
# CHANGELOG SCRIPT CONSTANTS #
##############################
#* Holds the list of valid types recognized in a commit subject
#* and the display string of such type
local -A TYPES
TYPES=(
BUILD "Build system"
CHORE "Chore"
CI "CI"
CUT "Features removed"
DOC "Documentation"
FEAT "Features"
FIX "Bug fixes"
LICENSE "License update"
MAKE "Build system"
OPTIMIZE "Code optimization"
PERF "Performance"
REFACTOR "Code Refactoring"
REFORMAT "Code Reformatting"
REVERT "Revert"
TEST "Testing"
)
#* Types that will be displayed in their own section,
#* in the order specified here.
local -a MAIN_TYPES
MAIN_TYPES=(FEAT FIX PERF REFACTOR DOCS DOC)
#* Types that will be displayed under the category of other changes
local -a OTHER_TYPES
OTHER_TYPES=(MAKE TEST STYLE CI OTHER)
#* Commit types that don't appear in $MAIN_TYPES nor $OTHER_TYPES
#* will not be displayed and will simply be ignored.
############################
# COMMIT PARSING UTILITIES #
############################
function parse-commit {
# This function uses the following globals as output: commits (A),
# subjects (A), scopes (A) and breaking (A). All associative arrays (A)
# have $hash as the key.
# - commits holds the commit type
# - subjects holds the commit subject
# - scopes holds the scope of a commit
# - breaking holds the breaking change warning if a commit does
# make a breaking change
function commit:type {
local commit_message="$1"
local type="$(sed -E 's/^([a-zA-Z_\-]+)(\(.+\))?!?: .+$/\1/' <<< "$commit_message"| tr '[:lower:]' '[:upper:]')"
# If $type doesn't appear in $TYPES array mark it as 'other'
if [[ -n "${(k)TYPES[(i)${type}]}" ]]; then
echo $type
else
echo other
fi
}
function commit:scope {
local scope
# Try to find scope in "type(<scope>):" format
# Scope will be formatted in lower cases
scope=$(sed -nE 's/^[a-zA-Z_\-]+\((.+)\)!?: .+$/\1/p' <<< "$1")
if [[ -n "$scope" ]]; then
echo "$scope" | tr '[:upper:]' '[:lower:]'
return
fi
# If no scope found, try to find it in "<scope>:" format
# Make sure it's not a type before printing it
scope=$(sed -nE 's/^([a-zA-Z_\-]+): .+$/\1/p' <<< "$1")
if [[ -z "${(k)TYPES[(i)$scope]}" ]]; then
echo "$scope"
fi
}
function commit:subject {
# Only display the relevant part of the commit, i.e. if it has the format
# type[(scope)!]: subject, where the part between [] is optional, only
# displays subject. If it doesn't match the format, returns the whole string.
sed -E 's/^[a-zA-Z_\-]+(\(.+\))?!?: (.+)$/\2/' <<< "$1"
}
# Return subject if the body or subject match the breaking change format
function commit:is-breaking {
local subject="$1" body="$2" message
if [[ "$body" =~ "BREAKING CHANGE: (.*)" || \
"$subject" =~ '^[^ :\)]+\)?!: (.*)$' ]]; then
message="${match[1]}"
# remove CR characters (might be inserted in GitHub UI commit description form)
message="${message//$'\r'/}"
# skip next paragraphs (separated by two newlines or more)
message="${message%%$'\n\n'*}"
# ... and replace newlines with spaces
echo "${message//$'\n'/ }"
else
return 1
fi
}
# Return truncated hash of the reverted commit
function commit:is-revert {
local subject="$1" body="$2"
if [[ "$subject" = Revert* && \
"$body" =~ "This reverts commit ([^.]+)\." ]]; then
echo "${match[1]:0:7}"
else
return 1
fi
}
# Parse commit with hash $1
local hash="$1" subject body warning rhash
subject="$(command git show -s --format=%s $hash)"
body="$(command git show -s --format=%b $hash)"
# Commits following Conventional Commits (https://www.conventionalcommits.org/)
# have the following format, where parts between [] are optional:
#
# type[(scope)][!]: subject
#
# commit body
# [BREAKING CHANGE: warning]
# commits holds the commit type
commits[$hash]="$(commit:type "$subject")"
# scopes holds the commit scope
scopes[$hash]="$(commit:scope "$subject")"
# subjects holds the commit subject
subjects[$hash]="$(commit:subject "$subject")"
# breaking holds whether a commit has breaking changes
# and its warning message if it does
if warning=$(commit:is-breaking "$subject" "$body"); then
breaking[$hash]="$warning"
fi
# reverts holds commits reverted in the same release
if rhash=$(commit:is-revert "$subject" "$body"); then
reverts[$hash]=$rhash
fi
}
#############################
# RELEASE CHANGELOG DISPLAY #
#############################
function display-release {
# This function uses the following globals: output, version,
# commits (A), subjects (A), scopes (A), breaking (A) and reverts (A).
#
# - output is the output format to use when formatting (raw|text|md)
# - version is the version in which the commits are made
# - commits, subjects, scopes, breaking, and reverts are associative arrays
# with commit hashes as keys
# Remove commits that were reverted
local hash rhash
for hash rhash in ${(kv)reverts}; do
if (( ${+commits[$rhash]} )); then
# Remove revert commit
unset "commits[$hash]" "subjects[$hash]" "scopes[$hash]" "breaking[$hash]"
# Remove reverted commit
unset "commits[$rhash]" "subjects[$rhash]" "scopes[$rhash]" "breaking[$rhash]"
fi
done
# If no commits left skip displaying the release
if (( $#commits == 0 )); then
return
fi
##* Formatting functions
# Format the hash according to output format
# If no parameter is passed, assume it comes from `$hash`
function fmt:hash {
#* Uses $hash from outer scope
local hash="${1:-$hash}"
case "$output" in
raw) printf "$hash" ;;
text) printf "\e[33m$hash\e[0m" ;; # yellow
md) printf "[\`$hash\`](https://github.com/aristanetworks/ansible-avd/commit/$hash)" ;;
esac
}
# Format headers according to output format
# Levels 1 to 2 are considered special, the rest are formatted
# the same, except in md output format.
function fmt:header {
local header="$1" level="$2"
case "$output" in
raw)
case "$level" in
1) printf "$header\n$(printf '%.0s=' {1..${#header}})\n\n" ;;
2) printf "$header\n$(printf '%.0s-' {1..${#header}})\n\n" ;;
*) printf "$header:\n\n" ;;
esac ;;
text)
case "$level" in
1|2) printf "\e[1;4m$header\e[0m\n\n" ;; # bold, underlined
*) printf "\e[1m$header:\e[0m\n\n" ;; # bold
esac ;;
md) printf "$(printf '%.0s#' {1..${level}}) $header\n\n" ;;
esac
}
function fmt:scope {
#* Uses $scopes (A) and $hash from outer scope
local scope="${1:-${scopes[$hash]}}"
# Get length of longest scope for padding
local max_scope=0 padding=0
for hash in ${(k)scopes}; do
max_scope=$(( max_scope < ${#scopes[$hash]} ? ${#scopes[$hash]} : max_scope ))
done
# If no scopes, exit the function
if [[ $max_scope -eq 0 ]]; then
return
fi
# Get how much padding is required for this scope
padding=$(( max_scope < ${#scope} ? 0 : max_scope - ${#scope} ))
padding="${(r:$padding:: :):-}"
# If no scope, print padding and 3 spaces (equivalent to "[] ")
if [[ -z "$scope" ]]; then
printf "${padding} "
return
fi
# Print [scope]
case "$output" in
raw|md) printf "[$scope]${padding} " ;;
text) printf "[\e[38;5;9m$scope\e[0m]${padding} " ;; # red 9
esac
}
# If no parameter is passed, assume it comes from `$subjects[$hash]`
function fmt:subject {
#* Uses $subjects (A) and $hash from outer scope
local subject="${1:-${subjects[$hash]}}"
# Capitalize first letter of the subject
subject="${(U)subject:0:1}${subject:1}"
case "$output" in
raw) printf "$subject" ;;
# In text mode, highlight (#<issue>) and dim text between `backticks`
text) sed -E $'s|#([0-9]+)|\e[32m#\\1\e[0m|g;s|`([^`]+)`|`\e[2m\\1\e[0m`|g' <<< "$subject" ;;
# In markdown mode, link to (#<issue>) issues
md) sed -E 's|#([0-9]+)|[#\1](https://github.com/aristanetworks/ansible-avd/issues/\1)|g' <<< "$subject" ;;
esac
}
function fmt:type {
#* Uses $type from outer scope
local type="${1:-${TYPES[$type]:-${(C)type}}}"
[[ -z "$type" ]] && return 0
case "$output" in
raw|md) printf "$type: " ;;
text) printf "\e[4m$type\e[24m: " ;; # underlined
esac
}
##* Section functions
function display:version {
fmt:header "$version" 2
}
function display:breaking {
(( $#breaking != 0 )) || return 0
case "$output" in
raw) fmt:header "BREAKING CHANGES" 3 ;;
text|md) fmt:header "⚠ BREAKING CHANGES" 3 ;;
esac
local hash subject
for hash message in ${(kv)breaking}; do
echo " - $(fmt:hash) $(fmt:scope)$(fmt:subject "${message}")"
done | sort
echo
}
function display:type {
local hash type="$1"
local -a hashes
hashes=(${(k)commits[(R)$type]})
# If no commits found of type $type, go to next type
(( $#hashes != 0 )) || return 0
fmt:header "${TYPES[$type]}" 3
for hash in $hashes; do
echo " - $(fmt:hash) $(fmt:scope)$(fmt:subject)"
done | sort -k3 # sort by scope
echo
}
function display:others {
local hash type
# Commits made under types considered other changes
local -A changes
changes=(${(kv)commits[(R)${(j:|:)OTHER_TYPES}]})
# If no commits found under "other" types, don't display anything
(( $#changes != 0 )) || return 0
fmt:header "Other changes" 3
for hash type in ${(kv)changes}; do
case "$type" in
other) echo " - $(fmt:hash) $(fmt:scope)$(fmt:subject)" ;;
*) echo " - $(fmt:hash) $(fmt:scope)$(fmt:type)$(fmt:subject)" ;;
esac
done | sort -k3 # sort by scope
echo
}
##* Release sections order
# Display version header
display:version
# Display breaking changes first
display:breaking
# Display changes for commit types in the order specified
for type in $MAIN_TYPES; do
display:type "$type"
done
# Display other changes
display:others
}
function main {
# $1 = until commit, $2 = since commit
local until="$1" since="$2"
# $3 = output format (--text|--raw|--md)
# --md: uses markdown formatting
# --raw: outputs without style
# --text: uses ANSI escape codes to style the output
local output=${${3:-"--text"}#--*}
if [[ -z "$until" ]]; then
until=HEAD
fi
if [[ -z "$since" ]]; then
# If $since is not specified:
# 1) try to find the version used before updating
# 2) try to find the first version tag before $until
since=$(command git config --get ansible-avd.lastVersion 2>/dev/null) || \
since=$(command git describe --abbrev=0 --tags "$until^" 2>/dev/null) || \
unset since
elif [[ "$since" = --all ]]; then
unset since
fi
# Commit classification arrays
local -A commits subjects scopes breaking reverts
local truncate=0 read_commits=0
local hash version tag
# Get the first version name:
# 1) try tag-like version, or
# 2) try name-rev, or
# 3) try branch name, or
# 4) try short hash
version=$(command git describe --tags $until 2>/dev/null) \
|| version=$(command git name-rev --no-undefined --name-only --exclude="remotes/*" $until 2>/dev/null) \
|| version=$(command git symbolic-ref --quiet --short $until 2>/dev/null) \
|| version=$(command git rev-parse --short $until 2>/dev/null)
# Get commit list from $until commit until $since commit, or until root
# commit if $since is unset, in short hash form.
# --first-parent is used when dealing with merges: it only prints the
# merge commit, not the commits of the merged branch.
command git rev-list --first-parent --abbrev-commit --abbrev=7 ${since:+$since..}$until | while read hash; do
# Truncate list on versions with a lot of commits
if [[ -z "$since" ]] && (( ++read_commits > 35 )); then
truncate=1
break
fi
# If we find a new release (exact tag)
if tag=$(command git describe --exact-match --tags $hash 2>/dev/null); then
# Output previous release
display-release
# Reinitialize commit storage
commits=()
subjects=()
scopes=()
breaking=()
reverts=()
# Start work on next release
version="$tag"
read_commits=1
fi
parse-commit "$hash"
done
display-release
if (( truncate )); then
echo " ...more commits omitted"
echo
fi
}
# Use raw output if stdout is not a tty
if [[ ! -t 1 && -z "$3" ]]; then
main "$1" "$2" --raw
else
main "$@"
fi
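
The commit-parsing helpers above (`commit:type`, `commit:scope`, `commit:subject`) rely on sed patterns to split a conventional-commit subject. A rough Python equivalent of that split, for illustration only (the regex is an approximation of the script's sed expressions):

```python
import re

# Approximate translation of the patterns used by parse-commit:
#   type[(scope)][!]: subject
CC_RE = re.compile(r"^(?P<type>[a-zA-Z_\-]+)(?:\((?P<scope>.+)\))?(?P<breaking>!)?: (?P<subject>.+)$")

def parse_subject(subject):
    match = CC_RE.match(subject)
    if match is None:
        # Same fallback as the script: unrecognized subjects are typed "other".
        return {"type": "other", "scope": None, "breaking": None, "subject": subject}
    return match.groupdict()

print(parse_subject("feat(anta.cli)!: drop a deprecated option"))
# {'type': 'feat', 'scope': 'anta.cli', 'breaking': '!', 'subject': 'drop a deprecated option'}
```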

34 .github/dependabot.yml vendored Normal file

@@ -0,0 +1,34 @@
# Basic set up for three package managers
version: 2
updates:
# Maintain dependencies for Python
# Dependabot supports updates to pyproject.toml files
# if they follow the PEP 621 standard.
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"
reviewers:
- "titom73"
- "gmuloc"
- "mtache"
- "carl-baillargeon"
labels:
- 'dependencies'
pull-request-branch-name:
separator: "/"
commit-message:
prefix: "chore: "
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
reviewers:
- "titom73"
- "gmuloc"
labels:
- 'CI'
commit-message:
prefix: "ci: "

117 .github/generate_release.py vendored Normal file

@@ -0,0 +1,117 @@
#!/usr/bin/env python
"""
generate_release.py
This script is used to generate the release.yml file as per
https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
"""
import yaml
SCOPES = [
"anta",
"anta.tests",
"anta.cli",
]
# CI and Test are excluded from Release Notes
CATEGORIES = {
"feat": "Features",
"fix": "Bug Fixes",
"cut": "Cut",
"doc": "Documentation",
# "CI": "CI",
"bump": "Bump",
# "test": "Test",
"revert": "Revert",
"refactor": "Refactoring",
}
class SafeDumper(yaml.SafeDumper):
"""
Make yamllint happy
https://github.com/yaml/pyyaml/issues/234#issuecomment-765894586
"""
# pylint: disable=R0901,W0613,W1113
def increase_indent(self, flow=False, *args, **kwargs):
return super().increase_indent(flow=flow, indentless=False)
if __name__ == "__main__":
exclude_list = []
categories_list = []
# First add exclude labels
for scope in SCOPES:
exclude_list.append(f"rn: test({scope})")
exclude_list.append(f"rn: ci({scope})")
exclude_list.extend(["rn: test", "rn: ci"])
# Then add the categories
# First add Breaking Changes
breaking_label_categories = ["feat", "fix", "cut", "revert", "refactor", "bump"]
breaking_labels = [f"rn: {cc_type}({scope})!" for cc_type in breaking_label_categories for scope in SCOPES]
breaking_labels.extend([f"rn: {cc_type}!" for cc_type in breaking_label_categories])
categories_list.append(
{
"title": "Breaking Changes",
"labels": breaking_labels,
}
)
# Add new features
feat_labels = [f"rn: feat({scope})" for scope in SCOPES]
feat_labels.append("rn: feat")
categories_list.append(
{
"title": "New features and enhancements",
"labels": feat_labels,
}
)
# Add fixes
fixes_labels = [f"rn: fix({scope})" for scope in SCOPES]
fixes_labels.append("rn: fix")
categories_list.append(
{
"title": "Fixed issues",
"labels": fixes_labels,
}
)
# Add Documentation
doc_labels = [f"rn: doc({scope})" for scope in SCOPES]
doc_labels.append("rn: doc")
categories_list.append(
{
"title": "Documentation",
"labels": doc_labels,
}
)
# Add the catch all
categories_list.append(
{
"title": "Other Changes",
"labels": ["*"],
}
)
with open(r"release.yml", "w", encoding="utf-8") as release_file:
yaml.dump(
{
"changelog": {
"exclude": {"labels": exclude_list},
"categories": categories_list,
}
},
release_file,
Dumper=SafeDumper,
sort_keys=False,
)
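
For a quick sanity check of the label scheme the script generates (purely illustrative), the type/scope expansion can be reproduced in a Python shell; the output matches the labels listed in `.github/release.yml` below:

```python
# Each conventional-commit type is expanded across SCOPES, plus a scope-less label.
scopes = ["anta", "anta.tests", "anta.cli"]
feat_labels = [f"rn: feat({scope})" for scope in scopes] + ["rn: feat"]
print(feat_labels)
# ['rn: feat(anta)', 'rn: feat(anta.tests)', 'rn: feat(anta.cli)', 'rn: feat']
```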

3 .github/license-short.txt vendored Normal file

@@ -0,0 +1,3 @@
Copyright (c) 2023 Arista Networks, Inc.
Use of this source code is governed by the Apache License 2.0
that can be found in the LICENSE file.

16 .github/pull_request_template.md vendored Normal file

@@ -0,0 +1,16 @@
# Description
<!-- PR description !-->
Fixes # (issue id)
# Checklist:
<!-- Delete not relevant items !-->
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have run pre-commit for code linting and typing (`pre-commit run`)
- [ ] I have made corresponding changes to the documentation
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes (`tox -e testenv`)

103 .github/release.md vendored Normal file

@@ -0,0 +1,103 @@
# Notes
Notes regarding how to release the anta package
## Package requirements
- `bumpver`
- `build`
- `twine`
Also, [Github CLI](https://cli.github.com/) can be helpful and is recommended
## Bumping version
In a branch dedicated to the release, use the `bumpver` tool.
It is configured to update:
* pyproject.toml
* docs/contribution.md
* docs/requirements-and-installation.md
For instance to bump a patch version:
```
bumpver update --patch
```
and for a minor version
```
bumpver update --minor
```
Tip: It is possible to check what the changes would be using `--dry`
```
bumpver update --minor --dry
```
## Creating release on Github
Create the release on Github with the appropriate tag `vx.x.x`
## Release version `x.x.x`
> [!IMPORTANT]
> TODO - make this a github workflow
`x.x.x` is the version to be released.
The following steps are executed from the top of the repository.
1. Checkout the latest version of `main` with the correct tag for the release
2. Create a new branch for release
```bash
git switch -c rel/vx.x.x
```
3. [Optional] Clean dist if required
4. Build the package locally
```bash
python -m build
```
5. Check the package with `twine` (replace with your version)
```bash
twine check dist/*
```
6. Upload the package to test.pypi
```bash
twine upload -r testpypi dist/anta-x.x.x.*
```
7. Verify the package by installing it in a local venv and checking that it installs
and runs correctly (run the tests)
```bash
# In a brand new venv
pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple --no-cache anta
```
8. Push to anta repository and create a Pull Request
```bash
git push origin HEAD
gh pr create --title 'bump: ANTA vx.x.x'
```
9. Merge PR after review and wait for [workflow](https://github.com/arista-netdevops-community/anta/actions/workflows/release.yml) to be executed.
```bash
gh pr merge --squash
```
10. As in step 7, install and verify the package, but from the production PyPI
```bash
# In a brand new venv
pip install anta
```
11. Test installed version
```bash
anta --version
```

59 .github/release.yml vendored Normal file

@@ -0,0 +1,59 @@
changelog:
exclude:
labels:
- 'rn: test(anta)'
- 'rn: ci(anta)'
- 'rn: test(anta.tests)'
- 'rn: ci(anta.tests)'
- 'rn: test(anta.cli)'
- 'rn: ci(anta.cli)'
- 'rn: test'
- 'rn: ci'
categories:
- title: Breaking Changes
labels:
- 'rn: feat(anta)!'
- 'rn: feat(anta.tests)!'
- 'rn: feat(anta.cli)!'
- 'rn: fix(anta)!'
- 'rn: fix(anta.tests)!'
- 'rn: fix(anta.cli)!'
- 'rn: cut(anta)!'
- 'rn: cut(anta.tests)!'
- 'rn: cut(anta.cli)!'
- 'rn: revert(anta)!'
- 'rn: revert(anta.tests)!'
- 'rn: revert(anta.cli)!'
- 'rn: refactor(anta)!'
- 'rn: refactor(anta.tests)!'
- 'rn: refactor(anta.cli)!'
- 'rn: bump(anta)!'
- 'rn: bump(anta.tests)!'
- 'rn: bump(anta.cli)!'
- 'rn: feat!'
- 'rn: fix!'
- 'rn: cut!'
- 'rn: revert!'
- 'rn: refactor!'
- 'rn: bump!'
- title: New features and enhancements
labels:
- 'rn: feat(anta)'
- 'rn: feat(anta.tests)'
- 'rn: feat(anta.cli)'
- 'rn: feat'
- title: Fixed issues
labels:
- 'rn: fix(anta)'
- 'rn: fix(anta.tests)'
- 'rn: fix(anta.cli)'
- 'rn: fix'
- title: Documentation
labels:
- 'rn: doc(anta)'
- 'rn: doc(anta.tests)'
- 'rn: doc(anta.cli)'
- 'rn: doc'
- title: Other Changes
labels:
- '*'

144 .github/workflows/code-testing.yml vendored Normal file

@@ -0,0 +1,144 @@
---
name: Linting and Testing Anta
on:
push:
branches:
- main
pull_request:
jobs:
file-changes:
runs-on: ubuntu-latest
outputs:
code: ${{ steps.filter.outputs.code }}
docs: ${{ steps.filter.outputs.docs }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
code:
- 'anta/*'
- 'anta/**'
- 'tests/*'
- 'tests/**'
core:
- 'anta/*'
- 'anta/reporter/*'
- 'anta/result_manager/*'
- 'anta/tools/*'
cli:
- 'anta/cli/*'
- 'anta/cli/**'
tests:
- 'anta/tests/*'
- 'anta/tests/**'
docs:
- '.github/workflows/pull-request-management.yml'
- 'mkdocs.yml'
- 'docs/*'
- 'docs/**'
- 'README.md'
check-requirements:
runs-on: ubuntu-20.04
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
needs: file-changes
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: install requirements
run: |
pip install .
- name: install dev requirements
run: pip install .[dev]
missing-documentation:
name: "Warning documentation is missing"
runs-on: ubuntu-20.04
needs: [file-changes]
if: needs.file-changes.outputs.cli == 'true' && needs.file-changes.outputs.docs == 'false'
steps:
- name: Documentation is missing
uses: GrantBirki/comment@v2.0.9
with:
body: |
It looks like documentation is missing under the `docs/` folder.
You should update the documentation to reflect your change, or maybe not :)
lint-yaml:
name: Run linting for yaml files
runs-on: ubuntu-20.04
needs: [file-changes, check-requirements]
if: needs.file-changes.outputs.code == 'true'
steps:
- uses: actions/checkout@v4
- name: yaml-lint
uses: ibiqlik/action-yamllint@v3
with:
config_file: .yamllint.yml
file_or_dir: .
lint-python:
name: Run isort, black, flake8 and pylint
runs-on: ubuntu-20.04
needs: file-changes
if: needs.file-changes.outputs.code == 'true'
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: pip install tox
- name: "Run tox linting environment"
run: tox -e lint
type-python:
name: Run mypy
runs-on: ubuntu-20.04
needs: file-changes
if: needs.file-changes.outputs.code == 'true'
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: pip install tox
- name: "Run tox typing environment"
run: tox -e type
test-python:
name: Pytest across all supported python versions
runs-on: ubuntu-20.04
needs: [lint-python, type-python]
strategy:
matrix:
python: ["3.8", "3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
- name: Install dependencies
run: pip install tox tox-gh-actions
- name: "Run pytest via tox for ${{ matrix.python }}"
run: tox
test-documentation:
name: Build offline documentation for testing
runs-on: ubuntu-20.04
needs: [lint-python, type-python, test-python]
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: pip install .[doc]
- name: "Build mkdocs documentation offline"
run: mkdocs build

37 .github/workflows/main-doc.yml vendored Normal file

@@ -0,0 +1,37 @@
---
# This is deploying the latest commits on main to main documentation
name: Mkdocs
on:
push:
branches:
- main
paths:
# Run only if any of the following paths are changed when pushing to main
# May need to update this
- "docs/**"
- "mkdocs.yml"
workflow_dispatch:
jobs:
'build_latest_doc':
name: 'Update Public main documentation'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 'Setup Python 3 on runner'
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Setup Git config
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
- name: 'Build mkdocs content and deploy to gh-pages to main'
run: |
pip install .[doc]
mike deploy --push main

49 .github/workflows/on-demand.yml vendored Normal file

@@ -0,0 +1,49 @@
name: 'Build docker on-demand'
on:
workflow_dispatch:
inputs:
tag:
description: 'docker container tag'
required: true
type: string
default: 'dev'
jobs:
docker:
name: Docker Image Build
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- linux/amd64
- linux/arm64
- linux/arm/v7
- linux/arm/v8
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Docker meta for TAG
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
tags: |
type=raw,value=${{ inputs.tag }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
push: true
platforms: linux/amd64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

18 .github/workflows/pr-conflicts.yml vendored Normal file

@@ -0,0 +1,18 @@
name: "PR Conflicts checker"
on:
pull_request_target:
types: [synchronize]
jobs:
Conflict_Check:
name: 'Check PR status: conflicts and resolution'
runs-on: ubuntu-latest
steps:
- name: check if PRs are dirty
uses: eps1lon/actions-label-merge-conflict@releases/2.x
with:
dirtyLabel: "state: conflict"
removeOnDirtyLabel: "state: conflict resolved"
repoToken: "${{ secrets.GITHUB_TOKEN }}"
commentOnDirty: "This pull request has conflicts, please resolve those before we can evaluate the pull request."
commentOnClean: "Conflicts have been resolved. A maintainer will review the pull request shortly."

73 .github/workflows/pr-triage.yml vendored Normal file

@@ -0,0 +1,73 @@
name: "Pull Request Triage"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
jobs:
assign_author:
name: "Assign Author to PR"
# https://github.com/marketplace/actions/auto-author-assign
runs-on: ubuntu-latest
steps:
- uses: toshimaru/auto-author-assign@v2.1.0
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
check_pr_semantic:
runs-on: ubuntu-latest
steps:
# Please look up the latest version from
# https://github.com/amannn/action-semantic-pull-request/releases
- uses: amannn/action-semantic-pull-request@v5.4.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
# Configure which types are allowed.
# Default: https://github.com/commitizen/conventional-commit-types
# Updated as part of PR 1930
types: |
feat
fix
cut
doc
ci
bump
test
refactor
revert
make
chore
# Configure which scopes are allowed.
scopes: |
anta
anta.tests
anta.cli
# Configure that a scope must always be provided.
requireScope: false
# Configure additional validation for the subject based on a regex.
# This example ensures the subject doesn't start with an uppercase character.
# subjectPattern: ^(?![A-Z]).+$
# If `subjectPattern` is configured, you can use this property to override
# the default error message that is shown when the pattern doesn't match.
# The variables `subject` and `title` can be used within the message.
subjectPatternError: |
The subject "{subject}" found in the pull request title "{title}"
didn't match the configured pattern. Please ensure that the subject
doesn't start with an uppercase character.
# When using "Squash and merge" on a PR with only one commit, GitHub
# will suggest using that commit message instead of the PR title for the
# merge commit, and it's easy to commit this by mistake. Enable this option
# to also validate the commit message for one commit PRs.
# Update 13-Jul-2022 CH: GitHub now offers a toggle for this behavior.
# We have set that to always use the PR title, so this check is no longer needed.
validateSingleCommit: false
# Related to `validateSingleCommit` you can opt-in to validate that the PR
# title matches a single commit to avoid confusion.
validateSingleCommitMatchesPrTitle: true
ignoreLabels: |
bot
ignore-semantic-pull-request

@@ -0,0 +1,27 @@
# This workflow is triggered after a PR is merged or when the title of a PR is
# changed post merge
name: "Label for Release Notes"
on:
pull_request_target:
types:
- closed
- edited # interested in post merge title changes
jobs:
###################################################
# Assign labels on merge to generate Release Notes
# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-when-a-pull-request-merges
###################################################
if_merged:
name: "PR was merged"
if: (github.event.pull_request.merged == true) && ( github.event.action == 'closed' || (github.event.action == 'edited' && github.event.changes.title != null) )
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/rn-pr-labeler-action
with:
auto_create_label: true

110 .github/workflows/release.yml vendored Normal file

@@ -0,0 +1,110 @@
---
name: "Tag & Release management"
on:
release:
types:
- published
jobs:
pypi:
name: Publish version to Pypi servers
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel build
- name: Build package
run: |
python -m build
- name: Publish package to Pypi
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}
release-coverage:
name: Update ANTA release coverage badge
runs-on: ubuntu-20.04
needs: [pypi]
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: pip install genbadge[coverage] tox tox-gh-actions
- name: "Run pytest via tox for ${{ matrix.python }}"
run: tox
- name: Generate coverage badge
run: genbadge coverage -i .coverage.xml -o badge/latest-release-coverage.svg
- name: Publish coverage badge to gh-pages branch
uses: JamesIves/github-pages-deploy-action@v4
with:
branch: coverage-badge
folder: badge
release-doc:
name: "Publish documentation for release ${{github.ref_name}}"
runs-on: ubuntu-latest
needs: [release-coverage]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 'Setup Python 3 on runner'
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Setup Git config
run: |
git config --global user.name 'github-actions[bot]'
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
- name: 'Build mkdocs content to site folder'
run: |
pip install .[doc]
mike deploy --update-alias --push ${{github.ref_name}} stable
docker:
name: Docker Image Build
runs-on: ubuntu-latest
needs: [pypi]
strategy:
matrix:
platform:
- linux/amd64
- linux/arm64
- linux/arm/v7
- linux/arm/v8
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Docker meta for TAG
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
tags: |
type=semver,pattern={{version}}
type=raw,value=latest
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
push: true
platforms: linux/amd64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

114 .gitignore vendored Normal file

@@ -0,0 +1,114 @@
__pycache__
*.pyc
.pages
.coverage
.pytest_cache
build
dist
*.egg-info
scripts/test*.py
examples/tests_*
.personal/*
*.env
*.swp
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
./lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
.flake8
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
report.html
# Sphinx documentation
docs/_build/
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
poetry.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
.envrc
# mkdocs documentation
/site
# VScode settings
.vscode
test.env
tech-support/
tech-support/*
2*
**/report.html
.*report.html
# direnv file
.envrc
clab-atd-anta/*
clab-atd-anta/

105 .pre-commit-config.yaml Normal file

@@ -0,0 +1,105 @@
---
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
files: ^(anta|docs|scripts|tests)/
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-added-large-files
- id: check-merge-conflict
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.4
hooks:
- name: Check and insert license on Python files
id: insert-license
# exclude:
files: .*\.py$
args:
- --license-filepath
- .github/license-short.txt
- --use-current-year
- --allow-past-years
- --fuzzy-match-generates-todo
- --no-extra-eol
- name: Check and insert license on Markdown files
id: insert-license
files: .*\.md$
# exclude:
args:
- --license-filepath
- .github/license-short.txt
- --use-current-year
- --allow-past-years
- --fuzzy-match-generates-todo
- --comment-style
- '<!--| ~| -->'
- repo: https://github.com/pycqa/isort
rev: 5.13.2
hooks:
- id: isort
name: Check for changes when running isort on all python files
- repo: https://github.com/psf/black
rev: 24.1.1
hooks:
- id: black
name: Check for changes when running Black on all python files
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
hooks:
- id: flake8
name: Check for PEP8 error on Python files
args:
- --config=/dev/null
- --max-line-length=165
- repo: local # as per https://pylint.pycqa.org/en/latest/user_guide/installation/pre-commit-integration.html
hooks:
- id: pylint
entry: pylint
language: python
name: Check for Linting error on Python files
description: This hook runs pylint.
types: [python]
args:
- -rn # Only display messages
- -sn # Don't display the score
- --rcfile=pylintrc # Link to config file
# Prepare to turn on ruff
# - repo: https://github.com/astral-sh/ruff-pre-commit
# # Ruff version.
# rev: v0.0.280
# hooks:
# - id: ruff
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.7.1
hooks:
- id: mypy
args:
- --config-file=pyproject.toml
additional_dependencies:
- "aio-eapi==0.3.0"
- "click==8.1.3"
- "click-help-colors==0.9.1"
- "cvprac~=1.3"
- "netaddr==0.8.0"
- "pydantic~=2.0"
- "PyYAML==6.0"
- "requests>=2.27"
- "rich~=13.4"
- "asyncssh==2.13.1"
- "Jinja2==3.1.2"
- types-PyYAML
- types-paramiko
- types-requests
files: ^(anta|tests)/

61 .sourcery.yaml Normal file

@@ -0,0 +1,61 @@
# 🪄 This is your project's Sourcery configuration file.
# You can use it to get Sourcery working in the way you want, such as
# ignoring specific refactorings, skipping directories in your project,
# or writing custom rules.
# 📚 For a complete reference to this file, see the documentation at
# https://docs.sourcery.ai/Configuration/Project-Settings/
# This file was auto-generated by Sourcery on 2022-07-29 at 10:15.
version: '1' # The schema version of this config file
ignore: # A list of paths or files which Sourcery will ignore.
- .git
- venv
- .venv
- env
- .env
refactor:
include: []
skip: [] # A list of rule IDs Sourcery will never suggest.
rule_types:
- refactoring
- suggestion
- comment
python_version: '3.7' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
# rules: # A list of custom rules Sourcery will include in its analysis.
# - id: no-print-statements
# description: Disallows print statements anywhere in code.
# pattern: print
# replacement:
# explanation:
# paths:
# include:
# - test
# exclude:
# - conftest.py
# tests: []
# metrics:
# quality_threshold: 25.0
# github:
# labels: []
# ignore_labels:
# - sourcery-ignore
# request_review: author
# sourcery_branch: sourcery/{base_branch}
# clone_detection:
# min_lines: 3
# min_duplicates: 2
# identical_clones_only: false
# proxy:
# url:
# ssl_certs_file:
# no_ssl_verify: false

30 .vscode/settings.json vendored Normal file

@@ -0,0 +1,30 @@
{
"black-formatter.importStrategy": "fromEnvironment",
"pylint.importStrategy": "fromEnvironment",
"pylint.args": [
"--rcfile=pylintrc"
],
"flake8.importStrategy": "fromEnvironment",
"flake8.args": [
"--config=/dev/null",
"--max-line-length=165"
],
"mypy-type-checker.importStrategy": "fromEnvironment",
"mypy-type-checker.args": [
"--config-file=pyproject.toml"
],
"pylint.severity": {
"refactor": "Warning"
},
"pylint.args": [
"--load-plugins pylint_pydantic",
"--rcfile=pylintrc"
],
"python.testing.pytestArgs": [
"tests"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"isort.importStrategy": "fromEnvironment",
"isort.check": true,
}

4 .yamllint.yml Normal file

@@ -0,0 +1,4 @@
rules:
line-length:
max: 350
truthy: disable

46 Dockerfile Normal file

@@ -0,0 +1,46 @@
ARG PYTHON_VER=3.9
ARG IMG_OPTION=alpine
### BUILDER
FROM python:${PYTHON_VER}-${IMG_OPTION} as BUILDER
RUN pip install --upgrade pip
WORKDIR /local
COPY . /local
ENV PYTHONPATH=/local
ENV PATH=$PATH:/root/.local/bin
RUN pip --no-cache-dir install --user .
# ----------------------------------- #
### BASE
FROM python:${PYTHON_VER}-${IMG_OPTION} as BASE
# Opencontainer labels
# Labels version and revision will be updating
# during the CI with accurate information
# To configure version and revision, you can use:
# docker build --label org.opencontainers.image.version=<your version> -t ...
# Doc: https://docs.docker.com/engine/reference/commandline/run/#label
LABEL "org.opencontainers.image.title"="anta" \
"org.opencontainers.artifact.description"="network-test-automation in a Python package and Python scripts to test Arista devices." \
"org.opencontainers.image.description"="network-test-automation in a Python package and Python scripts to test Arista devices." \
"org.opencontainers.image.source"="https://github.com/arista-netdevops-community/anta" \
"org.opencontainers.image.url"="https://www.anta.ninja" \
"org.opencontainers.image.documentation"="https://www.anta.ninja" \
"org.opencontainers.image.licenses"="Apache-2.0" \
"org.opencontainers.image.vendor"="The anta contributors." \
"org.opencontainers.image.authors"="Khelil Sator, Angélique Phillipps, Colin MacGiollaEáin, Matthieu Tache, Onur Gashi, Paul Lavelle, Guillaume Mulocher, Thomas Grimonet" \
"org.opencontainers.image.base.name"="python" \
"org.opencontainers.image.revision"="dev" \
"org.opencontainers.image.version"="dev"
COPY --from=BUILDER /root/.local/ /root/.local
ENV PATH=$PATH:/root/.local/bin
ENTRYPOINT [ "/root/.local/bin/anta" ]

201 LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Arista Networks
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

47 anta/__init__.py Normal file

@@ -0,0 +1,47 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Arista Network Test Automation (ANTA) Framework."""
import importlib.metadata
import os
__version__ = f"v{importlib.metadata.version('anta')}"
__credits__ = [
"Angélique Phillipps",
"Colin MacGiollaEáin",
"Khelil Sator",
"Matthieu Tâche",
"Onur Gashi",
"Paul Lavelle",
"Guillaume Mulocher",
"Thomas Grimonet",
]
__copyright__ = "Copyright 2022, Arista EMEA AS"
# Global ANTA debug mode environment variable
__DEBUG__ = bool(os.environ.get("ANTA_DEBUG", "").lower() == "true")
# Source: https://rich.readthedocs.io/en/stable/appendix/colors.html
# pylint: disable=R0903
class RICH_COLOR_PALETTE:
"""Color code for text rendering."""
ERROR = "indian_red"
FAILURE = "bold red"
SUCCESS = "green4"
SKIPPED = "bold orange4"
HEADER = "cyan"
UNSET = "grey74"
# Dictionary to use in a Rich.Theme: custom_theme = Theme(RICH_COLOR_THEME)
RICH_COLOR_THEME = {
"success": RICH_COLOR_PALETTE.SUCCESS,
"skipped": RICH_COLOR_PALETTE.SKIPPED,
"failure": RICH_COLOR_PALETTE.FAILURE,
"error": RICH_COLOR_PALETTE.ERROR,
"unset": RICH_COLOR_PALETTE.UNSET,
}
GITHUB_SUGGESTION = "Please reach out to the maintainer team or open an issue on Github: https://github.com/arista-netdevops-community/anta."
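
As the comment above suggests, `RICH_COLOR_THEME` is meant to be wrapped in a Rich `Theme`. A minimal usage sketch (not part of the package; assumes the `rich` dependency is installed):

```python
from rich.console import Console
from rich.theme import Theme

from anta import RICH_COLOR_THEME

# Styles named "success", "failure", "error", "skipped" and "unset" become
# available to Console.print and to Rich markup.
console = Console(theme=Theme(RICH_COLOR_THEME))
console.print("All tests passed", style="success")
console.print("Some tests failed", style="failure")
```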

108 anta/aioeapi.py Normal file

@@ -0,0 +1,108 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Patch for aioeapi waiting for https://github.com/jeremyschulman/aio-eapi/pull/13"""
from __future__ import annotations
from typing import Any, AnyStr
import aioeapi
Device = aioeapi.Device
class EapiCommandError(RuntimeError):
"""
Exception class for EAPI command errors
Attributes
----------
failed: str - the failed command
errmsg: str - a description of the failure reason
errors: list[str] - the command failure details
passed: list[dict] - a list of command results of the commands that passed
not_exec: list[str] - a list of commands that were not executed
"""
# pylint: disable=too-many-arguments
def __init__(self, failed: str, errors: list[str], errmsg: str, passed: list[str | dict[str, Any]], not_exec: list[dict[str, Any]]):
"""Initializer for the EapiCommandError exception"""
self.failed = failed
self.errmsg = errmsg
self.errors = errors
self.passed = passed
self.not_exec = not_exec
super().__init__()
def __str__(self) -> str:
"""returns the error message associated with the exception"""
return self.errmsg
aioeapi.EapiCommandError = EapiCommandError
async def jsonrpc_exec(self, jsonrpc: dict) -> list[dict | AnyStr]: # type: ignore
"""
Execute the JSON-RPC dictionary object.
Parameters
----------
jsonrpc: dict
The JSON-RPC payload as created by :meth:`jsonrpc_command`.
Raises
------
EapiCommandError
In the event that a command resulted in an error response.
Returns
-------
The list of command results; either dict or text depending on the
JSON-RPC format parameter.
"""
res = await self.post("/command-api", json=jsonrpc)
res.raise_for_status()
body = res.json()
commands = jsonrpc["params"]["cmds"]
ofmt = jsonrpc["params"]["format"]
get_output = (lambda _r: _r["output"]) if ofmt == "text" else (lambda _r: _r)
# if there are no errors then return the list of command results.
if (err_data := body.get("error")) is None:
return [get_output(cmd_res) for cmd_res in body["result"]]
# ---------------------------------------------------------------------
# if we are here, then there were some command errors. Raise a
# EapiCommandError exception with args (commands that failed, passed,
# not-executed).
# ---------------------------------------------------------------------
# -------------------------- eAPI specification ----------------------
# On an error, no result object is present, only an error object, which
# is guaranteed to have the following attributes: code, messages, and
# data. Similar to the result object in the successful response, the
# data object is a list of objects corresponding to the results of all
# commands up to, and including, the failed command. If there was an
# error before any commands were executed (e.g. bad credentials), data
# will be empty. The last object in the data array will always
# correspond to the failed command. The command failure details are
# always stored in the errors array.
cmd_data = err_data["data"]
len_data = len(cmd_data)
err_at = len_data - 1
err_msg = err_data["message"]
raise EapiCommandError(
passed=[get_output(cmd_data[cmd_i]) for cmd_i, cmd in enumerate(commands[:err_at])],
failed=commands[err_at]["cmd"],
errors=cmd_data[err_at]["errors"],
errmsg=err_msg,
not_exec=commands[err_at + 1 :], # noqa: E203
)
aioeapi.Device.jsonrpc_exec = jsonrpc_exec

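Because the patched jsonrpc_exec raises EapiCommandError with the failed/passed/not-executed breakdown, callers can report exactly which command in a batch failed. A hedged sketch of how that surfaces; the host and credentials are placeholders, and Device.cli is the upstream aio-eapi helper that ends up calling jsonrpc_exec:

import asyncio

import aioeapi


async def demo() -> None:
    device = aioeapi.Device(host="10.0.0.1", username="admin", password="admin")
    try:
        await device.cli(commands=["show version", "show bogus"])
    except aioeapi.EapiCommandError as exc:
        # Attributes populated by the patched jsonrpc_exec above
        print(f"failed:  {exc.failed}")
        print(f"errors:  {exc.errors}")
        print(f"not run: {exc.not_exec}")


asyncio.run(demo())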
291
anta/catalog.py Normal file
View file

@ -0,0 +1,291 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Catalog related functions
"""
from __future__ import annotations
import importlib
import logging
from inspect import isclass
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from pydantic import BaseModel, ConfigDict, RootModel, ValidationError, ValidationInfo, field_validator, model_validator
from pydantic.types import ImportString
from yaml import YAMLError, safe_load
from anta.logger import anta_log_exception
from anta.models import AntaTest
logger = logging.getLogger(__name__)
# { <module_name> : [ { <test_class_name>: <input_as_dict_or_None> }, ... ] }
RawCatalogInput = Dict[str, List[Dict[str, Optional[Dict[str, Any]]]]]
# [ ( <AntaTest class>, <input_as AntaTest.Input or dict or None > ), ... ]
ListAntaTestTuples = List[Tuple[Type[AntaTest], Optional[Union[AntaTest.Input, Dict[str, Any]]]]]
class AntaTestDefinition(BaseModel):
"""
Define a test with its associated inputs.
test: An AntaTest concrete subclass
inputs: The associated AntaTest.Input subclass instance
"""
model_config = ConfigDict(frozen=True)
test: Type[AntaTest]
inputs: AntaTest.Input
def __init__(self, **data: Any) -> None:
"""
Inject the test class into the validation context so that the Input instance can be built in the `before` field validator.
https://docs.pydantic.dev/2.0/usage/validators/#using-validation-context-with-basemodel-initialization
"""
self.__pydantic_validator__.validate_python(
data,
self_instance=self,
context={"test": data["test"]},
)
super(BaseModel, self).__init__()
@field_validator("inputs", mode="before")
@classmethod
def instantiate_inputs(cls, data: AntaTest.Input | dict[str, Any] | None, info: ValidationInfo) -> AntaTest.Input:
"""
If the test has no inputs, allow the user to omit providing the `inputs` field.
If the test has inputs, allow the user to provide a valid dictionary of the input fields.
This model validator will instantiate an Input class from the `test` class field.
"""
if info.context is None:
raise ValueError("Could not validate inputs as no test class could be identified")
# Pydantic guarantees at this stage that test_class is a subclass of AntaTest because of the ordering
# of fields in the class definition - so no need to check for this
test_class = info.context["test"]
if not (isclass(test_class) and issubclass(test_class, AntaTest)):
raise ValueError(f"Could not validate inputs as no test class {test_class} is not a subclass of AntaTest")
if data is None:
return test_class.Input()
if isinstance(data, AntaTest.Input):
return data
if isinstance(data, dict):
return test_class.Input(**data)
raise ValueError(f"Coud not instantiate inputs as type {type(data).__name__} is not valid")
@model_validator(mode="after")
def check_inputs(self) -> "AntaTestDefinition":
"""
The `inputs` class attribute needs to be an instance of the AntaTest.Input subclass defined in the class `test`.
"""
if not isinstance(self.inputs, self.test.Input):
raise ValueError(f"Test input has type {self.inputs.__class__.__qualname__} but expected type {self.test.Input.__qualname__}")
return self
class AntaCatalogFile(RootModel[Dict[ImportString[Any], List[AntaTestDefinition]]]): # pylint: disable=too-few-public-methods
"""
This model represents an ANTA Test Catalog File.
A valid test catalog file must have the following structure:
<Python module>:
- <AntaTest subclass>:
<AntaTest.Input compliant dictionary>
"""
root: Dict[ImportString[Any], List[AntaTestDefinition]]
@model_validator(mode="before")
@classmethod
def check_tests(cls, data: Any) -> Any:
"""
Allow the user to provide a Python data structure that only has string values.
This validator will try to flatten and import Python modules, check if the test classes
are actually defined in their respective Python module and instantiate Input instances
with provided value to validate test inputs.
"""
def flatten_modules(data: dict[str, Any], package: str | None = None) -> dict[ModuleType, list[Any]]:
"""
Allow the user to provide a data structure with nested Python modules.
Example:
```
anta.tests.routing:
generic:
- <AntaTestDefinition>
bgp:
- <AntaTestDefinition>
```
`anta.tests.routing.generic` and `anta.tests.routing.bgp` are importable Python modules.
"""
modules: dict[ModuleType, list[Any]] = {}
for module_name, tests in data.items():
if package and not module_name.startswith("."):
module_name = f".{module_name}"
try:
module: ModuleType = importlib.import_module(name=module_name, package=package)
except Exception as e: # pylint: disable=broad-exception-caught
# A test module is potentially user-defined code.
# We need to catch everything if we want to have meaningful logs
module_str = f"{module_name[1:] if module_name.startswith('.') else module_name}{f' from package {package}' if package else ''}"
message = f"Module named {module_str} cannot be imported. Verify that the module exists and there is no Python syntax issues."
anta_log_exception(e, message, logger)
raise ValueError(message) from e
if isinstance(tests, dict):
# This is an inner Python module
modules.update(flatten_modules(data=tests, package=module.__name__))
else:
if not isinstance(tests, list):
raise ValueError(f"Syntax error when parsing: {tests}\nIt must be a list of ANTA tests. Check the test catalog.")
# This is a list of AntaTestDefinition
modules[module] = tests
return modules
if isinstance(data, dict):
typed_data: dict[ModuleType, list[Any]] = flatten_modules(data)
for module, tests in typed_data.items():
test_definitions: list[AntaTestDefinition] = []
for test_definition in tests:
if not isinstance(test_definition, dict):
raise ValueError(f"Syntax error when parsing: {test_definition}\nIt must be a dictionary. Check the test catalog.")
if len(test_definition) != 1:
raise ValueError(
f"Syntax error when parsing: {test_definition}\nIt must be a dictionary with a single entry. Check the indentation in the test catalog."
)
for test_name, test_inputs in test_definition.copy().items():
test: type[AntaTest] | None = getattr(module, test_name, None)
if test is None:
raise ValueError(
f"{test_name} is not defined in Python module {module.__name__}{f' (from {module.__file__})' if module.__file__ is not None else ''}"
)
test_definitions.append(AntaTestDefinition(test=test, inputs=test_inputs))
typed_data[module] = test_definitions
return typed_data
class AntaCatalog:
"""
Class representing an ANTA Catalog.
It can be instantiated using its constructor or one of the static methods: `parse()`, `from_list()` or `from_dict()`
"""
def __init__(self, tests: list[AntaTestDefinition] | None = None, filename: str | Path | None = None) -> None:
"""
Constructor of AntaCatalog.
Args:
tests: A list of AntaTestDefinition instances.
filename: The path from which the catalog is loaded.
"""
self._tests: list[AntaTestDefinition] = []
if tests is not None:
self._tests = tests
self._filename: Path | None = None
if filename is not None:
if isinstance(filename, Path):
self._filename = filename
else:
self._filename = Path(filename)
@property
def filename(self) -> Path | None:
"""Path of the file used to create this AntaCatalog instance"""
return self._filename
@property
def tests(self) -> list[AntaTestDefinition]:
"""List of AntaTestDefinition in this catalog"""
return self._tests
@tests.setter
def tests(self, value: list[AntaTestDefinition]) -> None:
if not isinstance(value, list):
raise ValueError("The catalog must contain a list of tests")
for t in value:
if not isinstance(t, AntaTestDefinition):
raise ValueError("A test in the catalog must be an AntaTestDefinition instance")
self._tests = value
@staticmethod
def parse(filename: str | Path) -> AntaCatalog:
"""
Create an AntaCatalog instance from a test catalog file.
Args:
filename: Path to test catalog YAML file
"""
try:
with open(file=filename, mode="r", encoding="UTF-8") as file:
data = safe_load(file)
except (TypeError, YAMLError, OSError) as e:
message = f"Unable to parse ANTA Test Catalog file '{filename}'"
anta_log_exception(e, message, logger)
raise
return AntaCatalog.from_dict(data, filename=filename)
@staticmethod
def from_dict(data: RawCatalogInput, filename: str | Path | None = None) -> AntaCatalog:
"""
Create an AntaCatalog instance from a dictionary data structure.
See RawCatalogInput type alias for details.
It is the data structure returned by the `yaml.load()` function on a valid
YAML Test Catalog file.
Args:
data: Python dictionary used to instantiate the AntaCatalog instance
filename: value to be set as AntaCatalog instance attribute
"""
tests: list[AntaTestDefinition] = []
if data is None:
logger.warning("Catalog input data is empty")
return AntaCatalog(filename=filename)
if not isinstance(data, dict):
raise ValueError(f"Wrong input type for catalog data{f' (from {filename})' if filename is not None else ''}, must be a dict, got {type(data).__name__}")
try:
catalog_data = AntaCatalogFile(**data) # type: ignore[arg-type]
except ValidationError as e:
anta_log_exception(e, f"Test catalog is invalid!{f' (from {filename})' if filename is not None else ''}", logger)
raise
for t in catalog_data.root.values():
tests.extend(t)
return AntaCatalog(tests, filename=filename)
@staticmethod
def from_list(data: ListAntaTestTuples) -> AntaCatalog:
"""
Create an AntaCatalog instance from a list data structure.
See ListAntaTestTuples type alias for details.
Args:
data: Python list used to instantiate the AntaCatalog instance
"""
tests: list[AntaTestDefinition] = []
try:
tests.extend(AntaTestDefinition(test=test, inputs=inputs) for test, inputs in data)
except ValidationError as e:
anta_log_exception(e, "Test catalog is invalid!", logger)
raise
return AntaCatalog(tests)
def get_tests_by_tags(self, tags: list[str], strict: bool = False) -> list[AntaTestDefinition]:
"""
Return all the tests that have matching tags in their input filters.
If strict=True, returns only tests that match all the tags provided as input.
If strict=False, return all the tests that match at least one tag provided as input.
"""
result: list[AntaTestDefinition] = []
for test in self.tests:
if test.inputs.filters and (f := test.inputs.filters.tags):
if (strict and all(t in tags for t in f)) or (not strict and any(t in tags for t in f)):
result.append(test)
return result

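For context, a short sketch of how this class is consumed; the test module and inputs below are assumptions chosen only to illustrate the RawCatalogInput shape documented above:

from anta.catalog import AntaCatalog

# From a YAML catalog file on disk
catalog = AntaCatalog.parse("catalog.yml")

# Or from an equivalent Python dictionary (RawCatalogInput shape)
raw = {
    "anta.tests.software": [
        {"VerifyEOSVersion": {"versions": ["4.30.1F"]}},
    ]
}
catalog = AntaCatalog.from_dict(raw)
print(f"{len(catalog.tests)} test(s) loaded")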
72
anta/cli/__init__.py Normal file
View file

@ -0,0 +1,72 @@
#!/usr/bin/env python
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
ANTA CLI
"""
from __future__ import annotations
import logging
import pathlib
import sys
import click
from anta import GITHUB_SUGGESTION, __version__
from anta.cli.check import check as check_command
from anta.cli.debug import debug as debug_command
from anta.cli.exec import exec as exec_command
from anta.cli.get import get as get_command
from anta.cli.nrfu import nrfu as nrfu_command
from anta.cli.utils import AliasedGroup, ExitCode
from anta.logger import Log, LogLevel, anta_log_exception, setup_logging
logger = logging.getLogger(__name__)
@click.group(cls=AliasedGroup)
@click.pass_context
@click.version_option(__version__)
@click.option(
"--log-file",
help="Send the logs to a file. If logging level is DEBUG, only INFO or higher will be sent to stdout.",
show_envvar=True,
type=click.Path(file_okay=True, dir_okay=False, writable=True, path_type=pathlib.Path),
)
@click.option(
"--log-level",
"-l",
help="ANTA logging level",
default=logging.getLevelName(logging.INFO),
show_envvar=True,
show_default=True,
type=click.Choice(
[Log.CRITICAL, Log.ERROR, Log.WARNING, Log.INFO, Log.DEBUG],
case_sensitive=False,
),
)
def anta(ctx: click.Context, log_level: LogLevel, log_file: pathlib.Path) -> None:
"""Arista Network Test Automation (ANTA) CLI"""
ctx.ensure_object(dict)
setup_logging(log_level, log_file)
anta.add_command(nrfu_command)
anta.add_command(check_command)
anta.add_command(exec_command)
anta.add_command(get_command)
anta.add_command(debug_command)
def cli() -> None:
"""Entrypoint for pyproject.toml"""
try:
anta(obj={}, auto_envvar_prefix="ANTA")
except Exception as e: # pylint: disable=broad-exception-caught
anta_log_exception(e, f"Uncaught Exception when running ANTA CLI\n{GITHUB_SUGGESTION}", logger)
sys.exit(ExitCode.INTERNAL_ERROR)
if __name__ == "__main__":
cli()

17
anta/cli/check/__init__.py Normal file
View file

@ -0,0 +1,17 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands to validate configuration files
"""
import click
from anta.cli.check import commands
@click.group
def check() -> None:
"""Commands to validate configuration files"""
check.add_command(commands.catalog)

29
anta/cli/check/commands.py Normal file
View file

@ -0,0 +1,29 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
# pylint: disable = redefined-outer-name
"""
Click commands to validate configuration files
"""
from __future__ import annotations
import logging
import click
from rich.pretty import pretty_repr
from anta.catalog import AntaCatalog
from anta.cli.console import console
from anta.cli.utils import catalog_options
logger = logging.getLogger(__name__)
@click.command
@catalog_options
def catalog(catalog: AntaCatalog) -> None:
"""
Check that the catalog is valid
"""
console.print(f"[bold][green]Catalog is valid: {catalog.filename}")
console.print(pretty_repr(catalog.tests))

14
anta/cli/console.py Normal file
View file

@ -0,0 +1,14 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
ANTA Top-level Console
https://rich.readthedocs.io/en/stable/console.html#console-api
"""
from rich.console import Console
from rich.theme import Theme
from anta import RICH_COLOR_THEME
console = Console(theme=Theme(RICH_COLOR_THEME))

18
anta/cli/debug/__init__.py Normal file
View file

@ -0,0 +1,18 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands to execute EOS commands on remote devices
"""
import click
from anta.cli.debug import commands
@click.group
def debug() -> None:
"""Commands to execute EOS commands on remote devices"""
debug.add_command(commands.run_cmd)
debug.add_command(commands.run_template)

75
anta/cli/debug/commands.py Normal file
View file

@ -0,0 +1,75 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
# pylint: disable = redefined-outer-name
"""
Click commands to execute EOS commands on remote devices
"""
from __future__ import annotations
import asyncio
import logging
from typing import Literal
import click
from anta.cli.console import console
from anta.cli.debug.utils import debug_options
from anta.cli.utils import ExitCode
from anta.device import AntaDevice
from anta.models import AntaCommand, AntaTemplate
logger = logging.getLogger(__name__)
@click.command
@debug_options
@click.pass_context
@click.option("--command", "-c", type=str, required=True, help="Command to run")
def run_cmd(ctx: click.Context, device: AntaDevice, command: str, ofmt: Literal["json", "text"], version: Literal["1", "latest"], revision: int) -> None:
"""Run arbitrary command to an ANTA device"""
console.print(f"Run command [green]{command}[/green] on [red]{device.name}[/red]")
# Click's Choice yields a string, so narrow it to the Literal[1, "latest"] value expected by AntaCommand
v: Literal[1, "latest"] = version if version == "latest" else 1
c = AntaCommand(command=command, ofmt=ofmt, version=v, revision=revision)
asyncio.run(device.collect(c))
if not c.collected:
console.print(f"[bold red] Command '{c.command}' failed to execute!")
ctx.exit(ExitCode.USAGE_ERROR)
elif ofmt == "json":
console.print(c.json_output)
elif ofmt == "text":
console.print(c.text_output)
@click.command
@debug_options
@click.pass_context
@click.option("--template", "-t", type=str, required=True, help="Command template to run. E.g. 'show vlan {vlan_id}'")
@click.argument("params", required=True, nargs=-1)
def run_template(
ctx: click.Context, device: AntaDevice, template: str, params: list[str], ofmt: Literal["json", "text"], version: Literal["1", "latest"], revision: int
) -> None:
# pylint: disable=too-many-arguments
"""Run arbitrary templated command to an ANTA device.
Takes a list of arguments (alternating keys and values) to build a dictionary used as template parameters.
Example:
anta debug run-template -d leaf1a -t 'show vlan {vlan_id}' vlan_id 1
"""
template_params = dict(zip(params[::2], params[1::2]))
console.print(f"Run templated command [blue]'{template}'[/blue] with [orange]{template_params}[/orange] on [red]{device.name}[/red]")
# Click's Choice yields a string, so narrow it to the Literal[1, "latest"] value expected by AntaCommand
v: Literal[1, "latest"] = version if version == "latest" else 1
t = AntaTemplate(template=template, ofmt=ofmt, version=v, revision=revision)
c = t.render(**template_params) # type: ignore
asyncio.run(device.collect(c))
if not c.collected:
console.print(f"[bold red] Command '{c.command}' failed to execute!")
ctx.exit(ExitCode.USAGE_ERROR)
elif ofmt == "json":
console.print(c.json_output)
elif ofmt == "text":
console.print(c.text_output)

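The dict(zip(params[::2], params[1::2])) line in run_template pairs the positional PARAMS into template keyword arguments; a quick illustration with made-up values:

params = ("vlan_id", "1", "interface", "Ethernet1")
template_params = dict(zip(params[::2], params[1::2]))
# -> {'vlan_id': '1', 'interface': 'Ethernet1'}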
41
anta/cli/debug/utils.py Normal file
View file

@ -0,0 +1,41 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Utils functions to use with anta.cli.debug module.
"""
from __future__ import annotations
import functools
import logging
from typing import Any
import click
from anta.cli.utils import ExitCode, inventory_options
from anta.inventory import AntaInventory
logger = logging.getLogger(__name__)
def debug_options(f: Any) -> Any:
"""Click common options required to execute a command on a specific device"""
@inventory_options
@click.option("--ofmt", type=click.Choice(["json", "text"]), default="json", help="EOS eAPI format to use. can be text or json")
@click.option("--version", "-v", type=click.Choice(["1", "latest"]), default="latest", help="EOS eAPI version")
@click.option("--revision", "-r", type=int, help="eAPI command revision", required=False)
@click.option("--device", "-d", type=str, required=True, help="Device from inventory to use")
@click.pass_context
@functools.wraps(f)
def wrapper(ctx: click.Context, *args: tuple[Any], inventory: AntaInventory, tags: list[str] | None, device: str, **kwargs: dict[str, Any]) -> Any:
# pylint: disable=unused-argument
try:
d = inventory[device]
except KeyError as e:
message = f"Device {device} does not exist in Inventory"
logger.error(message)
ctx.exit(ExitCode.USAGE_ERROR)
return f(*args, device=d, **kwargs)
return wrapper

19
anta/cli/exec/__init__.py Normal file
View file

@ -0,0 +1,19 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands to execute various scripts on EOS devices
"""
import click
from anta.cli.exec import commands
@click.group
def exec() -> None: # pylint: disable=redefined-builtin
"""Commands to execute various scripts on EOS devices"""
exec.add_command(commands.clear_counters)
exec.add_command(commands.snapshot)
exec.add_command(commands.collect_tech_support)

78
anta/cli/exec/commands.py Normal file
View file

@ -0,0 +1,78 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands to execute various scripts on EOS devices
"""
from __future__ import annotations
import asyncio
import logging
import sys
from datetime import datetime
from pathlib import Path
import click
from yaml import safe_load
from anta.cli.exec.utils import clear_counters_utils, collect_commands, collect_scheduled_show_tech
from anta.cli.utils import inventory_options
from anta.inventory import AntaInventory
logger = logging.getLogger(__name__)
@click.command
@inventory_options
def clear_counters(inventory: AntaInventory, tags: list[str] | None) -> None:
"""Clear counter statistics on EOS devices"""
asyncio.run(clear_counters_utils(inventory, tags=tags))
@click.command()
@inventory_options
@click.option(
"--commands-list",
"-c",
help="File with list of commands to collect",
required=True,
show_envvar=True,
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path),
)
@click.option(
"--output",
"-o",
show_envvar=True,
type=click.Path(file_okay=False, dir_okay=True, exists=False, writable=True, path_type=Path),
help="Directory to save commands output.",
default=f"anta_snapshot_{datetime.now().strftime('%Y-%m-%d_%H_%M_%S')}",
show_default=True,
)
def snapshot(inventory: AntaInventory, tags: list[str] | None, commands_list: Path, output: Path) -> None:
"""Collect commands output from devices in inventory"""
print(f"Collecting data for {commands_list}")
print(f"Output directory is {output}")
try:
with open(commands_list, "r", encoding="UTF-8") as file:
file_content = file.read()
eos_commands = safe_load(file_content)
except FileNotFoundError:
logger.error(f"Error reading {commands_list}")
sys.exit(1)
asyncio.run(collect_commands(inventory, eos_commands, output, tags=tags))
@click.command()
@inventory_options
@click.option("--output", "-o", default="./tech-support", show_default=True, help="Path for test catalog", type=click.Path(path_type=Path), required=False)
@click.option("--latest", help="Number of scheduled show-tech to retrieve", type=int, required=False)
@click.option(
"--configure",
help="Ensure devices have 'aaa authorization exec default local' configured (required for SCP on EOS). THIS WILL CHANGE THE CONFIGURATION OF YOUR NETWORK.",
default=False,
is_flag=True,
show_default=True,
)
def collect_tech_support(inventory: AntaInventory, tags: list[str] | None, output: Path, latest: int | None, configure: bool) -> None:
"""Collect scheduled tech-support from EOS devices"""
asyncio.run(collect_scheduled_show_tech(inventory, output, configure, tags=tags, latest=latest))

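The --commands-list file passed to snapshot is read with yaml.safe_load and handed to collect_commands (defined in anta/cli/exec/utils.py below), which looks for json_format and text_format keys. A sketch of the resulting structure, with illustrative command strings:

eos_commands = {
    "json_format": ["show version", "show ip interface brief"],
    "text_format": ["show running-config"],
}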
161
anta/cli/exec/utils.py Normal file
View file

@ -0,0 +1,161 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Exec CLI helpers
"""
from __future__ import annotations
import asyncio
import itertools
import json
import logging
import re
from pathlib import Path
from typing import Literal
from aioeapi import EapiCommandError
from httpx import ConnectError, HTTPError
from anta.device import AntaDevice, AsyncEOSDevice
from anta.inventory import AntaInventory
from anta.models import AntaCommand
EOS_SCHEDULED_TECH_SUPPORT = "/mnt/flash/schedule/tech-support"
INVALID_CHAR = "`~!@#$/"
logger = logging.getLogger(__name__)
async def clear_counters_utils(anta_inventory: AntaInventory, tags: list[str] | None = None) -> None:
"""
Clear counters
"""
async def clear(dev: AntaDevice) -> None:
commands = [AntaCommand(command="clear counters")]
if dev.hw_model not in ["cEOSLab", "vEOS-lab"]:
commands.append(AntaCommand(command="clear hardware counter drop"))
await dev.collect_commands(commands=commands)
for command in commands:
if not command.collected:
logger.error(f"Could not clear counters on device {dev.name}: {command.errors}")
logger.info(f"Cleared counters on {dev.name} ({dev.hw_model})")
logger.info("Connecting to devices...")
await anta_inventory.connect_inventory()
devices = anta_inventory.get_inventory(established_only=True, tags=tags).values()
logger.info("Clearing counters on remote devices...")
await asyncio.gather(*(clear(device) for device in devices))
async def collect_commands(
inv: AntaInventory,
commands: dict[str, str],
root_dir: Path,
tags: list[str] | None = None,
) -> None:
"""
Collect EOS commands
"""
async def collect(dev: AntaDevice, command: str, outformat: Literal["json", "text"]) -> None:
outdir = Path() / root_dir / dev.name / outformat
outdir.mkdir(parents=True, exist_ok=True)
safe_command = re.sub(r"(/|\|$)", "_", command)
c = AntaCommand(command=command, ofmt=outformat)
await dev.collect(c)
if not c.collected:
logger.error(f"Could not collect commands on device {dev.name}: {c.errors}")
return
if c.ofmt == "json":
outfile = outdir / f"{safe_command}.json"
content = json.dumps(c.json_output, indent=2)
elif c.ofmt == "text":
outfile = outdir / f"{safe_command}.log"
content = c.text_output
with outfile.open(mode="w", encoding="UTF-8") as f:
f.write(content)
logger.info(f"Collected command '{command}' from device {dev.name} ({dev.hw_model})")
logger.info("Connecting to devices...")
await inv.connect_inventory()
devices = inv.get_inventory(established_only=True, tags=tags).values()
logger.info("Collecting commands from remote devices")
coros = []
if "json_format" in commands:
coros += [collect(device, command, "json") for device, command in itertools.product(devices, commands["json_format"])]
if "text_format" in commands:
coros += [collect(device, command, "text") for device, command in itertools.product(devices, commands["text_format"])]
res = await asyncio.gather(*coros, return_exceptions=True)
for r in res:
if isinstance(r, Exception):
logger.error(f"Error when collecting commands: {str(r)}")
async def collect_scheduled_show_tech(inv: AntaInventory, root_dir: Path, configure: bool, tags: list[str] | None = None, latest: int | None = None) -> None:
"""
Collect scheduled show-tech on devices
"""
async def collect(device: AntaDevice) -> None:
"""
Collect all the tech-support files stored on Arista switches flash and copy them locally
"""
try:
# Get the tech-support filename to retrieve
cmd = f"bash timeout 10 ls -1t {EOS_SCHEDULED_TECH_SUPPORT}"
if latest:
cmd += f" | head -{latest}"
command = AntaCommand(command=cmd, ofmt="text")
await device.collect(command=command)
if command.collected and command.text_output:
filenames = list(map(lambda f: Path(f"{EOS_SCHEDULED_TECH_SUPPORT}/{f}"), command.text_output.splitlines()))
else:
logger.error(f"Unable to get tech-support filenames on {device.name}: verify that {EOS_SCHEDULED_TECH_SUPPORT} is not empty")
return
# Create directories
outdir = Path() / root_dir / f"{device.name.lower()}"
outdir.mkdir(parents=True, exist_ok=True)
# Check if 'aaa authorization exec default local' is present in the running-config
command = AntaCommand(command="show running-config | include aaa authorization exec default", ofmt="text")
await device.collect(command=command)
if command.collected and not command.text_output:
logger.debug(f"'aaa authorization exec default local' is not configured on device {device.name}")
if configure:
# Otherwise mypy complains about enable
assert isinstance(device, AsyncEOSDevice)
# TODO - @mtache - add `config` field to `AntaCommand` object to handle this use case.
commands = []
if device.enable and device._enable_password is not None: # pylint: disable=protected-access
commands.append({"cmd": "enable", "input": device._enable_password}) # pylint: disable=protected-access
elif device.enable:
commands.append({"cmd": "enable"})
commands.extend(
[
{"cmd": "configure terminal"},
{"cmd": "aaa authorization exec default local"},
]
)
logger.warning(f"Configuring 'aaa authorization exec default local' on device {device.name}")
command = AntaCommand(command="show running-config | include aaa authorization exec default local", ofmt="text")
await device._session.cli(commands=commands) # pylint: disable=protected-access
logger.info(f"Configured 'aaa authorization exec default local' on device {device.name}")
else:
logger.error(f"Unable to collect tech-support on {device.name}: configuration 'aaa authorization exec default local' is not present")
return
logger.debug(f"'aaa authorization exec default local' is already configured on device {device.name}")
await device.copy(sources=filenames, destination=outdir, direction="from")
logger.info(f"Collected {len(filenames)} scheduled tech-support from {device.name}")
except (EapiCommandError, HTTPError, ConnectError) as e:
logger.error(f"Unable to collect tech-support on {device.name}: {str(e)}")
logger.info("Connecting to devices...")
await inv.connect_inventory()
devices = inv.get_inventory(established_only=True, tags=tags).values()
await asyncio.gather(*(collect(device) for device in devices))

20
anta/cli/get/__init__.py Normal file
View file

@ -0,0 +1,20 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands to get information from or generate inventories
"""
import click
from anta.cli.get import commands
@click.group
def get() -> None:
"""Commands to get information from or generate inventories"""
get.add_command(commands.from_cvp)
get.add_command(commands.from_ansible)
get.add_command(commands.inventory)
get.add_command(commands.tags)

115
anta/cli/get/commands.py Normal file
View file

@ -0,0 +1,115 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
# pylint: disable = redefined-outer-name
"""
Click commands to get information from or generate inventories
"""
from __future__ import annotations
import asyncio
import json
import logging
from pathlib import Path
import click
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpApiError
from rich.pretty import pretty_repr
from anta.cli.console import console
from anta.cli.get.utils import inventory_output_options
from anta.cli.utils import ExitCode, inventory_options
from anta.inventory import AntaInventory
from .utils import create_inventory_from_ansible, create_inventory_from_cvp, get_cv_token
logger = logging.getLogger(__name__)
@click.command
@click.pass_context
@inventory_output_options
@click.option("--host", "-host", help="CloudVision instance FQDN or IP", type=str, required=True)
@click.option("--username", "-u", help="CloudVision username", type=str, required=True)
@click.option("--password", "-p", help="CloudVision password", type=str, required=True)
@click.option("--container", "-c", help="CloudVision container where devices are configured", type=str)
def from_cvp(ctx: click.Context, output: Path, host: str, username: str, password: str, container: str | None) -> None:
"""
Build ANTA inventory from Cloudvision
TODO - handle get_inventory and get_devices_in_container failure
"""
logger.info(f"Getting authentication token for user '{username}' from CloudVision instance '{host}'")
token = get_cv_token(cvp_ip=host, cvp_username=username, cvp_password=password)
clnt = CvpClient()
try:
clnt.connect(nodes=[host], username="", password="", api_token=token)
except CvpApiError as error:
logger.error(f"Error connecting to CloudVision: {error}")
ctx.exit(ExitCode.USAGE_ERROR)
logger.info(f"Connected to CloudVision instance '{host}'")
cvp_inventory = None
if container is None:
# Get a list of all devices
logger.info(f"Getting full inventory from CloudVision instance '{host}'")
cvp_inventory = clnt.api.get_inventory()
else:
# Get devices under a container
logger.info(f"Getting inventory for container {container} from CloudVision instance '{host}'")
cvp_inventory = clnt.api.get_devices_in_container(container)
create_inventory_from_cvp(cvp_inventory, output)
@click.command
@click.pass_context
@inventory_output_options
@click.option("--ansible-group", "-g", help="Ansible group to filter", type=str, default="all")
@click.option(
"--ansible-inventory",
help="Path to your ansible inventory file to read",
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path),
required=True,
)
def from_ansible(ctx: click.Context, output: Path, ansible_group: str, ansible_inventory: Path) -> None:
"""Build ANTA inventory from an ansible inventory YAML file"""
logger.info(f"Building inventory from ansible file '{ansible_inventory}'")
try:
create_inventory_from_ansible(
inventory=ansible_inventory,
output=output,
ansible_group=ansible_group,
)
except ValueError as e:
logger.error(str(e))
ctx.exit(ExitCode.USAGE_ERROR)
@click.command
@inventory_options
@click.option("--connected/--not-connected", help="Display inventory after connection has been created", default=False, required=False)
def inventory(inventory: AntaInventory, tags: list[str] | None, connected: bool) -> None:
"""Show inventory loaded in ANTA."""
logger.debug(f"Requesting devices for tags: {tags}")
console.print("Current inventory content is:", style="white on blue")
if connected:
asyncio.run(inventory.connect_inventory())
inventory_result = inventory.get_inventory(tags=tags)
console.print(pretty_repr(inventory_result))
@click.command
@inventory_options
def tags(inventory: AntaInventory, tags: list[str] | None) -> None: # pylint: disable=unused-argument
"""Get list of configured tags in user inventory."""
tags_found = []
for device in inventory.values():
tags_found += device.tags
tags_found = sorted(set(tags_found))
console.print("Tags found:")
console.print_json(json.dumps(tags_found, indent=2))

153
anta/cli/get/utils.py Normal file
View file

@ -0,0 +1,153 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Utils functions to use with anta.cli.get.commands module.
"""
from __future__ import annotations
import functools
import json
import logging
from pathlib import Path
from sys import stdin
from typing import Any
import click
import requests
import urllib3
import yaml
from anta.cli.utils import ExitCode
from anta.inventory import AntaInventory
from anta.inventory.models import AntaInventoryHost, AntaInventoryInput
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
def inventory_output_options(f: Any) -> Any:
"""Click common options required when an inventory is being generated"""
@click.option(
"--output",
"-o",
required=True,
envvar="ANTA_INVENTORY",
show_envvar=True,
help="Path to save inventory file",
type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=Path),
)
@click.option(
"--overwrite",
help="Do not prompt when overriding current inventory",
default=False,
is_flag=True,
show_default=True,
required=False,
show_envvar=True,
)
@click.pass_context
@functools.wraps(f)
def wrapper(ctx: click.Context, *args: tuple[Any], output: Path, overwrite: bool, **kwargs: dict[str, Any]) -> Any:
# Boolean flag: True when the output file already exists and is not empty
output_is_not_empty = output.exists() and output.stat().st_size != 0
# Check overwrite when file is not empty
if not overwrite and output_is_not_empty:
is_tty = stdin.isatty()
if is_tty:
# File has content and it is in an interactive TTY --> Prompt user
click.confirm(f"Your destination file '{output}' is not empty, continue?", abort=True)
else:
# File has content, the session is not an interactive TTY and overwrite is not set --> stop execution
logger.critical("Conversion aborted since destination file is not empty (not running in interactive TTY)")
ctx.exit(ExitCode.USAGE_ERROR)
output.parent.mkdir(parents=True, exist_ok=True)
return f(*args, output=output, **kwargs)
return wrapper
def get_cv_token(cvp_ip: str, cvp_username: str, cvp_password: str) -> str:
"""Generate AUTH token from CVP using password"""
# TODO: handle requests errors
# use CVP REST API to generate a token
URL = f"https://{cvp_ip}/cvpservice/login/authenticate.do"
payload = json.dumps({"userId": cvp_username, "password": cvp_password})
headers = {"Content-Type": "application/json", "Accept": "application/json"}
response = requests.request("POST", URL, headers=headers, data=payload, verify=False, timeout=10)
return response.json()["sessionId"]
def write_inventory_to_file(hosts: list[AntaInventoryHost], output: Path) -> None:
"""Write a file inventory from pydantic models"""
i = AntaInventoryInput(hosts=hosts)
with open(output, "w", encoding="UTF-8") as out_fd:
out_fd.write(yaml.dump({AntaInventory.INVENTORY_ROOT_KEY: i.model_dump(exclude_unset=True)}))
logger.info(f"ANTA inventory file has been created: '{output}'")
def create_inventory_from_cvp(inv: list[dict[str, Any]], output: Path) -> None:
"""
Create an inventory file from Arista CloudVision inventory
"""
logger.debug(f"Received {len(inv)} device(s) from CloudVision")
hosts = []
for dev in inv:
logger.info(f" * adding entry for {dev['hostname']}")
hosts.append(AntaInventoryHost(name=dev["hostname"], host=dev["ipAddress"], tags=[dev["containerName"].lower()]))
write_inventory_to_file(hosts, output)
def create_inventory_from_ansible(inventory: Path, output: Path, ansible_group: str = "all") -> None:
"""
Create an ANTA inventory from an Ansible inventory YAML file
Args:
inventory: Ansible Inventory file to read
output: ANTA inventory file to generate.
ansible_group: Ansible group from where to extract data.
"""
def find_ansible_group(data: dict[str, Any], group: str) -> dict[str, Any] | None:
for k, v in data.items():
if isinstance(v, dict):
if k == group and ("children" in v.keys() or "hosts" in v.keys()):
return v
d = find_ansible_group(v, group)
if d is not None:
return d
return None
def deep_yaml_parsing(data: dict[str, Any], hosts: list[AntaInventoryHost] | None = None) -> list[AntaInventoryHost]:
"""Deep parsing of YAML file to extract hosts and associated IPs"""
if hosts is None:
hosts = []
for key, value in data.items():
if isinstance(value, dict) and "ansible_host" in value.keys():
logger.info(f" * adding entry for {key}")
hosts.append(AntaInventoryHost(name=key, host=value["ansible_host"]))
elif isinstance(value, dict):
deep_yaml_parsing(value, hosts)
else:
return hosts
return hosts
try:
with open(inventory, encoding="utf-8") as inv:
ansible_inventory = yaml.safe_load(inv)
except OSError as exc:
raise ValueError(f"Could not parse {inventory}.") from exc
if not ansible_inventory:
raise ValueError(f"Ansible inventory {inventory} is empty")
ansible_inventory = find_ansible_group(ansible_inventory, ansible_group)
if ansible_inventory is None:
raise ValueError(f"Group {ansible_group} not found in Ansible inventory")
ansible_hosts = deep_yaml_parsing(ansible_inventory)
write_inventory_to_file(ansible_hosts, output)

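To make the two nested helpers concrete, here is a hypothetical Ansible inventory (already parsed by yaml.safe_load) that they would accept: find_ansible_group(data, "fabric") returns the inner group dict, and deep_yaml_parsing then extracts leaf1/leaf2 with their ansible_host values:

ansible_data = {
    "all": {
        "children": {
            "fabric": {
                "hosts": {
                    "leaf1": {"ansible_host": "10.0.0.1"},
                    "leaf2": {"ansible_host": "10.0.0.2"},
                },
            },
        },
    },
}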
81
anta/cli/nrfu/__init__.py Normal file
View file

@ -0,0 +1,81 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands that run ANTA tests using anta.runner
"""
from __future__ import annotations
import asyncio
import click
from anta.catalog import AntaCatalog
from anta.cli.nrfu import commands
from anta.cli.utils import AliasedGroup, catalog_options, inventory_options
from anta.inventory import AntaInventory
from anta.models import AntaTest
from anta.result_manager import ResultManager
from anta.runner import main
from .utils import anta_progress_bar, print_settings
class IgnoreRequiredWithHelp(AliasedGroup):
"""
https://stackoverflow.com/questions/55818737/python-click-application-required-parameters-have-precedence-over-sub-command-he
Solution to allow help without required options on subcommand
This is not planned to be fixed in click as per: https://github.com/pallets/click/issues/295#issuecomment-708129734
"""
def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
"""
Ignore MissingParameter exception when parsing arguments if `--help`
is present for a subcommand
"""
# Adding a flag for potential callbacks
ctx.ensure_object(dict)
if "--help" in args:
ctx.obj["_anta_help"] = True
try:
return super().parse_args(ctx, args)
except click.MissingParameter:
if "--help" not in args:
raise
# remove the required params so that help can display
for param in self.params:
param.required = False
return super().parse_args(ctx, args)
@click.group(invoke_without_command=True, cls=IgnoreRequiredWithHelp)
@click.pass_context
@inventory_options
@catalog_options
@click.option("--ignore-status", help="Always exit with success", show_envvar=True, is_flag=True, default=False)
@click.option("--ignore-error", help="Only report failures and not errors", show_envvar=True, is_flag=True, default=False)
def nrfu(ctx: click.Context, inventory: AntaInventory, tags: list[str] | None, catalog: AntaCatalog, ignore_status: bool, ignore_error: bool) -> None:
"""Run ANTA tests on devices"""
# If help is invoked somewhere, skip the command
if ctx.obj.get("_anta_help"):
return
# We use ctx.obj to pass stuff to the next Click functions
ctx.ensure_object(dict)
ctx.obj["result_manager"] = ResultManager()
ctx.obj["ignore_status"] = ignore_status
ctx.obj["ignore_error"] = ignore_error
print_settings(inventory, catalog)
with anta_progress_bar() as AntaTest.progress:
asyncio.run(main(ctx.obj["result_manager"], inventory, catalog, tags=tags))
# Invoke `anta nrfu table` if no command is passed
if ctx.invoked_subcommand is None:
ctx.invoke(commands.table)
nrfu.add_command(commands.table)
nrfu.add_command(commands.json)
nrfu.add_command(commands.text)
nrfu.add_command(commands.tpl_report)

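For orientation, the body of the nrfu group above boils down to the following sequence; the inventory/catalog construction shown here is an assumption standing in for what the inventory_options and catalog_options decorators in anta/cli/utils.py normally do, and the file paths and credentials are placeholders:

import asyncio

from anta.catalog import AntaCatalog
from anta.cli.nrfu.utils import anta_progress_bar, print_settings
from anta.inventory import AntaInventory
from anta.models import AntaTest
from anta.result_manager import ResultManager
from anta.runner import main

inventory = AntaInventory.parse(filename="inventory.yml", username="admin", password="admin")
catalog = AntaCatalog.parse("catalog.yml")

manager = ResultManager()
print_settings(inventory, catalog)
with anta_progress_bar() as AntaTest.progress:
    asyncio.run(main(manager, inventory, catalog, tags=None))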
81
anta/cli/nrfu/commands.py Normal file
View file

@ -0,0 +1,81 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Click commands that render ANTA tests results
"""
from __future__ import annotations
import logging
import pathlib
import click
from anta.cli.utils import exit_with_code
from .utils import print_jinja, print_json, print_table, print_text
logger = logging.getLogger(__name__)
@click.command()
@click.pass_context
@click.option("--device", "-d", help="Show a summary for this device", type=str, required=False)
@click.option("--test", "-t", help="Show a summary for this test", type=str, required=False)
@click.option(
"--group-by", default=None, type=click.Choice(["device", "test"], case_sensitive=False), help="Group result by test or host. default none", required=False
)
def table(ctx: click.Context, device: str | None, test: str | None, group_by: str) -> None:
"""ANTA command to check network states with table result"""
print_table(results=ctx.obj["result_manager"], device=device, group_by=group_by, test=test)
exit_with_code(ctx)
@click.command()
@click.pass_context
@click.option(
"--output",
"-o",
type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path),
show_envvar=True,
required=False,
help="Path to save report as a file",
)
def json(ctx: click.Context, output: pathlib.Path | None) -> None:
"""ANTA command to check network state with JSON result"""
print_json(results=ctx.obj["result_manager"], output=output)
exit_with_code(ctx)
@click.command()
@click.pass_context
@click.option("--search", "-s", help="Regular expression to search in both name and test", type=str, required=False)
@click.option("--skip-error", help="Hide tests in errors due to connectivity issue", default=False, is_flag=True, show_default=True, required=False)
def text(ctx: click.Context, search: str | None, skip_error: bool) -> None:
"""ANTA command to check network states with text result"""
print_text(results=ctx.obj["result_manager"], search=search, skip_error=skip_error)
exit_with_code(ctx)
@click.command()
@click.pass_context
@click.option(
"--template",
"-tpl",
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=pathlib.Path),
show_envvar=True,
required=True,
help="Path to the template to use for the report",
)
@click.option(
"--output",
"-o",
type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path),
show_envvar=True,
required=False,
help="Path to save report as a file",
)
def tpl_report(ctx: click.Context, template: pathlib.Path, output: pathlib.Path | None) -> None:
"""ANTA command to check network state with templated report"""
print_jinja(results=ctx.obj["result_manager"], template=template, output=output)
exit_with_code(ctx)

134
anta/cli/nrfu/utils.py Normal file
View file

@ -0,0 +1,134 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Utils functions to use with anta.cli.nrfu.commands module.
"""
from __future__ import annotations
import json
import logging
import pathlib
import re
import rich
from rich.panel import Panel
from rich.pretty import pprint
from rich.progress import BarColumn, MofNCompleteColumn, Progress, SpinnerColumn, TextColumn, TimeElapsedColumn, TimeRemainingColumn
from anta.catalog import AntaCatalog
from anta.cli.console import console
from anta.inventory import AntaInventory
from anta.reporter import ReportJinja, ReportTable
from anta.result_manager import ResultManager
logger = logging.getLogger(__name__)
def print_settings(
inventory: AntaInventory,
catalog: AntaCatalog,
) -> None:
"""Print ANTA settings before running tests"""
message = f"Running ANTA tests:\n- {inventory}\n- Tests catalog contains {len(catalog.tests)} tests"
console.print(Panel.fit(message, style="cyan", title="[green]Settings"))
console.print()
def print_table(results: ResultManager, device: str | None = None, test: str | None = None, group_by: str | None = None) -> None:
"""Print result in a table"""
reporter = ReportTable()
console.print()
if device:
console.print(reporter.report_all(result_manager=results, host=device))
elif test:
console.print(reporter.report_all(result_manager=results, testcase=test))
elif group_by == "device":
console.print(reporter.report_summary_hosts(result_manager=results, host=None))
elif group_by == "test":
console.print(reporter.report_summary_tests(result_manager=results, testcase=None))
else:
console.print(reporter.report_all(result_manager=results))
def print_json(results: ResultManager, output: pathlib.Path | None = None) -> None:
"""Print result in a json format"""
console.print()
console.print(Panel("JSON results of all tests", style="cyan"))
rich.print_json(results.get_json_results())
if output is not None:
with open(output, "w", encoding="utf-8") as fout:
fout.write(results.get_json_results())
def print_list(results: ResultManager, output: pathlib.Path | None = None) -> None:
"""Print result in a list"""
console.print()
console.print(Panel.fit("List results of all tests", style="cyan"))
pprint(results.get_results())
if output is not None:
with open(output, "w", encoding="utf-8") as fout:
fout.write(str(results.get_results()))
def print_text(results: ResultManager, search: str | None = None, skip_error: bool = False) -> None:
"""Print results as simple text"""
console.print()
regexp = re.compile(search or ".*")
for line in results.get_results():
if any(regexp.match(entry) for entry in [line.name, line.test]) and (not skip_error or line.result != "error"):
message = f" ({str(line.messages[0])})" if len(line.messages) > 0 else ""
console.print(f"{line.name} :: {line.test} :: [{line.result}]{line.result.upper()}[/{line.result}]{message}", highlight=False)
def print_jinja(results: ResultManager, template: pathlib.Path, output: pathlib.Path | None = None) -> None:
"""Print result based on template."""
console.print()
reporter = ReportJinja(template_path=template)
json_data = json.loads(results.get_json_results())
report = reporter.render(json_data)
console.print(report)
if output is not None:
with open(output, "w", encoding="utf-8") as file:
file.write(report)
# Adding our own ANTA spinner - overriding rich SPINNERS for our own
# so ignore warning for redefinition
rich.spinner.SPINNERS = { # type: ignore[attr-defined] # noqa: F811
"anta": {
"interval": 150,
"frames": [
"( 🐜)",
"( 🐜 )",
"( 🐜 )",
"( 🐜 )",
"( 🐜 )",
"(🐜 )",
"(🐌 )",
"( 🐌 )",
"( 🐌 )",
"( 🐌 )",
"( 🐌 )",
"( 🐌)",
],
}
}
def anta_progress_bar() -> Progress:
"""
Return a customized Progress for progress bar
"""
return Progress(
SpinnerColumn("anta"),
TextColumn(""),
TextColumn("{task.description}[progress.percentage]{task.percentage:>3.0f}%"),
BarColumn(bar_width=None),
MofNCompleteColumn(),
TextColumn(""),
TimeElapsedColumn(),
TextColumn(""),
TimeRemainingColumn(),
expand=True,
)

274
anta/cli/utils.py Normal file
View file

@ -0,0 +1,274 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Utils functions to use with anta.cli module.
"""
from __future__ import annotations
import enum
import functools
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Any
import click
from pydantic import ValidationError
from yaml import YAMLError
from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.inventory.exceptions import InventoryIncorrectSchema, InventoryRootKeyError
if TYPE_CHECKING:
from click import Option
logger = logging.getLogger(__name__)
class ExitCode(enum.IntEnum):
"""
Encodes the valid exit codes used by ANTA,
inspired by pytest.
"""
# Tests passed.
OK = 0
# An internal error got in the way.
INTERNAL_ERROR = 1
# CLI was misused
USAGE_ERROR = 2
# Test error
TESTS_ERROR = 3
# Tests failed
TESTS_FAILED = 4
def parse_tags(ctx: click.Context, param: Option, value: str) -> list[str] | None:
# pylint: disable=unused-argument
"""
Click option callback to parse an ANTA inventory tags
"""
if value is not None:
return value.split(",") if "," in value else [value]
return None
def exit_with_code(ctx: click.Context) -> None:
"""
Exit the Click application with an exit code.
This function determines the global test status to be either `unset`, `skipped`, `success`, `failure` or `error`
from the `ResultManager` instance.
If flag `ignore_error` is set, the `error` status will be ignored in all the tests.
If flag `ignore_status` is set, the exit code will always be 0.
Exit the application with the following exit code:
* 0 if `ignore_status` is `True` or global test status is `unset`, `skipped` or `success`
* 4 (`TESTS_FAILED`) if status is `failure`
* 3 (`TESTS_ERROR`) if status is `error`
Args:
ctx: Click Context
"""
if ctx.obj.get("ignore_status"):
ctx.exit(ExitCode.OK)
# If ignore_error is True then status can never be "error"
status = ctx.obj["result_manager"].get_status(ignore_error=bool(ctx.obj.get("ignore_error")))
if status in {"unset", "skipped", "success"}:
ctx.exit(ExitCode.OK)
if status == "failure":
ctx.exit(ExitCode.TESTS_FAILED)
if status == "error":
ctx.exit(ExitCode.TESTS_ERROR)
logger.error("Please gather logs and open an issue on Github.")
raise ValueError(f"Unknown status returned by the ResultManager: {status}. Please gather logs and open an issue on Github.")
class AliasedGroup(click.Group):
"""
Implements a subclass of Group that accepts a prefix for a command.
If there were a command called push, it would accept pus as an alias (so long as it was unique)
From Click documentation
"""
def get_command(self, ctx: click.Context, cmd_name: str) -> Any:
"""Todo: document code"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)]
if not matches:
return None
if len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")
return None
def resolve_command(self, ctx: click.Context, args: Any) -> Any:
"""Todo: document code"""
# always return the full command name
_, cmd, args = super().resolve_command(ctx, args)
return cmd.name, cmd, args # type: ignore
# TODO: check code of click.pass_context that raise mypy errors for types and adapt this decorator
def inventory_options(f: Any) -> Any:
"""Click common options when requiring an inventory to interact with devices"""
@click.option(
"--username",
"-u",
help="Username to connect to EOS",
envvar="ANTA_USERNAME",
show_envvar=True,
required=True,
)
@click.option(
"--password",
"-p",
help="Password to connect to EOS that must be provided. It can be prompted using '--prompt' option.",
show_envvar=True,
envvar="ANTA_PASSWORD",
)
@click.option(
"--enable-password",
help="Password to access EOS Privileged EXEC mode. It can be prompted using '--prompt' option. Requires '--enable' option.",
show_envvar=True,
envvar="ANTA_ENABLE_PASSWORD",
)
@click.option(
"--enable",
help="Some commands may require EOS Privileged EXEC mode. This option tries to access this mode before sending a command to the device.",
default=False,
show_envvar=True,
envvar="ANTA_ENABLE",
is_flag=True,
show_default=True,
)
@click.option(
"--prompt",
"-P",
help="Prompt for passwords if they are not provided.",
default=False,
show_envvar=True,
envvar="ANTA_PROMPT",
is_flag=True,
show_default=True,
)
@click.option(
"--timeout",
help="Global connection timeout",
default=30,
show_envvar=True,
envvar="ANTA_TIMEOUT",
show_default=True,
)
@click.option(
"--insecure",
help="Disable SSH Host Key validation",
default=False,
show_envvar=True,
envvar="ANTA_INSECURE",
is_flag=True,
show_default=True,
)
@click.option("--disable-cache", help="Disable cache globally", show_envvar=True, envvar="ANTA_DISABLE_CACHE", show_default=True, is_flag=True, default=False)
@click.option(
"--inventory",
"-i",
help="Path to the inventory YAML file",
envvar="ANTA_INVENTORY",
show_envvar=True,
required=True,
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path),
)
@click.option(
"--tags",
"-t",
help="List of tags using comma as separator: tag1,tag2,tag3",
show_envvar=True,
envvar="ANTA_TAGS",
type=str,
required=False,
callback=parse_tags,
)
@click.pass_context
@functools.wraps(f)
def wrapper(
ctx: click.Context,
*args: tuple[Any],
inventory: Path,
tags: list[str] | None,
username: str,
password: str | None,
enable_password: str | None,
enable: bool,
prompt: bool,
timeout: int,
insecure: bool,
disable_cache: bool,
**kwargs: dict[str, Any],
) -> Any:
# pylint: disable=too-many-arguments
# If help is invoked somewhere, do not parse inventory
if ctx.obj.get("_anta_help"):
return f(*args, inventory=None, tags=tags, **kwargs)
if prompt:
# User asked for a password prompt
if password is None:
password = click.prompt("Please enter a password to connect to EOS", type=str, hide_input=True, confirmation_prompt=True)
if enable:
if enable_password is None:
if click.confirm("Is a password required to enter EOS privileged EXEC mode?"):
enable_password = click.prompt(
"Please enter a password to enter EOS privileged EXEC mode", type=str, hide_input=True, confirmation_prompt=True
)
if password is None:
raise click.BadParameter("EOS password needs to be provided by using either the '--password' option or the '--prompt' option.")
if not enable and enable_password:
raise click.BadParameter("Providing a password to access EOS Privileged EXEC mode requires '--enable' option.")
try:
i = AntaInventory.parse(
filename=inventory,
username=username,
password=password,
enable=enable,
enable_password=enable_password,
timeout=timeout,
insecure=insecure,
disable_cache=disable_cache,
)
except (ValidationError, TypeError, ValueError, YAMLError, OSError, InventoryIncorrectSchema, InventoryRootKeyError):
ctx.exit(ExitCode.USAGE_ERROR)
return f(*args, inventory=i, tags=tags, **kwargs)
return wrapper
def catalog_options(f: Any) -> Any:
"""Click common options when requiring a test catalog to execute ANTA tests"""
@click.option(
"--catalog",
"-c",
envvar="ANTA_CATALOG",
show_envvar=True,
help="Path to the test catalog YAML file",
type=click.Path(file_okay=True, dir_okay=False, exists=True, readable=True, path_type=Path),
required=True,
)
@click.pass_context
@functools.wraps(f)
def wrapper(ctx: click.Context, *args: tuple[Any], catalog: Path, **kwargs: dict[str, Any]) -> Any:
# If help is invoked somewhere, do not parse the catalog
if ctx.obj.get("_anta_help"):
return f(*args, catalog=None, **kwargs)
try:
c = AntaCatalog.parse(catalog)
except (ValidationError, TypeError, ValueError, YAMLError, OSError):
ctx.exit(ExitCode.USAGE_ERROR)
return f(*args, catalog=c, **kwargs)
return wrapper
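A minimal sketch of how these two decorators are meant to be stacked on a Click command. The command name `example` and its body are hypothetical; only the decorator names and the injected `inventory`, `tags` and `catalog` parameters come from the wrappers above, and the module's existing imports (click, AntaInventory, AntaCatalog) are assumed.

@click.command()
@inventory_options
@catalog_options
def example(inventory: AntaInventory, tags: list[str] | None, catalog: AntaCatalog) -> None:
    """Hypothetical command receiving the parsed inventory and catalog."""
    click.echo(f"Inventory has {len(inventory)} device(s)")
    click.echo(f"Catalog parsed from file: {catalog is not None}")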

122
anta/custom_types.py Normal file
View file

@ -0,0 +1,122 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Module that provides predefined types for AntaTest.Input instances
"""
import re
from typing import Literal
from pydantic import Field
from pydantic.functional_validators import AfterValidator, BeforeValidator
from typing_extensions import Annotated
def aaa_group_prefix(v: str) -> str:
"""Prefix the AAA method with 'group' if it is known"""
built_in_methods = ["local", "none", "logging"]
return f"group {v}" if v not in built_in_methods and not v.startswith("group ") else v
def interface_autocomplete(v: str) -> str:
"""Allow the user to only provide the beginning of an interface name.
Supported aliases:
- `et`, `eth` will be changed to `Ethernet`
- `po` will be changed to `Port-Channel`
- `lo` will be changed to `Loopback`"""
intf_id_re = re.compile(r"[0-9]+(\/[0-9]+)*(\.[0-9]+)?")
m = intf_id_re.search(v)
if m is None:
raise ValueError(f"Could not parse interface ID in interface '{v}'")
intf_id = m[0]
alias_map = {"et": "Ethernet", "eth": "Ethernet", "po": "Port-Channel", "lo": "Loopback"}
for alias, full_name in alias_map.items():
if v.lower().startswith(alias):
return f"{full_name}{intf_id}"
return v
def interface_case_sensitivity(v: str) -> str:
"""Reformat interface name to match expected case sensitivity.
Examples:
- ethernet -> Ethernet
- vlan -> Vlan
- loopback -> Loopback
"""
if isinstance(v, str) and len(v) > 0 and not v[0].isupper():
return f"{v[0].upper()}{v[1:]}"
return v
def bgp_multiprotocol_capabilities_abbreviations(value: str) -> str:
"""
Abbreviations for different BGP multiprotocol capabilities.
Examples:
- IPv4 Unicast
- L2vpnEVPN
- ipv4 MPLS Labels
- ipv4Mplsvpn
"""
patterns = {
r"\b(l2[\s\-]?vpn[\s\-]?evpn)\b": "l2VpnEvpn",
r"\bipv4[\s_-]?mpls[\s_-]?label(s)?\b": "ipv4MplsLabels",
r"\bipv4[\s_-]?mpls[\s_-]?vpn\b": "ipv4MplsVpn",
r"\bipv4[\s_-]?uni[\s_-]?cast\b": "ipv4Unicast",
}
for pattern, replacement in patterns.items():
match = re.search(pattern, value, re.IGNORECASE)
if match:
return replacement
return value
# ANTA framework
TestStatus = Literal["unset", "success", "failure", "error", "skipped"]
# AntaTest.Input types
AAAAuthMethod = Annotated[str, AfterValidator(aaa_group_prefix)]
Vlan = Annotated[int, Field(ge=0, le=4094)]
MlagPriority = Annotated[int, Field(ge=1, le=32767)]
Vni = Annotated[int, Field(ge=1, le=16777215)]
Interface = Annotated[
str,
Field(pattern=r"^(Dps|Ethernet|Fabric|Loopback|Management|Port-Channel|Tunnel|Vlan|Vxlan)[0-9]+(\/[0-9]+)*(\.[0-9]+)?$"),
BeforeValidator(interface_autocomplete),
BeforeValidator(interface_case_sensitivity),
]
VxlanSrcIntf = Annotated[
str,
Field(pattern=r"^(Loopback)([0-9]|[1-9][0-9]{1,2}|[1-7][0-9]{3}|8[01][0-9]{2}|819[01])$"),
BeforeValidator(interface_autocomplete),
BeforeValidator(interface_case_sensitivity),
]
Afi = Literal["ipv4", "ipv6", "vpn-ipv4", "vpn-ipv6", "evpn", "rt-membership"]
Safi = Literal["unicast", "multicast", "labeled-unicast"]
EncryptionAlgorithm = Literal["RSA", "ECDSA"]
RsaKeySize = Literal[2048, 3072, 4096]
EcdsaKeySize = Literal[256, 384, 521]
MultiProtocolCaps = Annotated[str, BeforeValidator(bgp_multiprotocol_capabilities_abbreviations)]
BfdInterval = Annotated[int, Field(ge=50, le=60000)]
BfdMultiplier = Annotated[int, Field(ge=3, le=50)]
ErrDisableReasons = Literal[
"acl",
"arp-inspection",
"bpduguard",
"dot1x-session-replace",
"hitless-reload-down",
"lacp-rate-limit",
"link-flap",
"no-internal-vlan",
"portchannelguard",
"portsec",
"tapagg",
"uplink-failure-detection",
]
ErrDisableInterval = Annotated[int, Field(ge=30, le=86400)]
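A small validation sketch, assuming pydantic v2's `TypeAdapter`; the input strings are arbitrary examples chosen to show the normalizers defined above being applied before the pattern and range checks.

from pydantic import TypeAdapter

from anta.custom_types import Interface, MultiProtocolCaps, Vlan

print(TypeAdapter(Interface).validate_python("eth1/1"))             # -> "Ethernet1/1"
print(TypeAdapter(Interface).validate_python("po500"))              # -> "Port-Channel500"
print(TypeAdapter(MultiProtocolCaps).validate_python("L2VPN EVPN")) # -> "l2VpnEvpn"
print(TypeAdapter(Vlan).validate_python(4094))                      # -> 4094 (4095 would raise a ValidationError)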

104
anta/decorators.py Normal file
View file

@ -0,0 +1,104 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""decorators for tests."""
from __future__ import annotations
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, cast
from anta.models import AntaTest, logger
if TYPE_CHECKING:
from anta.result_manager.models import TestResult
# TODO - should probably use mypy Awaitable in some places rather than this everywhere - @gmuloc
F = TypeVar("F", bound=Callable[..., Any])
def deprecated_test(new_tests: Optional[list[str]] = None) -> Callable[[F], F]:
"""
Return a decorator to log a message of WARNING severity when a test is deprecated.
Args:
new_tests (Optional[list[str]]): A list of new test classes that should replace the deprecated test.
Returns:
Callable[[F], F]: A decorator that can be used to wrap test functions.
"""
def decorator(function: F) -> F:
"""
Actual decorator that logs the message.
Args:
function (F): The test function to be decorated.
Returns:
F: The decorated function.
"""
@wraps(function)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
anta_test = args[0]
if new_tests:
new_test_names = ", ".join(new_tests)
logger.warning(f"{anta_test.name} test is deprecated. Consider using the following new tests: {new_test_names}.")
else:
logger.warning(f"{anta_test.name} test is deprecated.")
return await function(*args, **kwargs)
return cast(F, wrapper)
return decorator
def skip_on_platforms(platforms: list[str]) -> Callable[[F], F]:
"""
Return a decorator to skip a test based on the device's hardware model.
This decorator factory generates a decorator that will check the hardware model of the device
the test is run on. If the model is in the list of platforms specified, the test will be skipped.
Args:
platforms (list[str]): List of hardware models on which the test should be skipped.
Returns:
Callable[[F], F]: A decorator that can be used to wrap test functions.
"""
def decorator(function: F) -> F:
"""
Actual decorator that either runs the test or skips it based on the device's hardware model.
Args:
function (F): The test function to be decorated.
Returns:
F: The decorated function.
"""
@wraps(function)
async def wrapper(*args: Any, **kwargs: Any) -> TestResult:
"""
Check the device's hardware model and conditionally run or skip the test.
This wrapper inspects the hardware model of the device the test is run on.
If the model is in the list of specified platforms, the test is skipped.
"""
anta_test = args[0]
if anta_test.result.result != "unset":
AntaTest.update_progress()
return anta_test.result
if anta_test.device.hw_model in platforms:
anta_test.result.is_skipped(f"{anta_test.__class__.__name__} test is not supported on {anta_test.device.hw_model}.")
AntaTest.update_progress()
return anta_test.result
return await function(*args, **kwargs)
return cast(F, wrapper)
return decorator
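A hedged usage sketch: both decorators are meant to sit above `AntaTest.anta_test` on the `test()` coroutine of a test class. `VerifyExample`, its command and the platform names are illustrative placeholders and not part of the ANTA test library; `deprecated_test` is applied in the same position.

from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTest

class VerifyExample(AntaTest):
    """Hypothetical test that is skipped on lab platforms."""
    name = "VerifyExample"
    description = "Illustrative test skipped on lab platforms."
    categories = ["example"]
    commands = [AntaCommand(command="show version")]

    @skip_on_platforms(["cEOSLab", "vEOS-lab"])
    @AntaTest.anta_test
    def test(self) -> None:
        self.result.is_success()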

417
anta/device.py Normal file
View file

@ -0,0 +1,417 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
ANTA Device Abstraction Module
"""
from __future__ import annotations
import asyncio
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from pathlib import Path
from typing import Any, Iterator, Literal, Optional, Union
import asyncssh
from aiocache import Cache
from aiocache.plugins import HitMissRatioPlugin
from asyncssh import SSHClientConnection, SSHClientConnectionOptions
from httpx import ConnectError, HTTPError
from anta import __DEBUG__, aioeapi
from anta.models import AntaCommand
from anta.tools.misc import exc_to_str
logger = logging.getLogger(__name__)
class AntaDevice(ABC):
"""
Abstract class representing a device in ANTA.
An implementation of this class must override the abstract coroutines `_collect()` and
`refresh()`.
Attributes:
name: Device name
is_online: True if the device IP is reachable and a port can be open
established: True if remote command execution succeeds
hw_model: Hardware model of the device
tags: List of tags for this device
cache: In-memory cache from aiocache library for this device (None if cache is disabled)
cache_locks: Dictionary mapping keys to asyncio locks to guarantee exclusive access to the cache if not disabled
"""
def __init__(self, name: str, tags: Optional[list[str]] = None, disable_cache: bool = False) -> None:
"""
Constructor of AntaDevice
Args:
name: Device name
tags: List of tags for this device
disable_cache: Disable caching for all commands for this device. Defaults to False.
"""
self.name: str = name
self.hw_model: Optional[str] = None
self.tags: list[str] = tags if tags is not None else []
# A device always has its own name as tag
self.tags.append(self.name)
self.is_online: bool = False
self.established: bool = False
self.cache: Optional[Cache] = None
self.cache_locks: Optional[defaultdict[str, asyncio.Lock]] = None
# Initialize cache if not disabled
if not disable_cache:
self._init_cache()
@property
@abstractmethod
def _keys(self) -> tuple[Any, ...]:
"""
Read-only property to implement hashing and equality for AntaDevice classes.
"""
def __eq__(self, other: object) -> bool:
"""
Implement equality for AntaDevice objects.
"""
return self._keys == other._keys if isinstance(other, self.__class__) else False
def __hash__(self) -> int:
"""
Implement hashing for AntaDevice objects.
"""
return hash(self._keys)
def _init_cache(self) -> None:
"""
Initialize cache for the device, can be overridden by subclasses to manipulate how it works
"""
self.cache = Cache(cache_class=Cache.MEMORY, ttl=60, namespace=self.name, plugins=[HitMissRatioPlugin()])
self.cache_locks = defaultdict(asyncio.Lock)
@property
def cache_statistics(self) -> dict[str, Any] | None:
"""
Returns the device cache statistics for logging purposes
"""
# Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough
# https://github.com/pylint-dev/pylint/issues/7258
if self.cache is not None:
stats = getattr(self.cache, "hit_miss_ratio", {"total": 0, "hits": 0, "hit_ratio": 0})
return {"total_commands_sent": stats["total"], "cache_hits": stats["hits"], "cache_hit_ratio": f"{stats['hit_ratio'] * 100:.2f}%"}
return None
def __rich_repr__(self) -> Iterator[tuple[str, Any]]:
"""
Implements Rich Repr Protocol
https://rich.readthedocs.io/en/stable/pretty.html#rich-repr-protocol
"""
yield "name", self.name
yield "tags", self.tags
yield "hw_model", self.hw_model
yield "is_online", self.is_online
yield "established", self.established
yield "disable_cache", self.cache is None
@abstractmethod
async def _collect(self, command: AntaCommand) -> None:
"""
Collect device command output.
This abstract coroutine can be used to implement any command collection method
for a device in ANTA.
The `_collect()` implementation needs to populate the `output` attribute
of the `AntaCommand` object passed as argument.
If a failure occurs, the `_collect()` implementation is expected to catch the
exception and implement proper logging, the `output` attribute of the
`AntaCommand` object passed as argument would be `None` in this case.
Args:
command: the command to collect
"""
async def collect(self, command: AntaCommand) -> None:
"""
Collects the output for a specified command.
When caching is activated on both the device and the command,
this method prioritizes retrieving the output from the cache. In cases where the output isn't cached yet,
it will be freshly collected and then stored in the cache for future access.
The method employs asynchronous locks based on the command's UID to guarantee exclusive access to the cache.
When caching is NOT enabled, either at the device or command level, the method directly collects the output
via the private `_collect` method without interacting with the cache.
Args:
command (AntaCommand): The command to process.
"""
# Need to ignore pylint no-member as Cache is a proxy class and pylint is not smart enough
# https://github.com/pylint-dev/pylint/issues/7258
if self.cache is not None and self.cache_locks is not None and command.use_cache:
async with self.cache_locks[command.uid]:
cached_output = await self.cache.get(command.uid) # pylint: disable=no-member
if cached_output is not None:
logger.debug(f"Cache hit for {command.command} on {self.name}")
command.output = cached_output
else:
await self._collect(command=command)
await self.cache.set(command.uid, command.output) # pylint: disable=no-member
else:
await self._collect(command=command)
async def collect_commands(self, commands: list[AntaCommand]) -> None:
"""
Collect multiple commands.
Args:
commands: the commands to collect
"""
await asyncio.gather(*(self.collect(command=command) for command in commands))
def supports(self, command: AntaCommand) -> bool:
"""Returns True if the command is supported on the device hardware platform, False otherwise."""
unsupported = any("not supported on this hardware platform" in e for e in command.errors)
logger.debug(command)
if unsupported:
logger.debug(f"{command.command} is not supported on {self.hw_model}")
return not unsupported
@abstractmethod
async def refresh(self) -> None:
"""
Update attributes of an AntaDevice instance.
This coroutine must update the following attributes of AntaDevice:
- `is_online`: When the device IP is reachable and a port can be open
- `established`: When a command execution succeeds
- `hw_model`: The hardware model of the device
"""
async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
"""
Copy files to and from the device, usually through SCP.
It is not mandatory to implement this for a valid AntaDevice subclass.
Args:
sources: List of files to copy to or from the device.
destination: Local or remote destination when copying the files. Can be a folder.
direction: Defines if this coroutine copies files to or from the device.
"""
raise NotImplementedError(f"copy() method has not been implemented in {self.__class__.__name__} definition")
class AsyncEOSDevice(AntaDevice):
"""
Implementation of AntaDevice for EOS using aio-eapi.
Attributes:
name: Device name
is_online: True if the device IP is reachable and a port can be open
established: True if remote command execution succeeds
hw_model: Hardware model of the device
tags: List of tags for this device
"""
def __init__( # pylint: disable=R0913
self,
host: str,
username: str,
password: str,
name: Optional[str] = None,
enable: bool = False,
enable_password: Optional[str] = None,
port: Optional[int] = None,
ssh_port: Optional[int] = 22,
tags: Optional[list[str]] = None,
timeout: Optional[float] = None,
insecure: bool = False,
proto: Literal["http", "https"] = "https",
disable_cache: bool = False,
) -> None:
"""
Constructor of AsyncEOSDevice
Args:
host: Device FQDN or IP
username: Username to connect to eAPI and SSH
password: Password to connect to eAPI and SSH
name: Device name
enable: Device needs privileged access
enable_password: Password used to gain privileged access on EOS
port: eAPI port. Defaults to 80 if proto is 'http' or 443 if proto is 'https'.
ssh_port: SSH port
tags: List of tags for this device
timeout: Timeout value in seconds for outgoing connections. Defaults to 10 seconds.
insecure: Disable SSH Host Key validation
proto: eAPI protocol. Value can be 'http' or 'https'
disable_cache: Disable caching for all commands for this device. Defaults to False.
"""
if host is None:
message = "'host' is required to create an AsyncEOSDevice"
logger.error(message)
raise ValueError(message)
if name is None:
name = f"{host}{f':{port}' if port else ''}"
super().__init__(name, tags, disable_cache)
if username is None:
message = f"'username' is required to instantiate device '{self.name}'"
logger.error(message)
raise ValueError(message)
if password is None:
message = f"'password' is required to instantiate device '{self.name}'"
logger.error(message)
raise ValueError(message)
self.enable = enable
self._enable_password = enable_password
self._session: aioeapi.Device = aioeapi.Device(host=host, port=port, username=username, password=password, proto=proto, timeout=timeout)
ssh_params: dict[str, Any] = {}
if insecure:
ssh_params["known_hosts"] = None
self._ssh_opts: SSHClientConnectionOptions = SSHClientConnectionOptions(host=host, port=ssh_port, username=username, password=password, **ssh_params)
def __rich_repr__(self) -> Iterator[tuple[str, Any]]:
"""
Implements Rich Repr Protocol
https://rich.readthedocs.io/en/stable/pretty.html#rich-repr-protocol
"""
yield from super().__rich_repr__()
yield ("host", self._session.host)
yield ("eapi_port", self._session.port)
yield ("username", self._ssh_opts.username)
yield ("enable", self.enable)
yield ("insecure", self._ssh_opts.known_hosts is None)
if __DEBUG__:
_ssh_opts = vars(self._ssh_opts).copy()
PASSWORD_VALUE = "<removed>"
_ssh_opts["password"] = PASSWORD_VALUE
_ssh_opts["kwargs"]["password"] = PASSWORD_VALUE
yield ("_session", vars(self._session))
yield ("_ssh_opts", _ssh_opts)
@property
def _keys(self) -> tuple[Any, ...]:
"""
Two AsyncEOSDevice objects are equal if the hostname and the port are the same.
This covers the use case of port forwarding when the host is localhost and the devices have different ports.
"""
return (self._session.host, self._session.port)
async def _collect(self, command: AntaCommand) -> None:
"""
Collect device command output from EOS using aio-eapi.
Supports outformat `json` and `text` as output structure.
Gain privileged access using the `enable_password` attribute
of the `AntaDevice` instance if populated.
Args:
command: the command to collect
"""
commands = []
if self.enable and self._enable_password is not None:
commands.append(
{
"cmd": "enable",
"input": str(self._enable_password),
}
)
elif self.enable:
# No password
commands.append({"cmd": "enable"})
if command.revision:
commands.append({"cmd": command.command, "revision": command.revision})
else:
commands.append({"cmd": command.command})
try:
response: list[dict[str, Any]] = await self._session.cli(
commands=commands,
ofmt=command.ofmt,
version=command.version,
)
except aioeapi.EapiCommandError as e:
command.errors = e.errors
if self.supports(command):
message = f"Command '{command.command}' failed on {self.name}"
logger.error(message)
except (HTTPError, ConnectError) as e:
command.errors = [str(e)]
message = f"Cannot connect to device {self.name}"
logger.error(message)
else:
# selecting only our command output
command.output = response[-1]
logger.debug(f"{self.name}: {command}")
async def refresh(self) -> None:
"""
Update attributes of an AsyncEOSDevice instance.
This coroutine must update the following attributes of AsyncEOSDevice:
- is_online: When a device IP is reachable and a port can be open
- established: When a command execution succeeds
- hw_model: The hardware model of the device
"""
logger.debug(f"Refreshing device {self.name}")
self.is_online = await self._session.check_connection()
if self.is_online:
COMMAND: str = "show version"
HW_MODEL_KEY: str = "modelName"
try:
response = await self._session.cli(command=COMMAND)
except aioeapi.EapiCommandError as e:
logger.warning(f"Cannot get hardware information from device {self.name}: {e.errmsg}")
except (HTTPError, ConnectError) as e:
logger.warning(f"Cannot get hardware information from device {self.name}: {exc_to_str(e)}")
else:
if HW_MODEL_KEY in response:
self.hw_model = response[HW_MODEL_KEY]
else:
logger.warning(f"Cannot get hardware information from device {self.name}: cannot parse '{COMMAND}'")
else:
logger.warning(f"Could not connect to device {self.name}: cannot open eAPI port")
self.established = bool(self.is_online and self.hw_model)
async def copy(self, sources: list[Path], destination: Path, direction: Literal["to", "from"] = "from") -> None:
"""
Copy files to and from the device using asyncssh.scp().
Args:
sources: List of files to copy to or from the device.
destination: Local or remote destination when copying the files. Can be a folder.
direction: Defines if this coroutine copies files to or from the device.
"""
async with asyncssh.connect(
host=self._ssh_opts.host,
port=self._ssh_opts.port,
tunnel=self._ssh_opts.tunnel,
family=self._ssh_opts.family,
local_addr=self._ssh_opts.local_addr,
options=self._ssh_opts,
) as conn:
src: Union[list[tuple[SSHClientConnection, Path]], list[Path]]
dst: Union[tuple[SSHClientConnection, Path], Path]
if direction == "from":
src = [(conn, file) for file in sources]
dst = destination
for file in sources:
logger.info(f"Copying '{file}' from device {self.name} to '{destination}' locally")
elif direction == "to":
src = sources
dst = conn, destination
for file in src:
logger.info(f"Copying '{file}' to device {self.name} to '{destination}' remotely")
else:
logger.critical(f"'direction' argument to copy() fonction is invalid: {direction}")
return
await asyncssh.scp(src, dst)
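A brief end-to-end sketch of the intended call flow for AsyncEOSDevice; the host and credentials are placeholders and error handling is omitted.

import asyncio

from anta.device import AsyncEOSDevice
from anta.models import AntaCommand

async def main() -> None:
    device = AsyncEOSDevice(host="10.0.0.1", username="admin", password="admin", name="leaf1")
    await device.refresh()  # populates is_online, established and hw_model
    if device.established:
        command = AntaCommand(command="show version")
        await device.collect(command)  # served from cache on subsequent identical calls
        print(command.json_output.get("version"))

asyncio.run(main())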

282
anta/inventory/__init__.py Normal file
View file

@ -0,0 +1,282 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Inventory Module for ANTA.
"""
from __future__ import annotations
import asyncio
import logging
from ipaddress import ip_address, ip_network
from pathlib import Path
from typing import Any, Optional
from pydantic import ValidationError
from yaml import YAMLError, safe_load
from anta.device import AntaDevice, AsyncEOSDevice
from anta.inventory.exceptions import InventoryIncorrectSchema, InventoryRootKeyError
from anta.inventory.models import AntaInventoryInput
from anta.logger import anta_log_exception
logger = logging.getLogger(__name__)
class AntaInventory(dict): # type: ignore
# dict[str, AntaDevice] - not working in python 3.8 hence the ignore
"""
Inventory abstraction for ANTA framework.
"""
# Root key of inventory part of the inventory file
INVENTORY_ROOT_KEY = "anta_inventory"
# Supported Output format
INVENTORY_OUTPUT_FORMAT = ["native", "json"]
def __str__(self) -> str:
"""Human readable string representing the inventory"""
devs = {}
for dev in self.values():
if (dev_type := dev.__class__.__name__) not in devs:
devs[dev_type] = 1
else:
devs[dev_type] += 1
return f"ANTA Inventory contains {' '.join([f'{n} devices ({t})' for t, n in devs.items()])}"
@staticmethod
def _update_disable_cache(inventory_disable_cache: bool, kwargs: dict[str, Any]) -> dict[str, Any]:
"""
Return a new dictionary based on kwargs, with disable_cache set if it is enabled
either in the inventory (inventory_disable_cache) or by the CLI.
Args:
inventory_disable_cache (bool): The value of disable_cache in the inventory
kwargs: The kwargs to instantiate the device
"""
updated_kwargs = kwargs.copy()
updated_kwargs["disable_cache"] = inventory_disable_cache or kwargs.get("disable_cache")
return updated_kwargs
@staticmethod
def _parse_hosts(inventory_input: AntaInventoryInput, inventory: AntaInventory, **kwargs: Any) -> None:
"""
Parses the host section of an AntaInventoryInput and adds the devices to the inventory
Args:
inventory_input (AntaInventoryInput): AntaInventoryInput used to parse the devices
inventory (AntaInventory): AntaInventory to add the parsed devices to
"""
if inventory_input.hosts is None:
return
for host in inventory_input.hosts:
updated_kwargs = AntaInventory._update_disable_cache(host.disable_cache, kwargs)
device = AsyncEOSDevice(name=host.name, host=str(host.host), port=host.port, tags=host.tags, **updated_kwargs)
inventory.add_device(device)
@staticmethod
def _parse_networks(inventory_input: AntaInventoryInput, inventory: AntaInventory, **kwargs: Any) -> None:
"""
Parses the network section of an AntaInventoryInput and adds the devices to the inventory.
Args:
inventory_input (AntaInventoryInput): AntaInventoryInput used to parse the devices
inventory (AntaInventory): AntaInventory to add the parsed devices to
Raises:
InventoryIncorrectSchema: Inventory file is not following AntaInventory Schema.
"""
if inventory_input.networks is None:
return
for network in inventory_input.networks:
try:
updated_kwargs = AntaInventory._update_disable_cache(network.disable_cache, kwargs)
for host_ip in ip_network(str(network.network)):
device = AsyncEOSDevice(host=str(host_ip), tags=network.tags, **updated_kwargs)
inventory.add_device(device)
except ValueError as e:
message = "Could not parse network {network.network} in the inventory"
anta_log_exception(e, message, logger)
raise InventoryIncorrectSchema(message) from e
@staticmethod
def _parse_ranges(inventory_input: AntaInventoryInput, inventory: AntaInventory, **kwargs: Any) -> None:
"""
Parses the range section of an AntaInventoryInput and adds the devices to the inventory.
Args:
inventory_input (AntaInventoryInput): AntaInventoryInput used to parse the devices
inventory (AntaInventory): AntaInventory to add the parsed devices to
Raises:
InventoryIncorrectSchema: Inventory file is not following AntaInventory Schema.
"""
if inventory_input.ranges is None:
return
for range_def in inventory_input.ranges:
try:
updated_kwargs = AntaInventory._update_disable_cache(range_def.disable_cache, kwargs)
range_increment = ip_address(str(range_def.start))
range_stop = ip_address(str(range_def.end))
while range_increment <= range_stop: # type: ignore[operator]
# mypy raises an issue about comparing IPv4Address and IPv6Address
# but this is handled by the ipaddress module natively by raising a TypeError
device = AsyncEOSDevice(host=str(range_increment), tags=range_def.tags, **updated_kwargs)
inventory.add_device(device)
range_increment += 1
except ValueError as e:
message = f"Could not parse the following range in the inventory: {range_def.start} - {range_def.end}"
anta_log_exception(e, message, logger)
raise InventoryIncorrectSchema(message) from e
except TypeError as e:
message = f"A range in the inventory has different address families for start and end: {range_def.start} - {range_def.end}"
anta_log_exception(e, message, logger)
raise InventoryIncorrectSchema(message) from e
@staticmethod
def parse(
filename: str | Path,
username: str,
password: str,
enable: bool = False,
enable_password: Optional[str] = None,
timeout: Optional[float] = None,
insecure: bool = False,
disable_cache: bool = False,
) -> AntaInventory:
# pylint: disable=too-many-arguments
"""
Create an AntaInventory instance from an inventory file.
The inventory devices are AsyncEOSDevice instances.
Args:
filename (str): Path to device inventory YAML file
username (str): Username to use to connect to devices
password (str): Password to use to connect to devices
enable (bool): Whether or not the commands need to be run in enable mode towards the devices
enable_password (str, optional): Enable password to use if required
timeout (float, optional): timeout in seconds for every API call.
insecure (bool): Disable SSH Host Key validation
disable_cache (bool): Disable cache globally
Raises:
InventoryRootKeyError: Root key of inventory is missing.
InventoryIncorrectSchema: Inventory file is not following AntaInventory Schema.
"""
inventory = AntaInventory()
kwargs: dict[str, Any] = {
"username": username,
"password": password,
"enable": enable,
"enable_password": enable_password,
"timeout": timeout,
"insecure": insecure,
"disable_cache": disable_cache,
}
if username is None:
message = "'username' is required to create an AntaInventory"
logger.error(message)
raise ValueError(message)
if password is None:
message = "'password' is required to create an AntaInventory"
logger.error(message)
raise ValueError(message)
try:
with open(file=filename, mode="r", encoding="UTF-8") as file:
data = safe_load(file)
except (TypeError, YAMLError, OSError) as e:
message = f"Unable to parse ANTA Device Inventory file '{filename}'"
anta_log_exception(e, message, logger)
raise
if AntaInventory.INVENTORY_ROOT_KEY not in data:
exc = InventoryRootKeyError(f"Inventory root key ({AntaInventory.INVENTORY_ROOT_KEY}) is not defined in your inventory")
anta_log_exception(exc, f"Device inventory is invalid! (from {filename})", logger)
raise exc
try:
inventory_input = AntaInventoryInput(**data[AntaInventory.INVENTORY_ROOT_KEY])
except ValidationError as e:
anta_log_exception(e, f"Device inventory is invalid! (from {filename})", logger)
raise
# Read data from input
AntaInventory._parse_hosts(inventory_input, inventory, **kwargs)
AntaInventory._parse_networks(inventory_input, inventory, **kwargs)
AntaInventory._parse_ranges(inventory_input, inventory, **kwargs)
return inventory
###########################################################################
# Public methods
###########################################################################
###########################################################################
# GET methods
###########################################################################
def get_inventory(self, established_only: bool = False, tags: Optional[list[str]] = None) -> AntaInventory:
"""
Returns a filtered inventory.
Args:
established_only: Whether or not to include only established devices. Default False.
tags: List of tags to filter devices.
Returns:
AntaInventory: An inventory with filtered AntaDevice objects.
"""
def _filter_devices(device: AntaDevice) -> bool:
"""
Helper function to select the devices based on the input tags
and the requirement for an established connection.
"""
if tags is not None and all(tag not in tags for tag in device.tags):
return False
return bool(not established_only or device.established)
devices: list[AntaDevice] = list(filter(_filter_devices, self.values()))
result = AntaInventory()
for device in devices:
result.add_device(device)
return result
###########################################################################
# SET methods
###########################################################################
def __setitem__(self, key: str, value: AntaDevice) -> None:
if key != value.name:
raise RuntimeError(f"The key must be the device name for device '{value.name}'. Use AntaInventory.add_device().")
return super().__setitem__(key, value)
def add_device(self, device: AntaDevice) -> None:
"""Add a device to final inventory.
Args:
device: Device object to be added
"""
self[device.name] = device
###########################################################################
# MISC methods
###########################################################################
async def connect_inventory(self) -> None:
"""Run `refresh()` coroutines for all AntaDevice objects in this inventory."""
logger.debug("Refreshing devices...")
results = await asyncio.gather(
*(device.refresh() for device in self.values()),
return_exceptions=True,
)
for r in results:
if isinstance(r, Exception):
message = "Error when refreshing inventory"
anta_log_exception(r, message, logger)
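A short sketch, with a placeholder inventory path and credentials, of the intended call sequence: parse the inventory file, refresh every device, then keep only the devices that answered.

import asyncio

from anta.inventory import AntaInventory

inventory = AntaInventory.parse(
    filename="inventory.yml",  # placeholder path
    username="admin",
    password="admin",
)
asyncio.run(inventory.connect_inventory())
reachable = inventory.get_inventory(established_only=True)
print(f"{len(reachable)} of {len(inventory)} devices are reachable")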

12
anta/inventory/exceptions.py Normal file
View file

@ -0,0 +1,12 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Manage Exception in Inventory module."""
class InventoryRootKeyError(Exception):
"""Error raised when inventory root key is not found."""
class InventoryIncorrectSchema(Exception):
"""Error when user data does not follow ANTA schema."""

92
anta/inventory/models.py Normal file
View file

@ -0,0 +1,92 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Models related to inventory management."""
from __future__ import annotations
import logging
from typing import List, Optional, Union
# Need to keep List for pydantic in python 3.8
from pydantic import BaseModel, ConfigDict, IPvAnyAddress, IPvAnyNetwork, conint, constr
logger = logging.getLogger(__name__)
# Pydantic models for input validation
RFC_1123_REGEX = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
class AntaInventoryHost(BaseModel):
"""
Host definition for user's inventory.
Attributes:
host (IPvAnyAddress): IPv4 or IPv6 address of the device
port (int): (Optional) eAPI port to use. Defaults to 443.
name (str): (Optional) Name to display during the test report. Defaults to hostname:port.
tags (list[str]): List of attached tags read from inventory file.
disable_cache (bool): Disable cache per host. Defaults to False.
"""
model_config = ConfigDict(extra="forbid")
name: Optional[str] = None
host: Union[constr(pattern=RFC_1123_REGEX), IPvAnyAddress] # type: ignore
port: Optional[conint(gt=1, lt=65535)] = None # type: ignore
tags: Optional[List[str]] = None
disable_cache: bool = False
class AntaInventoryNetwork(BaseModel):
"""
Network definition for user's inventory.
Attributes:
network (IPvAnyNetwork): Subnet to use for testing.
tags (list[str]): List of attached tags read from inventory file.
disable_cache (bool): Disable cache per network. Defaults to False.
"""
model_config = ConfigDict(extra="forbid")
network: IPvAnyNetwork
tags: Optional[List[str]] = None
disable_cache: bool = False
class AntaInventoryRange(BaseModel):
"""
IP Range definition for user's inventory.
Attributes:
start (IPvAnyAddress): IPv4 or IPv6 address for the beginning of the range.
end (IPvAnyAddress): IPv4 or IPv6 address for the end of the range.
tags (list[str]): List of attached tags read from inventory file.
disable_cache (bool): Disable cache per range of hosts. Defaults to False.
"""
model_config = ConfigDict(extra="forbid")
start: IPvAnyAddress
end: IPvAnyAddress
tags: Optional[List[str]] = None
disable_cache: bool = False
class AntaInventoryInput(BaseModel):
"""
User's inventory model.
Attributes:
networks (list[AntaInventoryNetwork], Optional): List of AntaInventoryNetwork objects for networks.
hosts (list[AntaInventoryHost], Optional): List of AntaInventoryHost objects for hosts.
ranges (list[AntaInventoryRange], Optional): List of AntaInventoryRange objects for ranges.
"""
model_config = ConfigDict(extra="forbid")
networks: Optional[List[AntaInventoryNetwork]] = None
hosts: Optional[List[AntaInventoryHost]] = None
ranges: Optional[List[AntaInventoryRange]] = None
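A minimal construction sketch showing the three supported inventory sections being validated by these models; all addresses are documentation placeholders.

from anta.inventory.models import AntaInventoryHost, AntaInventoryInput, AntaInventoryNetwork, AntaInventoryRange

inventory_input = AntaInventoryInput(
    hosts=[AntaInventoryHost(host="192.0.2.10", name="leaf1", tags=["leaf"])],
    networks=[AntaInventoryNetwork(network="192.0.2.0/28")],
    ranges=[AntaInventoryRange(start="192.0.2.20", end="192.0.2.29")],
)
print(inventory_input.model_dump(exclude_none=True))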

107
anta/logger.py Normal file
View file

@ -0,0 +1,107 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Configure logging for ANTA
"""
from __future__ import annotations
import logging
from enum import Enum
from pathlib import Path
from typing import Literal, Optional
from rich.logging import RichHandler
from anta import __DEBUG__
from anta.tools.misc import exc_to_str
logger = logging.getLogger(__name__)
class Log(str, Enum):
"""Represent log levels from logging module as immutable strings"""
CRITICAL = logging.getLevelName(logging.CRITICAL)
ERROR = logging.getLevelName(logging.ERROR)
WARNING = logging.getLevelName(logging.WARNING)
INFO = logging.getLevelName(logging.INFO)
DEBUG = logging.getLevelName(logging.DEBUG)
LogLevel = Literal[Log.CRITICAL, Log.ERROR, Log.WARNING, Log.INFO, Log.DEBUG]
def setup_logging(level: LogLevel = Log.INFO, file: Path | None = None) -> None:
"""
Configure logging for ANTA.
By default, the logging level is INFO for all loggers except for httpx and asyncssh which are too verbose:
their logging level is WARNING.
If logging level DEBUG is selected, all loggers will be configured with this level.
In ANTA Debug Mode (environment variable `ANTA_DEBUG=true`), Python tracebacks are logged and logging level is
overwritten to be DEBUG.
If a file is provided, logs will also be sent to the file in addition to stdout.
If a file is provided and logging level is DEBUG, only the logging level INFO and higher will
be logged to stdout while all levels will be logged in the file.
Args:
level: ANTA logging level
file: Send logs to a file
"""
# Init root logger
root = logging.getLogger()
# In ANTA debug mode, level is overridden to DEBUG
loglevel = logging.DEBUG if __DEBUG__ else getattr(logging, level.upper())
root.setLevel(loglevel)
# Silence the logging of chatty Python modules when level is INFO
if loglevel == logging.INFO:
# asyncssh is really chatty
logging.getLogger("asyncssh").setLevel(logging.WARNING)
# httpx as well
logging.getLogger("httpx").setLevel(logging.WARNING)
# Add RichHandler for stdout
richHandler = RichHandler(markup=True, rich_tracebacks=True, tracebacks_show_locals=False)
# In ANTA debug mode, show Python module in stdout
if __DEBUG__:
fmt_string = r"[grey58]\[%(name)s][/grey58] %(message)s"
else:
fmt_string = "%(message)s"
formatter = logging.Formatter(fmt=fmt_string, datefmt="[%X]")
richHandler.setFormatter(formatter)
root.addHandler(richHandler)
# Add FileHandler if file is provided
if file:
fileHandler = logging.FileHandler(file)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fileHandler.setFormatter(formatter)
root.addHandler(fileHandler)
# If level is DEBUG and file is provided, do not send DEBUG level to stdout
if loglevel == logging.DEBUG:
richHandler.setLevel(logging.INFO)
if __DEBUG__:
logger.debug("ANTA Debug Mode enabled")
def anta_log_exception(exception: BaseException, message: Optional[str] = None, calling_logger: Optional[logging.Logger] = None) -> None:
"""
Helper function to log exceptions:
* the exception is always logged with CRITICAL severity
* if anta.__DEBUG__ is True, the logger.exception method is also called to log the traceback
Args:
exception (BaseException): The exception being logged
message (str): An optional message
calling_logger (logging.Logger): A logger to which the exception should be logged
if not present, the logger in this file is used.
"""
if calling_logger is None:
calling_logger = logger
calling_logger.critical(f"{message}\n{exc_to_str(exception)}" if message else exc_to_str(exception))
if __DEBUG__:
calling_logger.exception(f"[ANTA Debug Mode]{f' {message}' if message else ''}", exc_info=exception)

541
anta/models.py Normal file
View file

@ -0,0 +1,541 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Models to define a TestStructure
"""
from __future__ import annotations
import hashlib
import logging
import re
import time
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import timedelta
from functools import wraps
# Need to keep Dict and List for pydantic in python 3.8
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Coroutine, Dict, List, Literal, Optional, TypeVar, Union
from pydantic import BaseModel, ConfigDict, ValidationError, conint
from rich.progress import Progress, TaskID
from anta import GITHUB_SUGGESTION
from anta.logger import anta_log_exception
from anta.result_manager.models import TestResult
from anta.tools.misc import exc_to_str
if TYPE_CHECKING:
from anta.device import AntaDevice
F = TypeVar("F", bound=Callable[..., Any])
# Proper way to type input class - revisit this later if we get any issue @gmuloc
# This would imply overhead to define classes
# https://stackoverflow.com/questions/74103528/type-hinting-an-instance-of-a-nested-class
# N = TypeVar("N", bound="AntaTest.Input")
# TODO - make this configurable - with an env var maybe?
BLACKLIST_REGEX = [r"^reload.*", r"^conf\w*\s*(terminal|session)*", r"^wr\w*\s*\w+"]
logger = logging.getLogger(__name__)
class AntaMissingParamException(Exception):
"""
This Exception should be used when an expected key in an AntaCommand.params dictionary
was not found.
This Exception should in general never be raised in normal usage of ANTA.
"""
def __init__(self, message: str) -> None:
self.message = "\n".join([message, GITHUB_SUGGESTION])
super().__init__(self.message)
class AntaTemplate(BaseModel):
"""Class to define a command template as Python f-string.
Can render a command from parameters.
Attributes:
template: Python f-string. Example: 'show vlan {vlan_id}'
version: eAPI version - valid values are 1 or "latest" - default is "latest"
revision: Revision of the command. Valid values are 1 to 99. Revision has precedence over version.
ofmt: eAPI output - json or text - default is json
use_cache: Enable or disable caching for this AntaTemplate if the AntaDevice supports it - default is True
"""
template: str
version: Literal[1, "latest"] = "latest"
revision: Optional[conint(ge=1, le=99)] = None # type: ignore
ofmt: Literal["json", "text"] = "json"
use_cache: bool = True
def render(self, **params: dict[str, Any]) -> AntaCommand:
"""Render an AntaCommand from an AntaTemplate instance.
Keep the parameters used in the AntaTemplate instance.
Args:
params: dictionary of variables with string values to render the Python f-string
Returns:
command: The rendered AntaCommand.
This AntaCommand instance has a template attribute that references this
AntaTemplate instance.
"""
try:
return AntaCommand(
command=self.template.format(**params),
ofmt=self.ofmt,
version=self.version,
revision=self.revision,
template=self,
params=params,
use_cache=self.use_cache,
)
except KeyError as e:
raise AntaTemplateRenderError(self, e.args[0]) from e
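# Illustrative sketch (comments only): rendering a command from a template.
# The VLAN id is an arbitrary example value.
#
#   template = AntaTemplate(template="show vlan {vlan_id}")
#   command = template.render(vlan_id=10)
#   command.command  -> "show vlan 10"
#   command.params   -> {"vlan_id": 10}
#   command.template -> the AntaTemplate instance above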
class AntaCommand(BaseModel):
"""Class to define a command.
!!! info
eAPI models are revisioned, this means that if a model is modified in a non-backwards compatible way, then its revision will be bumped up
(revisions are numbers, default value is 1).
By default an eAPI request will return revision 1 of the model instance,
this ensures that older management software will not suddenly stop working when a switch is upgraded.
A **revision** applies to a particular CLI command whereas a **version** is global and is internally
translated to a specific **revision** for each CLI command in the RPC.
__Revision has precedence over version.__
Attributes:
command: Device command
version: eAPI version - valid values are 1 or "latest" - default is "latest"
revision: eAPI revision of the command. Valid values are 1 to 99. Revision has precedence over version.
ofmt: eAPI output - json or text - default is json
output: Output of the command populated by the collect() function
template: AntaTemplate object used to render this command
params: Dictionary of variables with string values to render the template
errors: If the command execution fails, eAPI returns a list of strings detailing the error
use_cache: Enable or disable caching for this AntaCommand if the AntaDevice supports it - default is True
"""
command: str
version: Literal[1, "latest"] = "latest"
revision: Optional[conint(ge=1, le=99)] = None # type: ignore
ofmt: Literal["json", "text"] = "json"
output: Optional[Union[Dict[str, Any], str]] = None
template: Optional[AntaTemplate] = None
errors: List[str] = []
params: Dict[str, Any] = {}
use_cache: bool = True
@property
def uid(self) -> str:
"""Generate a unique identifier for this command"""
uid_str = f"{self.command}_{self.version}_{self.revision or 'NA'}_{self.ofmt}"
return hashlib.sha1(uid_str.encode()).hexdigest()
@property
def json_output(self) -> dict[str, Any]:
"""Get the command output as JSON"""
if self.output is None:
raise RuntimeError(f"There is no output for command {self.command}")
if self.ofmt != "json" or not isinstance(self.output, dict):
raise RuntimeError(f"Output of command {self.command} is invalid")
return dict(self.output)
@property
def text_output(self) -> str:
"""Get the command output as a string"""
if self.output is None:
raise RuntimeError(f"There is no output for command {self.command}")
if self.ofmt != "text" or not isinstance(self.output, str):
raise RuntimeError(f"Output of command {self.command} is invalid")
return str(self.output)
@property
def collected(self) -> bool:
"""Return True if the command has been collected"""
return self.output is not None and not self.errors
class AntaTemplateRenderError(RuntimeError):
"""
Raised when an AntaTemplate object could not be rendered
because of missing parameters
"""
def __init__(self, template: AntaTemplate, key: str):
"""Constructor for AntaTemplateRenderError
Args:
template: The AntaTemplate instance that failed to render
key: Key that has not been provided to render the template
"""
self.template = template
self.key = key
super().__init__(f"'{self.key}' was not provided for template '{self.template.template}'")
class AntaTest(ABC):
"""Abstract class defining a test in ANTA
The goal of this class is to handle the heavy lifting and make
writing a test as simple as possible.
Examples:
The following is an example of an AntaTest subclass implementation:
```python
class VerifyReachability(AntaTest):
name = "VerifyReachability"
description = "Test the network reachability to one or many destination IP(s)."
categories = ["connectivity"]
commands = [AntaTemplate(template="ping vrf {vrf} {dst} source {src} repeat 2")]
class Input(AntaTest.Input):
hosts: list[Host]
class Host(BaseModel):
dst: IPv4Address
src: IPv4Address
vrf: str = "default"
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render({"dst": host.dst, "src": host.src, "vrf": host.vrf}) for host in self.inputs.hosts]
@AntaTest.anta_test
def test(self) -> None:
failures = []
for command in self.instance_commands:
if command.params and ("src" and "dst") in command.params:
src, dst = command.params["src"], command.params["dst"]
if "2 received" not in command.json_output["messages"][0]:
failures.append((str(src), str(dst)))
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}")
```
Attributes:
device: AntaDevice instance on which this test is run
inputs: AntaTest.Input instance carrying the test inputs
instance_commands: List of AntaCommand instances of this test
result: TestResult instance representing the result of this test
logger: Python logger for this test instance
"""
# Mandatory class attributes
# TODO - find a way to tell mypy these are mandatory for child classes - maybe Protocol
name: ClassVar[str]
description: ClassVar[str]
categories: ClassVar[list[str]]
commands: ClassVar[list[Union[AntaTemplate, AntaCommand]]]
# Class attributes to handle the progress bar of ANTA CLI
progress: Optional[Progress] = None
nrfu_task: Optional[TaskID] = None
class Input(BaseModel):
"""Class defining inputs for a test in ANTA.
Examples:
A valid test catalog will look like the following:
```yaml
<Python module>:
- <AntaTest subclass>:
result_overwrite:
categories:
- "Overwritten category 1"
description: "Test with overwritten description"
custom_field: "Test run by John Doe"
```
Attributes:
result_overwrite: Define fields to overwrite in the TestResult object
"""
model_config = ConfigDict(extra="forbid")
result_overwrite: Optional[ResultOverwrite] = None
filters: Optional[Filters] = None
def __hash__(self) -> int:
"""
Implement generic hashing for AntaTest.Input.
This will work in most cases but this does not consider 2 lists with different ordering as equal.
"""
return hash(self.model_dump_json())
class ResultOverwrite(BaseModel):
"""Test inputs model to overwrite result fields
Attributes:
description: overwrite TestResult.description
categories: overwrite TestResult.categories
custom_field: a free string that will be included in the TestResult object
"""
model_config = ConfigDict(extra="forbid")
description: Optional[str] = None
categories: Optional[List[str]] = None
custom_field: Optional[str] = None
class Filters(BaseModel):
"""Runtime filters to map tests with list of tags or devices
Attributes:
tags: List of device's tags for the test.
"""
model_config = ConfigDict(extra="forbid")
tags: Optional[List[str]] = None
def __init__(
self,
device: AntaDevice,
inputs: dict[str, Any] | AntaTest.Input | None = None,
eos_data: list[dict[Any, Any] | str] | None = None,
):
"""AntaTest Constructor
Args:
device: AntaDevice instance on which the test will be run
inputs: dictionary of attributes used to instantiate the AntaTest.Input instance
eos_data: Populate outputs of the test commands instead of collecting from devices.
This list must have the same length and order as the `instance_commands` instance attribute.
"""
self.logger: logging.Logger = logging.getLogger(f"{self.__module__}.{self.__class__.__name__}")
self.device: AntaDevice = device
self.inputs: AntaTest.Input
self.instance_commands: list[AntaCommand] = []
self.result: TestResult = TestResult(name=device.name, test=self.name, categories=self.categories, description=self.description)
self._init_inputs(inputs)
if self.result.result == "unset":
self._init_commands(eos_data)
def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None:
"""Instantiate the `inputs` instance attribute with an `AntaTest.Input` instance
to validate test inputs from defined model.
Overwrite result fields based on `ResultOverwrite` input definition.
Any input validation error will set this test result status as 'error'."""
try:
if inputs is None:
self.inputs = self.Input()
elif isinstance(inputs, AntaTest.Input):
self.inputs = inputs
elif isinstance(inputs, dict):
self.inputs = self.Input(**inputs)
except ValidationError as e:
message = f"{self.__module__}.{self.__class__.__name__}: Inputs are not valid\n{e}"
self.logger.error(message)
self.result.is_error(message=message)
return
if res_ow := self.inputs.result_overwrite:
if res_ow.categories:
self.result.categories = res_ow.categories
if res_ow.description:
self.result.description = res_ow.description
self.result.custom_field = res_ow.custom_field
def _init_commands(self, eos_data: Optional[list[dict[Any, Any] | str]]) -> None:
"""Instantiate the `instance_commands` instance attribute from the `commands` class attribute.
- Copy of the `AntaCommand` instances
- Render all `AntaTemplate` instances using the `render()` method
Any template rendering error will set this test result status as 'error'.
Any exception in user code in `render()` will set this test result status as 'error'.
"""
if self.__class__.commands:
for cmd in self.__class__.commands:
if isinstance(cmd, AntaCommand):
self.instance_commands.append(deepcopy(cmd))
elif isinstance(cmd, AntaTemplate):
try:
self.instance_commands.extend(self.render(cmd))
except AntaTemplateRenderError as e:
self.result.is_error(message=f"Cannot render template {{{e.template}}}")
return
except NotImplementedError as e:
self.result.is_error(message=e.args[0])
return
except Exception as e: # pylint: disable=broad-exception-caught
# render() is user-defined code.
# We need to catch everything if we want the AntaTest object
# to live until the reporting
message = f"Exception in {self.__module__}.{self.__class__.__name__}.render()"
anta_log_exception(e, message, self.logger)
self.result.is_error(message=f"{message}: {exc_to_str(e)}")
return
if eos_data is not None:
self.logger.debug(f"Test {self.name} initialized with input data")
self.save_commands_data(eos_data)
def save_commands_data(self, eos_data: list[dict[str, Any] | str]) -> None:
"""Populate output of all AntaCommand instances in `instance_commands`"""
if len(eos_data) > len(self.instance_commands):
self.result.is_error(message="Test initialization error: Trying to save more data than there are commands for the test")
return
if len(eos_data) < len(self.instance_commands):
self.result.is_error(message="Test initialization error: Trying to save less data than there are commands for the test")
return
for index, data in enumerate(eos_data or []):
self.instance_commands[index].output = data
def __init_subclass__(cls) -> None:
"""Verify that the mandatory class attributes are defined"""
mandatory_attributes = ["name", "description", "categories", "commands"]
for attr in mandatory_attributes:
if not hasattr(cls, attr):
raise NotImplementedError(f"Class {cls.__module__}.{cls.__name__} is missing required class attribute {attr}")
@property
def collected(self) -> bool:
"""Returns True if all commands for this test have been collected."""
return all(command.collected for command in self.instance_commands)
@property
def failed_commands(self) -> list[AntaCommand]:
"""Returns a list of all the commands that have failed."""
return [command for command in self.instance_commands if command.errors]
def render(self, template: AntaTemplate) -> list[AntaCommand]:
"""Render an AntaTemplate instance of this AntaTest using the provided
AntaTest.Input instance at self.inputs.
This is not an abstract method because it does not need to be implemented if there is
no AntaTemplate for this test."""
raise NotImplementedError(f"AntaTemplate are provided but render() method has not been implemented for {self.__module__}.{self.name}")
@property
def blocked(self) -> bool:
"""Check if CLI commands contain a blocked keyword."""
state = False
for command in self.instance_commands:
for pattern in BLACKLIST_REGEX:
if re.match(pattern, command.command):
self.logger.error(f"Command <{command.command}> is blocked for security reason matching {BLACKLIST_REGEX}")
self.result.is_error(f"<{command.command}> is blocked for security reason")
state = True
return state
async def collect(self) -> None:
"""
Method used to collect outputs of all commands of this test class from the device of this test instance.
"""
try:
if self.blocked is False:
await self.device.collect_commands(self.instance_commands)
except Exception as e: # pylint: disable=broad-exception-caught
# device._collect() is user-defined code.
# We need to catch everything if we want the AntaTest object
# to live until the reporting
message = f"Exception raised while collecting commands for test {self.name} (on device {self.device.name})"
anta_log_exception(e, message, self.logger)
self.result.is_error(message=exc_to_str(e))
@staticmethod
def anta_test(function: F) -> Callable[..., Coroutine[Any, Any, TestResult]]:
"""
Decorator for the `test()` method.
This decorator implements (in this order):
1. Instantiate the command outputs if `eos_data` is provided to the `test()` method
2. Collect the commands from the device
3. Run the `test()` method
4. Catches any exception in `test()` user code and set the `result` instance attribute
"""
@wraps(function)
async def wrapper(
self: AntaTest,
eos_data: list[dict[Any, Any] | str] | None = None,
**kwargs: Any,
) -> TestResult:
"""
Args:
eos_data: Populate outputs of the test commands instead of collecting from devices.
This list must have the same length and order as the `instance_commands` instance attribute.
Returns:
result: TestResult instance attribute populated with error status if any
"""
def format_td(seconds: float, digits: int = 3) -> str:
isec, fsec = divmod(round(seconds * 10**digits), 10**digits)
return f"{timedelta(seconds=isec)}.{fsec:0{digits}.0f}"
start_time = time.time()
if self.result.result != "unset":
return self.result
# Data
if eos_data is not None:
self.save_commands_data(eos_data)
self.logger.debug(f"Test {self.name} initialized with input data {eos_data}")
# If some data is missing, try to collect
if not self.collected:
await self.collect()
if self.result.result != "unset":
return self.result
if cmds := self.failed_commands:
self.logger.debug(self.device.supports)
unsupported_commands = [f"Skipped because {c.command} is not supported on {self.device.hw_model}" for c in cmds if not self.device.supports(c)]
self.logger.debug(unsupported_commands)
if unsupported_commands:
self.logger.warning(f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}")
self.result.is_skipped("\n".join(unsupported_commands))
return self.result
self.result.is_error(message="\n".join([f"{c.command} has failed: {', '.join(c.errors)}" for c in cmds]))
return self.result
try:
function(self, **kwargs)
except Exception as e: # pylint: disable=broad-exception-caught
# test() is user-defined code.
# We need to catch everything if we want the AntaTest object
# to live until the reporting
message = f"Exception raised for test {self.name} (on device {self.device.name})"
anta_log_exception(e, message, self.logger)
self.result.is_error(message=exc_to_str(e))
test_duration = time.time() - start_time
self.logger.debug(f"Executing test {self.name} on device {self.device.name} took {format_td(test_duration)}")
AntaTest.update_progress()
return self.result
return wrapper
@classmethod
def update_progress(cls) -> None:
"""
Update progress bar for all AntaTest objects if it exists
"""
if cls.progress and (cls.nrfu_task is not None):
cls.progress.update(cls.nrfu_task, advance=1)
@abstractmethod
def test(self) -> Coroutine[Any, Any, TestResult]:
"""
This abstract method is the core of the test logic.
It must set the correct status of the `result` instance attribute
with the appropriate outcome of the test.
Examples:
It must be implemented using the `AntaTest.anta_test` decorator:
```python
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
for command in self.instance_commands:
if not self._test_command(command): # _test_command() is an arbitrary test logic
self.result.is_failure("Failure reson")
```
"""

251
anta/reporter/__init__.py Normal file

@ -0,0 +1,251 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Report management for ANTA.
"""
# pylint: disable = too-few-public-methods
from __future__ import annotations
import logging
import os.path
import pathlib
from typing import Any, Optional
from jinja2 import Template
from rich.table import Table
from anta import RICH_COLOR_PALETTE, RICH_COLOR_THEME
from anta.custom_types import TestStatus
from anta.result_manager import ResultManager
logger = logging.getLogger(__name__)
class ReportTable:
"""TableReport Generate a Table based on TestResult."""
def _split_list_to_txt_list(self, usr_list: list[str], delimiter: Optional[str] = None) -> str:
"""
Split a list into a multi-line string.
Args:
usr_list (list[str]): List of strings to concatenate.
delimiter (str, optional): A delimiter to prepend to each line. Defaults to None.
Returns:
str: Multi-line string.
"""
if delimiter is not None:
return "\n".join(f"{delimiter} {line}" for line in usr_list)
return "\n".join(f"{line}" for line in usr_list)
def _build_headers(self, headers: list[str], table: Table) -> Table:
"""
Create headers for a table.
First key is considered as header and is colored using RICH_COLOR_PALETTE.HEADER
Args:
headers (list[str]): List of headers
table (Table): A rich Table instance
Returns:
Table: A rich Table instance with headers
"""
for idx, header in enumerate(headers):
if idx == 0:
table.add_column(header, justify="left", style=RICH_COLOR_PALETTE.HEADER, no_wrap=True)
elif header == "Test Name":
# We always want the full test name
table.add_column(header, justify="left", no_wrap=True)
else:
table.add_column(header, justify="left")
return table
def _color_result(self, status: TestStatus) -> str:
"""
Return a colored string based on the status value.
Args:
status (TestStatus): status value to color
Returns:
str: the colored string
"""
color = RICH_COLOR_THEME.get(status, "")
return f"[{color}]{status}" if color != "" else str(status)
def report_all(
self,
result_manager: ResultManager,
host: Optional[str] = None,
testcase: Optional[str] = None,
title: str = "All tests results",
) -> Table:
"""
Create a table report with all tests for one or all devices.
Create table with full output: Host / Test / Status / Message
Args:
result_manager (ResultManager): A manager with a list of tests.
host (str, optional): IP Address of a host to search for. Defaults to None.
testcase (str, optional): A test name to search for. Defaults to None.
title (str, optional): Title for the report. Defaults to 'All tests results'.
Returns:
Table: A fully populated rich Table
"""
table = Table(title=title, show_lines=True)
headers = ["Device", "Test Name", "Test Status", "Message(s)", "Test description", "Test category"]
table = self._build_headers(headers=headers, table=table)
for result in result_manager.get_results():
# pylint: disable=R0916
if (host is None and testcase is None) or (host is not None and str(result.name) == host) or (testcase is not None and testcase == str(result.test)):
state = self._color_result(result.result)
message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else ""
categories = ", ".join(result.categories)
table.add_row(str(result.name), result.test, state, message, result.description, categories)
return table
def report_summary_tests(
self,
result_manager: ResultManager,
testcase: Optional[str] = None,
title: str = "Summary per test case",
) -> Table:
"""
Create a table report with results aggregated per test.
Create table with full output: Test / Number of success / Number of failure / Number of error / List of nodes in error or failure
Args:
result_manager (ResultManager): A manager with a list of tests.
testcase (str, optional): A test name to search for. Defaults to None.
title (str, optional): Title for the report. Defaults to 'Summary per test case'.
Returns:
Table: A fully populated rich Table
"""
# sourcery skip: class-extract-method
table = Table(title=title, show_lines=True)
headers = [
"Test Case",
"# of success",
"# of skipped",
"# of failure",
"# of errors",
"List of failed or error nodes",
]
table = self._build_headers(headers=headers, table=table)
for testcase_read in result_manager.get_testcases():
if testcase is None or str(testcase_read) == testcase:
results = result_manager.get_result_by_test(testcase_read)
nb_failure = len([result for result in results if result.result == "failure"])
nb_error = len([result for result in results if result.result == "error"])
list_failure = [str(result.name) for result in results if result.result in ["failure", "error"]]
nb_success = len([result for result in results if result.result == "success"])
nb_skipped = len([result for result in results if result.result == "skipped"])
table.add_row(
testcase_read,
str(nb_success),
str(nb_skipped),
str(nb_failure),
str(nb_error),
str(list_failure),
)
return table
def report_summary_hosts(
self,
result_manager: ResultManager,
host: Optional[str] = None,
title: str = "Summary per host",
) -> Table:
"""
Create a table report with results aggregated per host.
Create table with full output: Host / Number of success / Number of failure / Number of error / List of nodes in error or failure
Args:
result_manager (ResultManager): A manager with a list of tests.
host (str, optional): IP Address of a host to search for. Defaults to None.
title (str, optional): Title for the report. Defaults to 'Summary per host'.
Returns:
Table: A fully populated rich Table
"""
table = Table(title=title, show_lines=True)
headers = [
"Device",
"# of success",
"# of skipped",
"# of failure",
"# of errors",
"List of failed or error test cases",
]
table = self._build_headers(headers=headers, table=table)
for host_read in result_manager.get_hosts():
if host is None or str(host_read) == host:
results = result_manager.get_result_by_host(host_read)
logger.debug("data to use for computation")
logger.debug(f"{host}: {results}")
nb_failure = len([result for result in results if result.result == "failure"])
nb_error = len([result for result in results if result.result == "error"])
list_failure = [str(result.test) for result in results if result.result in ["failure", "error"]]
nb_success = len([result for result in results if result.result == "success"])
nb_skipped = len([result for result in results if result.result == "skipped"])
table.add_row(
str(host_read),
str(nb_success),
str(nb_skipped),
str(nb_failure),
str(nb_error),
str(list_failure),
)
return table
class ReportJinja:
"""Report builder based on a Jinja2 template."""
def __init__(self, template_path: pathlib.Path) -> None:
if os.path.isfile(template_path):
self.template_path = template_path
else:
raise FileNotFoundError(f"template file is not found: {template_path}")
def render(self, data: list[dict[str, Any]], trim_blocks: bool = True, lstrip_blocks: bool = True) -> str:
"""
Build a report based on a Jinja2 template
Report is built based on a J2 template provided by user.
Data structure sent to template is:
>>> data = ResultManager.get_json_results()
>>> print(data)
[
{
name: ...,
test: ...,
result: ...,
messages: [...]
categories: ...,
description: ...,
}
]
Args:
data (list[dict[str, Any]]): List of results from ResultManager.get_results
trim_blocks (bool, optional): enable trim_blocks for J2 rendering. Defaults to True.
lstrip_blocks (bool, optional): enable lstrip_blocks for J2 rendering. Defaults to True.
Returns:
str: rendered template
"""
with open(self.template_path, encoding="utf-8") as file_:
template = Template(file_.read(), trim_blocks=trim_blocks, lstrip_blocks=lstrip_blocks)
return template.render({"data": data})
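A minimal usage sketch of the report builders above; the `TestResult` fields come from `anta.result_manager.models` and all values are illustrative:

```python
from rich.console import Console

from anta.reporter import ReportTable
from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult

# Build a manager holding a single illustrative result.
manager = ResultManager()
manager.add_test_result(
    TestResult(
        name="leaf1",
        test="VerifyNTP",
        categories=["system"],
        description="Verifies NTP synchronization",
        result="failure",
        messages=["The device is not synchronized with the configured NTP server(s)"],
    )
)

reporter = ReportTable()
console = Console()
console.print(reporter.report_all(result_manager=manager))
console.print(reporter.report_summary_tests(result_manager=manager))
console.print(reporter.report_summary_hosts(result_manager=manager))
```

`ReportJinja` follows the same pattern: instantiate it with a template path and pass it the parsed output of `ResultManager.get_json_results()` as `data`.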

211
anta/result_manager/__init__.py Normal file

@ -0,0 +1,211 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Result Manager Module for ANTA.
"""
from __future__ import annotations
import json
import logging
from pydantic import TypeAdapter
from anta.custom_types import TestStatus
from anta.result_manager.models import TestResult
logger = logging.getLogger(__name__)
class ResultManager:
"""
Helper to manage Test Results and generate reports.
Examples:
Create Inventory:
inventory_anta = AntaInventory.parse(
filename='examples/inventory.yml',
username='ansible',
password='ansible',
timeout=0.5
)
Create Result Manager:
manager = ResultManager()
Run tests for all connected devices:
for device in inventory_anta.get_inventory():
manager.add_test_result(
VerifyNTP(device=device).test()
)
manager.add_test_result(
VerifyEOSVersion(device=device).test(version='4.28.3M')
)
Print result in native format:
manager.get_results()
[
TestResult(
host=IPv4Address('192.168.0.10'),
test='VerifyNTP',
result='failure',
message="device is not running NTP correctly"
),
TestResult(
host=IPv4Address('192.168.0.10'),
test='VerifyEOSVersion',
result='success',
message=None
),
]
"""
def __init__(self) -> None:
"""
Class constructor.
The status of the class is initialized to "unset"
Then when adding a test with a status that is NOT 'error' the following
table shows the updated status:
| Current Status | Added test Status | Updated Status |
| -------------- | ------------------------------- | -------------- |
| unset | Any | Any |
| skipped | unset, skipped | skipped |
| skipped | success | success |
| skipped | failure | failure |
| success | unset, skipped, success | success |
| success | failure | failure |
| failure | unset, skipped, success, failure | failure |
If the status of the added test is error, the status is untouched and the
error_status is set to True.
"""
self._result_entries: list[TestResult] = []
# Initialize status
self.status: TestStatus = "unset"
self.error_status = False
def __len__(self) -> int:
"""
Implement __len__ method to count number of results.
"""
return len(self._result_entries)
def _update_status(self, test_status: TestStatus) -> None:
"""
Update ResultManager status based on the table above.
"""
ResultValidator = TypeAdapter(TestStatus)
ResultValidator.validate_python(test_status)
if test_status == "error":
self.error_status = True
return
if self.status == "unset":
self.status = test_status
elif self.status == "skipped" and test_status in {"success", "failure"}:
self.status = test_status
elif self.status == "success" and test_status == "failure":
self.status = "failure"
def add_test_result(self, entry: TestResult) -> None:
"""Add a result to the list
Args:
entry (TestResult): TestResult data to add to the report
"""
logger.debug(entry)
self._result_entries.append(entry)
self._update_status(entry.result)
def add_test_results(self, entries: list[TestResult]) -> None:
"""Add a list of results to the list
Args:
entries (list[TestResult]): List of TestResult data to add to the report
"""
for e in entries:
self.add_test_result(e)
def get_status(self, ignore_error: bool = False) -> str:
"""
Returns the current status including error_status if ignore_error is False
"""
return "error" if self.error_status and not ignore_error else self.status
def get_results(self) -> list[TestResult]:
"""
Return the list of all test results.
Returns:
list[TestResult]: List of results.
"""
return self._result_entries
def get_json_results(self) -> str:
"""
Expose list of all test results in JSON
Returns:
str: JSON dumps of the list of results
"""
result = [result.model_dump() for result in self._result_entries]
return json.dumps(result, indent=4)
def get_result_by_test(self, test_name: str) -> list[TestResult]:
"""
Get list of test result for a given test.
Args:
test_name (str): Test name to use to filter results
Returns:
list[TestResult]: List of results related to the test.
"""
return [result for result in self._result_entries if str(result.test) == test_name]
def get_result_by_host(self, host_ip: str) -> list[TestResult]:
"""
Get list of test result for a given host.
Args:
host_ip (str): IP Address of the host to use to filter results.
Returns:
list[TestResult]: List of results related to the host.
"""
return [result for result in self._result_entries if str(result.name) == host_ip]
def get_testcases(self) -> list[str]:
"""
Get list of name of all test cases in current manager.
Returns:
list[str]: List of names for all tests.
"""
result_list = []
for testcase in self._result_entries:
if str(testcase.test) not in result_list:
result_list.append(str(testcase.test))
return result_list
def get_hosts(self) -> list[str]:
"""
Get list of IP addresses in current manager.
Returns:
list[str]: List of IP addresses.
"""
result_list = []
for testcase in self._result_entries:
if str(testcase.name) not in result_list:
result_list.append(str(testcase.name))
return result_list
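A short sketch of the status escalation described in the constructor docstring above; the device and test names are illustrative:

```python
from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult


def fake_result(test: str, status: str) -> TestResult:
    # Helper building a minimal TestResult for demonstration purposes only.
    return TestResult(name="leaf1", test=test, categories=["demo"], description="demo", result=status)


manager = ResultManager()
manager.add_test_result(fake_result("VerifyA", "skipped"))
print(manager.get_status())                    # skipped
manager.add_test_result(fake_result("VerifyB", "success"))
print(manager.get_status())                    # success
manager.add_test_result(fake_result("VerifyC", "failure"))
print(manager.get_status())                    # failure
manager.add_test_result(fake_result("VerifyD", "error"))
print(manager.get_status())                    # error (error_status takes precedence)
print(manager.get_status(ignore_error=True))   # still failure
```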

86
anta/result_manager/models.py Normal file

@ -0,0 +1,86 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Models related to anta.result_manager module."""
from __future__ import annotations
# Need to keep List for pydantic in 3.8
from typing import List, Optional
from pydantic import BaseModel
from anta.custom_types import TestStatus
class TestResult(BaseModel):
"""
Describe the result of a test from a single device.
Attributes:
name: Device name where the test has run.
test: Test name runs on the device.
categories: List of categories the TestResult belongs to, by default the AntaTest categories.
description: TestResult description, by default the AntaTest description.
result: Result of the test. Can be one of "unset", "success", "failure", "error" or "skipped".
messages: Message to report after the test if any.
custom_field: Custom field to store a string for flexibility in integrating with ANTA
"""
name: str
test: str
categories: List[str]
description: str
result: TestStatus = "unset"
messages: List[str] = []
custom_field: Optional[str] = None
def is_success(self, message: str | None = None) -> None:
"""
Helper to set status to success
Args:
message: Optional message related to the test
"""
self._set_status("success", message)
def is_failure(self, message: str | None = None) -> None:
"""
Helper to set status to failure
Args:
message: Optional message related to the test
"""
self._set_status("failure", message)
def is_skipped(self, message: str | None = None) -> None:
"""
Helper to set status to skipped
Args:
message: Optional message related to the test
"""
self._set_status("skipped", message)
def is_error(self, message: str | None = None) -> None:
"""
Helper to set status to error
"""
self._set_status("error", message)
def _set_status(self, status: TestStatus, message: str | None = None) -> None:
"""
Set status and insert optional message
Args:
status: status of the test
message: optional message
"""
self.result = status
if message is not None:
self.messages.append(message)
def __str__(self) -> str:
"""
Returns a human readable string of this TestResult
"""
return f"Test '{self.test}' (on '{self.name}'): Result '{self.result}'\nMessages: {self.messages}"

109
anta/runner.py Normal file

@ -0,0 +1,109 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
# pylint: disable=too-many-branches
"""
ANTA runner function
"""
from __future__ import annotations
import asyncio
import logging
from typing import Tuple
from anta import GITHUB_SUGGESTION
from anta.catalog import AntaCatalog, AntaTestDefinition
from anta.device import AntaDevice
from anta.inventory import AntaInventory
from anta.logger import anta_log_exception
from anta.models import AntaTest
from anta.result_manager import ResultManager
logger = logging.getLogger(__name__)
AntaTestRunner = Tuple[AntaTestDefinition, AntaDevice]
async def main(manager: ResultManager, inventory: AntaInventory, catalog: AntaCatalog, tags: list[str] | None = None, established_only: bool = True) -> None:
"""
Main coroutine to run ANTA.
Use this as an entrypoint to the test framework in your script.
Args:
manager: ResultManager object to populate with the test results.
inventory: AntaInventory object that includes the device(s).
catalog: AntaCatalog object that includes the list of tests.
tags: List of tags to filter devices from the inventory. Defaults to None.
established_only: Include only established device(s). Defaults to True.
Returns:
None: The ResultManager object passed as `manager` is updated in place with the test results.
"""
if not catalog.tests:
logger.info("The list of tests is empty, exiting")
return
if len(inventory) == 0:
logger.info("The inventory is empty, exiting")
return
await inventory.connect_inventory()
devices: list[AntaDevice] = list(inventory.get_inventory(established_only=established_only, tags=tags).values())
if not devices:
logger.info(
f"No device in the established state '{established_only}' "
f"{f'matching the tags {tags} ' if tags else ''}was found. There is no device to run tests against, exiting"
)
return
coros = []
# Using a set to avoid inserting duplicate tests
tests_set: set[AntaTestRunner] = set()
for device in devices:
if tags:
# If there are CLI tags, only execute tests with matching tags
tests_set.update((test, device) for test in catalog.get_tests_by_tags(tags))
else:
# If there is no CLI tags, execute all tests without filters
tests_set.update((t, device) for t in catalog.tests if t.inputs.filters is None or t.inputs.filters.tags is None)
# Then add the tests with matching tags from device tags
tests_set.update((t, device) for t in catalog.get_tests_by_tags(device.tags))
tests: list[AntaTestRunner] = list(tests_set)
if not tests:
logger.info(f"There is no tests{f' matching the tags {tags} ' if tags else ' '}to run on current inventory. " "Exiting...")
return
for test_definition, device in tests:
try:
test_instance = test_definition.test(device=device, inputs=test_definition.inputs)
coros.append(test_instance.test())
except Exception as e: # pylint: disable=broad-exception-caught
# An AntaTest instance is potentially user-defined code.
# We need to catch everything and exit gracefully with an
# error message
message = "\n".join(
[
f"There is an error when creating test {test_definition.test.__module__}.{test_definition.test.__name__}.",
f"If this is not a custom test implementation: {GITHUB_SUGGESTION}",
]
)
anta_log_exception(e, message, logger)
if AntaTest.progress is not None:
AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coros))
logger.info("Running ANTA tests...")
test_results = await asyncio.gather(*coros)
for r in test_results:
manager.add_test_result(r)
for device in devices:
if device.cache_statistics is not None:
logger.info(
f"Cache statistics for '{device.name}': "
f"{device.cache_statistics['cache_hits']} hits / {device.cache_statistics['total_commands_sent']} "
f"command(s) ({device.cache_statistics['cache_hit_ratio']})"
)
else:
logger.info(f"Caching is not enabled on {device.name}")

3
anta/tests/__init__.py Normal file

@ -0,0 +1,3 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.

292
anta/tests/aaa.py Normal file

@ -0,0 +1,292 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS various AAA settings
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from ipaddress import IPv4Address
# Need to keep List and Set for pydantic in python 3.8
from typing import List, Literal, Set
from anta.custom_types import AAAAuthMethod
from anta.models import AntaCommand, AntaTest
class VerifyTacacsSourceIntf(AntaTest):
"""
Verifies TACACS source-interface for a specified VRF.
Expected Results:
* success: The test will pass if the provided TACACS source-interface is configured in the specified VRF.
* failure: The test will fail if the provided TACACS source-interface is NOT configured in the specified VRF.
"""
name = "VerifyTacacsSourceIntf"
description = "Verifies TACACS source-interface for a specified VRF."
categories = ["aaa"]
commands = [AntaCommand(command="show tacacs")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
intf: str
"""Source-interface to use as source IP of TACACS messages"""
vrf: str = "default"
"""The name of the VRF to transport TACACS messages"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
try:
if command_output["srcIntf"][self.inputs.vrf] == self.inputs.intf:
self.result.is_success()
else:
self.result.is_failure(f"Wrong source-interface configured in VRF {self.inputs.vrf}")
except KeyError:
self.result.is_failure(f"Source-interface {self.inputs.intf} is not configured in VRF {self.inputs.vrf}")
class VerifyTacacsServers(AntaTest):
"""
Verifies TACACS servers are configured for a specified VRF.
Expected Results:
* success: The test will pass if the provided TACACS servers are configured in the specified VRF.
* failure: The test will fail if the provided TACACS servers are NOT configured in the specified VRF.
"""
name = "VerifyTacacsServers"
description = "Verifies TACACS servers are configured for a specified VRF."
categories = ["aaa"]
commands = [AntaCommand(command="show tacacs")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
servers: List[IPv4Address]
"""List of TACACS servers"""
vrf: str = "default"
"""The name of the VRF to transport TACACS messages"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
tacacs_servers = command_output["tacacsServers"]
if not tacacs_servers:
self.result.is_failure("No TACACS servers are configured")
return
not_configured = [
str(server)
for server in self.inputs.servers
if not any(
str(server) == tacacs_server["serverInfo"]["hostname"] and self.inputs.vrf == tacacs_server["serverInfo"]["vrf"] for tacacs_server in tacacs_servers
)
]
if not not_configured:
self.result.is_success()
else:
self.result.is_failure(f"TACACS servers {not_configured} are not configured in VRF {self.inputs.vrf}")
class VerifyTacacsServerGroups(AntaTest):
"""
Verifies if the provided TACACS server group(s) are configured.
Expected Results:
* success: The test will pass if the provided TACACS server group(s) are configured.
* failure: The test will fail if one or all the provided TACACS server group(s) are NOT configured.
"""
name = "VerifyTacacsServerGroups"
description = "Verifies if the provided TACACS server group(s) are configured."
categories = ["aaa"]
commands = [AntaCommand(command="show tacacs")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
groups: List[str]
"""List of TACACS server group"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
tacacs_groups = command_output["groups"]
if not tacacs_groups:
self.result.is_failure("No TACACS server group(s) are configured")
return
not_configured = [group for group in self.inputs.groups if group not in tacacs_groups]
if not not_configured:
self.result.is_success()
else:
self.result.is_failure(f"TACACS server group(s) {not_configured} are not configured")
class VerifyAuthenMethods(AntaTest):
"""
Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x).
Expected Results:
* success: The test will pass if the provided AAA authentication method list is matching in the configured authentication types.
* failure: The test will fail if the provided AAA authentication method list is NOT matching in the configured authentication types.
"""
name = "VerifyAuthenMethods"
description = "Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x)."
categories = ["aaa"]
commands = [AntaCommand(command="show aaa methods authentication")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
methods: List[AAAAuthMethod]
"""List of AAA authentication methods. Methods should be in the right order"""
types: Set[Literal["login", "enable", "dot1x"]]
"""List of authentication types to verify"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
not_matching = []
for k, v in command_output.items():
auth_type = k.replace("AuthenMethods", "")
if auth_type not in self.inputs.types:
# We do not need to verify this accounting type
continue
if auth_type == "login":
if "login" not in v:
self.result.is_failure("AAA authentication methods are not configured for login console")
return
if v["login"]["methods"] != self.inputs.methods:
self.result.is_failure(f"AAA authentication methods {self.inputs.methods} are not matching for login console")
return
for methods in v.values():
if methods["methods"] != self.inputs.methods:
not_matching.append(auth_type)
if not not_matching:
self.result.is_success()
else:
self.result.is_failure(f"AAA authentication methods {self.inputs.methods} are not matching for {not_matching}")
class VerifyAuthzMethods(AntaTest):
"""
Verifies the AAA authorization method lists for different authorization types (commands, exec).
Expected Results:
* success: The test will pass if the provided AAA authorization method list is matching in the configured authorization types.
* failure: The test will fail if the provided AAA authorization method list is NOT matching in the configured authorization types.
"""
name = "VerifyAuthzMethods"
description = "Verifies the AAA authorization method lists for different authorization types (commands, exec)."
categories = ["aaa"]
commands = [AntaCommand(command="show aaa methods authorization")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
methods: List[AAAAuthMethod]
"""List of AAA authorization methods. Methods should be in the right order"""
types: Set[Literal["commands", "exec"]]
"""List of authorization types to verify"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
not_matching = []
for k, v in command_output.items():
authz_type = k.replace("AuthzMethods", "")
if authz_type not in self.inputs.types:
# We do not need to verify this accounting type
continue
for methods in v.values():
if methods["methods"] != self.inputs.methods:
not_matching.append(authz_type)
if not not_matching:
self.result.is_success()
else:
self.result.is_failure(f"AAA authorization methods {self.inputs.methods} are not matching for {not_matching}")
class VerifyAcctDefaultMethods(AntaTest):
"""
Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x).
Expected Results:
* success: The test will pass if the provided AAA accounting default method list is matching in the configured accounting types.
* failure: The test will fail if the provided AAA accounting default method list is NOT matching in the configured accounting types.
"""
name = "VerifyAcctDefaultMethods"
description = "Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x)."
categories = ["aaa"]
commands = [AntaCommand(command="show aaa methods accounting")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
methods: List[AAAAuthMethod]
"""List of AAA accounting methods. Methods should be in the right order"""
types: Set[Literal["commands", "exec", "system", "dot1x"]]
"""List of accounting types to verify"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
not_matching = []
not_configured = []
for k, v in command_output.items():
acct_type = k.replace("AcctMethods", "")
if acct_type not in self.inputs.types:
# We do not need to verify this accounting type
continue
for methods in v.values():
if "defaultAction" not in methods:
not_configured.append(acct_type)
if methods["defaultMethods"] != self.inputs.methods:
not_matching.append(acct_type)
if not_configured:
self.result.is_failure(f"AAA default accounting is not configured for {not_configured}")
return
if not not_matching:
self.result.is_success()
else:
self.result.is_failure(f"AAA accounting default methods {self.inputs.methods} are not matching for {not_matching}")
class VerifyAcctConsoleMethods(AntaTest):
"""
Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x).
Expected Results:
* success: The test will pass if the provided AAA accounting console method list is matching in the configured accounting types.
* failure: The test will fail if the provided AAA accounting console method list is NOT matching in the configured accounting types.
"""
name = "VerifyAcctConsoleMethods"
description = "Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x)."
categories = ["aaa"]
commands = [AntaCommand(command="show aaa methods accounting")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
methods: List[AAAAuthMethod]
"""List of AAA accounting console methods. Methods should be in the right order"""
types: Set[Literal["commands", "exec", "system", "dot1x"]]
"""List of accounting console types to verify"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
not_matching = []
not_configured = []
for k, v in command_output.items():
acct_type = k.replace("AcctMethods", "")
if acct_type not in self.inputs.types:
# We do not need to verify this accounting type
continue
for methods in v.values():
if "consoleAction" not in methods:
not_configured.append(acct_type)
if methods["consoleMethods"] != self.inputs.methods:
not_matching.append(acct_type)
if not_configured:
self.result.is_failure(f"AAA console accounting is not configured for {not_configured}")
return
if not not_matching:
self.result.is_success()
else:
self.result.is_failure(f"AAA accounting console methods {self.inputs.methods} are not matching for {not_matching}")

235
anta/tests/bfd.py Normal file

@ -0,0 +1,235 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
BFD test functions
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from datetime import datetime
from ipaddress import IPv4Address
from typing import Any, List, Optional
from pydantic import BaseModel, Field
from anta.custom_types import BfdInterval, BfdMultiplier
from anta.models import AntaCommand, AntaTest
from anta.tools.get_value import get_value
class VerifyBFDSpecificPeers(AntaTest):
"""
This class verifies if the IPv4 BFD peer's sessions are UP and remote disc is non-zero in the specified VRF.
Expected results:
* success: The test will pass if IPv4 BFD peers are up and remote disc is non-zero in the specified VRF.
* failure: The test will fail if IPv4 BFD peers are not found, the status is not UP or remote disc is zero in the specified VRF.
"""
name = "VerifyBFDSpecificPeers"
description = "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF."
categories = ["bfd"]
commands = [AntaCommand(command="show bfd peers")]
class Input(AntaTest.Input):
"""
This class defines the input parameters of the test case.
"""
bfd_peers: List[BFDPeers]
"""List of IPv4 BFD peers"""
class BFDPeers(BaseModel):
"""
This class defines the details of an IPv4 BFD peer.
"""
peer_address: IPv4Address
"""IPv4 address of a BFD peer"""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
@AntaTest.anta_test
def test(self) -> None:
failures: dict[Any, Any] = {}
# Iterating over BFD peers
for bfd_peer in self.inputs.bfd_peers:
peer = str(bfd_peer.peer_address)
vrf = bfd_peer.vrf
bfd_output = get_value(self.instance_commands[0].json_output, f"vrfs..{vrf}..ipv4Neighbors..{peer}..peerStats..", separator="..")
# Check if BFD peer configured
if not bfd_output:
failures[peer] = {vrf: "Not Configured"}
continue
# Check BFD peer status and remote disc
if not (bfd_output.get("status") == "up" and bfd_output.get("remoteDisc") != 0):
failures[peer] = {vrf: {"status": bfd_output.get("status"), "remote_disc": bfd_output.get("remoteDisc")}}
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"Following BFD peers are not configured, status is not up or remote disc is zero:\n{failures}")
class VerifyBFDPeersIntervals(AntaTest):
"""
This class verifies the timers of the IPv4 BFD peers in the specified VRF.
Expected results:
* success: The test will pass if the timers of the IPv4 BFD peers are correct in the specified VRF.
* failure: The test will fail if the IPv4 BFD peers are not found or their timers are incorrect in the specified VRF.
"""
name = "VerifyBFDPeersIntervals"
description = "Verifies the timers of the IPv4 BFD peers in the specified VRF."
categories = ["bfd"]
commands = [AntaCommand(command="show bfd peers detail")]
class Input(AntaTest.Input):
"""
This class defines the input parameters of the test case.
"""
bfd_peers: List[BFDPeers]
"""List of BFD peers"""
class BFDPeers(BaseModel):
"""
This class defines the details of an IPv4 BFD peer.
"""
peer_address: IPv4Address
"""IPv4 address of a BFD peer"""
vrf: str = "default"
"""Optional VRF for BGP peer. If not provided, it defaults to `default`."""
tx_interval: BfdInterval
"""Tx interval of BFD peer in milliseconds"""
rx_interval: BfdInterval
"""Rx interval of BFD peer in milliseconds"""
multiplier: BfdMultiplier
"""Multiplier of BFD peer"""
@AntaTest.anta_test
def test(self) -> None:
failures: dict[Any, Any] = {}
# Iterating over BFD peers
for bfd_peers in self.inputs.bfd_peers:
peer = str(bfd_peers.peer_address)
vrf = bfd_peers.vrf
# Convert the interval values from milliseconds to microseconds, as reported in the EOS output
tx_interval = bfd_peers.tx_interval * 1000
rx_interval = bfd_peers.rx_interval * 1000
multiplier = bfd_peers.multiplier
bfd_output = get_value(self.instance_commands[0].json_output, f"vrfs..{vrf}..ipv4Neighbors..{peer}..peerStats..", separator="..")
# Check if BFD peer configured
if not bfd_output:
failures[peer] = {vrf: "Not Configured"}
continue
bfd_details = bfd_output.get("peerStatsDetail", {})
intervals_ok = (
bfd_details.get("operTxInterval") == tx_interval and bfd_details.get("operRxInterval") == rx_interval and bfd_details.get("detectMult") == multiplier
)
# Check timers of BFD peer
if not intervals_ok:
failures[peer] = {
vrf: {
"tx_interval": bfd_details.get("operTxInterval"),
"rx_interval": bfd_details.get("operRxInterval"),
"multiplier": bfd_details.get("detectMult"),
}
}
# Check if any failures
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"Following BFD peers are not configured or timers are not correct:\n{failures}")
class VerifyBFDPeersHealth(AntaTest):
"""
This class verifies the health of IPv4 BFD peers across all VRFs.
It checks that no BFD peer is in the down state and that the discriminator value of the remote system is not zero.
Optionally, it can also verify that BFD peers have not been down before a specified threshold of hours.
Expected results:
* Success: The test will pass if all IPv4 BFD peers are up, the discriminator value of each remote system is non-zero,
and the last downtime of each peer is above the defined threshold.
* Failure: The test will fail if any IPv4 BFD peer is down, the discriminator value of any remote system is zero,
or the last downtime of any peer is below the defined threshold.
"""
name = "VerifyBFDPeersHealth"
description = "Verifies the health of all IPv4 BFD peers."
categories = ["bfd"]
# revision 1 as later revision introduces additional nesting for type
commands = [AntaCommand(command="show bfd peers", revision=1), AntaCommand(command="show clock")]
class Input(AntaTest.Input):
"""
This class defines the input parameters of the test case.
"""
down_threshold: Optional[int] = Field(default=None, gt=0)
"""Optional down threshold in hours to check if a BFD peer was down before those hours or not."""
@AntaTest.anta_test
def test(self) -> None:
# Initialize failure strings
down_failures = []
up_failures = []
# Extract the current timestamp and command output
clock_output = self.instance_commands[1].json_output
current_timestamp = clock_output["utcTime"]
bfd_output = self.instance_commands[0].json_output
# set the initial result
self.result.is_success()
# Check if any IPv4 BFD peer is configured
ipv4_neighbors_exist = any(vrf_data["ipv4Neighbors"] for vrf_data in bfd_output["vrfs"].values())
if not ipv4_neighbors_exist:
self.result.is_failure("No IPv4 BFD peers are configured for any VRF.")
return
# Iterate over IPv4 BFD peers
for vrf, vrf_data in bfd_output["vrfs"].items():
for peer, neighbor_data in vrf_data["ipv4Neighbors"].items():
for peer_data in neighbor_data["peerStats"].values():
peer_status = peer_data["status"]
remote_disc = peer_data["remoteDisc"]
remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else ""
last_down = peer_data["lastDown"]
hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600
# Check if peer status is not up
if peer_status != "up":
down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{remote_disc_info}.")
# Check if the last down is within the threshold
elif self.inputs.down_threshold and hours_difference < self.inputs.down_threshold:
up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{remote_disc_info}.")
# Check if remote disc is 0
elif remote_disc == 0:
up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}.")
# Check if there are any failures
if down_failures:
down_failures_str = "\n".join(down_failures)
self.result.is_failure(f"Following BFD peers are not up:\n{down_failures_str}")
if up_failures:
up_failures_str = "\n".join(up_failures)
self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}")

51
anta/tests/configuration.py Normal file

@ -0,0 +1,51 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the device configuration
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from anta.models import AntaCommand, AntaTest
class VerifyZeroTouch(AntaTest):
"""
Verifies ZeroTouch is disabled
"""
name = "VerifyZeroTouch"
description = "Verifies ZeroTouch is disabled"
categories = ["configuration"]
commands = [AntaCommand(command="show zerotouch")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].output
assert isinstance(command_output, dict)
if command_output["mode"] == "disabled":
self.result.is_success()
else:
self.result.is_failure("ZTP is NOT disabled")
class VerifyRunningConfigDiffs(AntaTest):
"""
Verifies there is no difference between the running-config and the startup-config
"""
name = "VerifyRunningConfigDiffs"
description = "Verifies there is no difference between the running-config and the startup-config"
categories = ["configuration"]
commands = [AntaCommand(command="show running-config diffs", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].output
if command_output is None or command_output == "":
self.result.is_success()
else:
self.result.is_failure(str(command_output))

125
anta/tests/connectivity.py Normal file

@ -0,0 +1,125 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to various connectivity checks
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from ipaddress import IPv4Address
# Need to keep List for pydantic in python 3.8
from typing import List, Union
from pydantic import BaseModel
from anta.custom_types import Interface
from anta.models import AntaCommand, AntaMissingParamException, AntaTemplate, AntaTest
class VerifyReachability(AntaTest):
"""
Test network reachability to one or many destination IP(s).
Expected Results:
* success: The test will pass if all destination IP(s) are reachable.
* failure: The test will fail if one or many destination IP(s) are unreachable.
"""
name = "VerifyReachability"
description = "Test the network reachability to one or many destination IP(s)."
categories = ["connectivity"]
commands = [AntaTemplate(template="ping vrf {vrf} {destination} source {source} repeat {repeat}")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
hosts: List[Host]
"""List of hosts to ping"""
class Host(BaseModel):
"""Remote host to ping"""
destination: IPv4Address
"""IPv4 address to ping"""
source: Union[IPv4Address, Interface]
"""IPv4 address source IP or Egress interface to use"""
vrf: str = "default"
"""VRF context"""
repeat: int = 2
"""Number of ping repetition (default=2)"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat) for host in self.inputs.hosts]
@AntaTest.anta_test
def test(self) -> None:
failures = []
for command in self.instance_commands:
src = command.params.get("source")
dst = command.params.get("destination")
repeat = command.params.get("repeat")
if any(elem is None for elem in (src, dst, repeat)):
raise AntaMissingParamException(f"A parameter is missing to execute the test for command {command}")
if f"{repeat} received" not in command.json_output["messages"][0]:
failures.append((str(src), str(dst)))
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}")
class VerifyLLDPNeighbors(AntaTest):
"""
This test verifies that the provided LLDP neighbors are present and connected with the correct configuration.
Expected Results:
* success: The test will pass if each of the provided LLDP neighbors is present and connected to the specified port and device.
* failure: The test will fail if any of the following conditions are met:
- The provided LLDP neighbor is not found.
- The system name or port of the LLDP neighbor does not match the provided information.
"""
name = "VerifyLLDPNeighbors"
description = "Verifies that the provided LLDP neighbors are connected properly."
categories = ["connectivity"]
commands = [AntaCommand(command="show lldp neighbors detail")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
neighbors: List[Neighbor]
"""List of LLDP neighbors"""
class Neighbor(BaseModel):
"""LLDP neighbor"""
port: Interface
"""LLDP port"""
neighbor_device: str
"""LLDP neighbor device"""
neighbor_port: Interface
"""LLDP neighbor port"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
failures: dict[str, list[str]] = {}
for neighbor in self.inputs.neighbors:
if neighbor.port not in command_output["lldpNeighbors"]:
failures.setdefault("port_not_configured", []).append(neighbor.port)
elif len(lldp_neighbor_info := command_output["lldpNeighbors"][neighbor.port]["lldpNeighborInfo"]) == 0:
failures.setdefault("no_lldp_neighbor", []).append(neighbor.port)
elif (
lldp_neighbor_info[0]["systemName"] != neighbor.neighbor_device
or lldp_neighbor_info[0]["neighborInterfaceInfo"]["interfaceId_v2"] != neighbor.neighbor_port
):
failures.setdefault("wrong_lldp_neighbor", []).append(neighbor.port)
if not failures:
self.result.is_success()
else:
self.result.is_failure(f"The following port(s) have issues: {failures}")

165
anta/tests/field_notices.py Normal file

@ -0,0 +1,165 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions to flag field notices
"""
from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTest
class VerifyFieldNotice44Resolution(AntaTest):
"""
Verifies the device is using an Aboot version that fixes the bug discussed
in the field notice 44 (Aboot manages system settings prior to EOS initialization).
https://www.arista.com/en/support/advisories-notices/field-notice/8756-field-notice-44
"""
name = "VerifyFieldNotice44Resolution"
description = (
"Verifies the device is using an Aboot version that fix the bug discussed in the field notice 44 (Aboot manages system settings prior to EOS initialization)"
)
categories = ["field notices", "software"]
commands = [AntaCommand(command="show version detail")]
# TODO maybe implement ONLY ON PLATFORMS instead
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
devices = [
"DCS-7010T-48",
"DCS-7010T-48-DC",
"DCS-7050TX-48",
"DCS-7050TX-64",
"DCS-7050TX-72",
"DCS-7050TX-72Q",
"DCS-7050TX-96",
"DCS-7050TX2-128",
"DCS-7050SX-64",
"DCS-7050SX-72",
"DCS-7050SX-72Q",
"DCS-7050SX2-72Q",
"DCS-7050SX-96",
"DCS-7050SX2-128",
"DCS-7050QX-32S",
"DCS-7050QX2-32S",
"DCS-7050SX3-48YC12",
"DCS-7050CX3-32S",
"DCS-7060CX-32S",
"DCS-7060CX2-32S",
"DCS-7060SX2-48YC6",
"DCS-7160-48YC6",
"DCS-7160-48TC6",
"DCS-7160-32CQ",
"DCS-7280SE-64",
"DCS-7280SE-68",
"DCS-7280SE-72",
"DCS-7150SC-24-CLD",
"DCS-7150SC-64-CLD",
"DCS-7020TR-48",
"DCS-7020TRA-48",
"DCS-7020SR-24C2",
"DCS-7020SRG-24C2",
"DCS-7280TR-48C6",
"DCS-7280TRA-48C6",
"DCS-7280SR-48C6",
"DCS-7280SRA-48C6",
"DCS-7280SRAM-48C6",
"DCS-7280SR2K-48C6-M",
"DCS-7280SR2-48YC6",
"DCS-7280SR2A-48YC6",
"DCS-7280SRM-40CX2",
"DCS-7280QR-C36",
"DCS-7280QRA-C36S",
]
variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]
model = command_output["modelName"]
# TODO this list could be a regex
for variant in variants:
model = model.replace(variant, "")
if model not in devices:
self.result.is_skipped("device is not impacted by FN044")
return
for component in command_output["details"]["components"]:
if component["name"] == "Aboot":
aboot_version = component["version"].split("-")[2]
self.result.is_success()
if aboot_version.startswith("4.0.") and int(aboot_version.split(".")[2]) < 7:
self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
elif aboot_version.startswith("4.1.") and int(aboot_version.split(".")[2]) < 1:
self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
elif aboot_version.startswith("6.0.") and int(aboot_version.split(".")[2]) < 9:
self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
elif aboot_version.startswith("6.1.") and int(aboot_version.split(".")[2]) < 7:
self.result.is_failure(f"device is running incorrect version of aboot ({aboot_version})")
class VerifyFieldNotice72Resolution(AntaTest):
"""
Checks if the device is potentially exposed to Field Notice 72, and if the issue has been mitigated.
https://www.arista.com/en/support/advisories-notices/field-notice/17410-field-notice-0072
"""
name = "VerifyFieldNotice72Resolution"
description = "Verifies if the device has exposeure to FN72, and if the issue has been mitigated"
categories = ["field notices", "software"]
commands = [AntaCommand(command="show version detail")]
# TODO maybe implement ONLY ON PLATFORMS instead
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
devices = ["DCS-7280SR3-48YC8", "DCS-7280SR3K-48YC8"]
variants = ["-SSD-F", "-SSD-R", "-M-F", "-M-R", "-F", "-R"]
model = command_output["modelName"]
for variant in variants:
model = model.replace(variant, "")
if model not in devices:
self.result.is_skipped("Platform is not impacted by FN072")
return
serial = command_output["serialNumber"]
number = int(serial[3:7])
if "JPE" not in serial and "JAS" not in serial:
self.result.is_skipped("Device not exposed")
return
if model == "DCS-7280SR3-48YC8" and "JPE" in serial and number >= 2131:
self.result.is_skipped("Device not exposed")
return
if model == "DCS-7280SR3-48YC8" and "JAS" in serial and number >= 2041:
self.result.is_skipped("Device not exposed")
return
if model == "DCS-7280SR3K-48YC8" and "JPE" in serial and number >= 2134:
self.result.is_skipped("Device not exposed")
return
if model == "DCS-7280SR3K-48YC8" and "JAS" in serial and number >= 2041:
self.result.is_skipped("Device not exposed")
return
# Because each of the if checks above will return if taken, we only run the long
# check if we get this far
for entry in command_output["details"]["components"]:
if entry["name"] == "FixedSystemvrm1":
if int(entry["version"]) < 7:
self.result.is_failure("Device is exposed to FN72")
else:
self.result.is_success("FN72 is mitigated")
return
# We should never hit this point
self.result.is_error(message="Error in running test - FixedSystemvrm1 not found")
return

60
anta/tests/greent.py Normal file

@ -0,0 +1,60 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to GreenT (Postcard Telemetry) in EOS
"""
from __future__ import annotations
from anta.models import AntaCommand, AntaTest
class VerifyGreenTCounters(AntaTest):
"""
Verifies whether GRE packets are sent.
Expected Results:
* success: if > 0 GRE packets are sent
* failure: if no GRE packets are sent
"""
name = "VerifyGreenTCounters"
description = "Verifies if the greent counters are incremented."
categories = ["greent"]
commands = [AntaCommand(command="show monitor telemetry postcard counters")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["grePktSent"] > 0:
self.result.is_success()
else:
self.result.is_failure("GRE packets are not sent")
class VerifyGreenT(AntaTest):
"""
Verifies whether GreenT policy is created.
Expected Results:
* success: if there exists any policy other than "default" policy.
* failure: if no policy is created.
"""
name = "VerifyGreenT"
description = "Verifies whether greent policy is created."
categories = ["greent"]
commands = [AntaCommand(command="show monitor telemetry postcard policy profile")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
out = [f"{i} policy is created" for i in command_output["profiles"].keys() if "default" not in i]
if len(out) > 0:
for i in out:
self.result.is_success(f"{i} policy is created")
else:
self.result.is_failure("policy is not created")

220
anta/tests/hardware.py Normal file

@ -0,0 +1,220 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the hardware or environment
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
# Need to keep List for pydantic in python 3.8
from typing import List
from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTest
class VerifyTransceiversManufacturers(AntaTest):
"""
This test verifies if all the transceivers come from approved manufacturers.
Expected Results:
* success: The test will pass if all transceivers are from approved manufacturers.
* failure: The test will fail if some transceivers are from unapproved manufacturers.
"""
name = "VerifyTransceiversManufacturers"
description = "Verifies if all transceivers come from approved manufacturers."
categories = ["hardware"]
commands = [AntaCommand(command="show inventory", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
manufacturers: List[str]
"""List of approved transceivers manufacturers"""
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
wrong_manufacturers = {
interface: value["mfgName"] for interface, value in command_output["xcvrSlots"].items() if value["mfgName"] not in self.inputs.manufacturers
}
if not wrong_manufacturers:
self.result.is_success()
else:
self.result.is_failure(f"Some transceivers are from unapproved manufacturers: {wrong_manufacturers}")
class VerifyTemperature(AntaTest):
"""
This test verifies if the device temperature is within acceptable limits.
Expected Results:
* success: The test will pass if the device temperature is currently OK: 'temperatureOk'.
* failure: The test will fail if the device temperature is NOT OK.
"""
name = "VerifyTemperature"
description = "Verifies the device temperature."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
if temperature_status == "temperatureOk":
self.result.is_success()
else:
self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'")
class VerifyTransceiversTemperature(AntaTest):
"""
This test verifies if all the transceivers are operating at an acceptable temperature.
Expected Results:
* success: The test will pass if all transceivers status are OK: 'ok'.
* failure: The test will fail if some transceivers are NOT OK.
"""
name = "VerifyTransceiversTemperature"
description = "Verifies the transceivers temperature."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment temperature transceiver", ofmt="json")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
sensors = command_output["tempSensors"] if "tempSensors" in command_output.keys() else ""
wrong_sensors = {
sensor["name"]: {
"hwStatus": sensor["hwStatus"],
"alertCount": sensor["alertCount"],
}
for sensor in sensors
if sensor["hwStatus"] != "ok" or sensor["alertCount"] != 0
}
if not wrong_sensors:
self.result.is_success()
else:
self.result.is_failure(f"The following sensors are operating outside the acceptable temperature range or have raised alerts: {wrong_sensors}")
class VerifyEnvironmentSystemCooling(AntaTest):
"""
This test verifies the device's system cooling.
Expected Results:
* success: The test will pass if the system cooling status is OK: 'coolingOk'.
* failure: The test will fail if the system cooling status is NOT OK.
"""
name = "VerifyEnvironmentSystemCooling"
description = "Verifies the system cooling status."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment cooling", ofmt="json")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
sys_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
self.result.is_success()
if sys_status != "coolingOk":
self.result.is_failure(f"Device system cooling is not OK: '{sys_status}'")
class VerifyEnvironmentCooling(AntaTest):
"""
This test verifies the fans status.
Expected Results:
* success: The test will pass if the fans status are within the accepted states list.
* failure: The test will fail if some fans status is not within the accepted states list.
"""
name = "VerifyEnvironmentCooling"
description = "Verifies the status of power supply fans and all fan trays."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment cooling", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
states: List[str]
"""Accepted states list for fan status"""
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
self.result.is_success()
# First go through power supplies fans
for power_supply in command_output.get("powerSupplySlots", []):
for fan in power_supply.get("fans", []):
if (state := fan["status"]) not in self.inputs.states:
self.result.is_failure(f"Fan {fan['label']} on PowerSupply {power_supply['label']} is: '{state}'")
# Then go through fan trays
for fan_tray in command_output.get("fanTraySlots", []):
for fan in fan_tray.get("fans", []):
if (state := fan["status"]) not in self.inputs.states:
self.result.is_failure(f"Fan {fan['label']} on Fan Tray {fan_tray['label']} is: '{state}'")
class VerifyEnvironmentPower(AntaTest):
"""
This test verifies the power supplies status.
Expected Results:
* success: The test will pass if all power supply statuses are within the accepted states list.
* failure: The test will fail if any power supply status is not within the accepted states list.
"""
name = "VerifyEnvironmentPower"
description = "Verifies the power supplies status."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment power", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
states: List[str]
"""Accepted states list for power supplies status"""
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
power_supplies = command_output["powerSupplies"] if "powerSupplies" in command_output.keys() else "{}"
wrong_power_supplies = {
powersupply: {"state": value["state"]} for powersupply, value in dict(power_supplies).items() if value["state"] not in self.inputs.states
}
if not wrong_power_supplies:
self.result.is_success()
else:
self.result.is_failure(f"The following power supplies status are not in the accepted states list: {wrong_power_supplies}")
class VerifyAdverseDrops(AntaTest):
"""
This test verifies if there are no adverse drops on DCS7280E and DCS7500E.
Expected Results:
* success: The test will pass if there are no adverse drops.
* failure: The test will fail if there are adverse drops.
"""
name = "VerifyAdverseDrops"
description = "Verifies there are no adverse drops on DCS7280E and DCS7500E"
categories = ["hardware"]
commands = [AntaCommand(command="show hardware counter drop", ofmt="json")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
total_adverse_drop = command_output["totalAdverseDrops"] if "totalAdverseDrops" in command_output.keys() else ""
if total_adverse_drop == 0:
self.result.is_success()
else:
self.result.is_failure(f"Device totalAdverseDrops counter is: '{total_adverse_drop}'")

599
anta/tests/interfaces.py Normal file
View file

@ -0,0 +1,599 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the device interfaces
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
import re
from ipaddress import IPv4Network
# Need to keep Dict and List for pydantic in python 3.8
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, conint
from pydantic_extra_types.mac_address import MacAddress
from anta.custom_types import Interface
from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools.get_item import get_item
from anta.tools.get_value import get_value
class VerifyInterfaceUtilization(AntaTest):
"""
Verifies interfaces utilization is below 75%.
Expected Results:
* success: The test will pass if all interfaces have a usage below 75%.
* failure: The test will fail if one or more interfaces have a usage above 75%.
"""
name = "VerifyInterfaceUtilization"
description = "Verifies that all interfaces have a usage below 75%."
categories = ["interfaces"]
# TODO - move from text to json if possible
commands = [AntaCommand(command="show interfaces counters rates", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
wrong_interfaces = {}
for line in command_output.split("\n")[1:]:
if len(line) > 0:
if line.split()[-5] == "-" or line.split()[-2] == "-":
pass
elif float(line.split()[-5].replace("%", "")) > 75.0:
wrong_interfaces[line.split()[0]] = line.split()[-5]
elif float(line.split()[-2].replace("%", "")) > 75.0:
wrong_interfaces[line.split()[0]] = line.split()[-2]
if not wrong_interfaces:
self.result.is_success()
else:
self.result.is_failure(f"The following interfaces have a usage > 75%: {wrong_interfaces}")
class VerifyInterfaceErrors(AntaTest):
"""
This test verifies that interfaces error counters are equal to zero.
Expected Results:
* success: The test will pass if all interfaces have error counters equal to zero.
* failure: The test will fail if one or more interfaces have non-zero error counters.
"""
name = "VerifyInterfaceErrors"
description = "Verifies there are no interface error counters."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces counters errors")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
wrong_interfaces: list[dict[str, dict[str, int]]] = []
for interface, counters in command_output["interfaceErrorCounters"].items():
if any(value > 0 for value in counters.values()) and all(interface not in wrong_interface for wrong_interface in wrong_interfaces):
wrong_interfaces.append({interface: counters})
if not wrong_interfaces:
self.result.is_success()
else:
self.result.is_failure(f"The following interface(s) have non-zero error counters: {wrong_interfaces}")
class VerifyInterfaceDiscards(AntaTest):
"""
Verifies interfaces packet discard counters are equal to zero.
Expected Results:
* success: The test will pass if all interfaces have discard counters equal to zero.
* failure: The test will fail if one or more interfaces have non-zero discard counters.
"""
name = "VerifyInterfaceDiscards"
description = "Verifies there are no interface discard counters."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces counters discards")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
wrong_interfaces: list[dict[str, dict[str, int]]] = []
for interface, outer_v in command_output["interfaces"].items():
wrong_interfaces.extend({interface: outer_v} for counter, value in outer_v.items() if value > 0)
if not wrong_interfaces:
self.result.is_success()
else:
self.result.is_failure(f"The following interfaces have non 0 discard counter(s): {wrong_interfaces}")
class VerifyInterfaceErrDisabled(AntaTest):
"""
Verifies there are no interfaces in errdisabled state.
Expected Results:
* success: The test will pass if there are no interfaces in errdisabled state.
* failure: The test will fail if there is at least one interface in errdisabled state.
"""
name = "VerifyInterfaceErrDisabled"
description = "Verifies there are no interfaces in the errdisabled state."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces status")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
errdisabled_interfaces = [interface for interface, value in command_output["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"]
if errdisabled_interfaces:
self.result.is_failure(f"The following interfaces are in error disabled state: {errdisabled_interfaces}")
else:
self.result.is_success()
class VerifyInterfacesStatus(AntaTest):
"""
This test verifies if the provided list of interfaces are all in the expected state.
- If line protocol status is provided, prioritize checking against both status and line protocol status
- If line protocol status is not provided and interface status is "up", expect both status and line protocol to be "up"
- If interface status is not "up", check only the interface status without considering line protocol status
Expected Results:
* success: The test will pass if the provided interfaces are all in the expected state.
* failure: The test will fail if any interface is not in the expected state.
"""
name = "VerifyInterfacesStatus"
description = "Verifies the status of the provided interfaces."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces description")]
class Input(AntaTest.Input):
"""Input for the VerifyInterfacesStatus test."""
interfaces: List[InterfaceState]
"""List of interfaces to validate with the expected state."""
class InterfaceState(BaseModel):
"""Model for the interface state input."""
name: Interface
"""Interface to validate."""
status: Literal["up", "down", "adminDown"]
"""Expected status of the interface."""
line_protocol_status: Optional[Literal["up", "down", "testing", "unknown", "dormant", "notPresent", "lowerLayerDown"]] = None
"""Expected line protocol status of the interface."""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
self.result.is_success()
intf_not_configured = []
intf_wrong_state = []
for interface in self.inputs.interfaces:
if (intf_status := get_value(command_output["interfaceDescriptions"], interface.name, separator="..")) is None:
intf_not_configured.append(interface.name)
continue
status = "up" if intf_status["interfaceStatus"] in {"up", "connected"} else intf_status["interfaceStatus"]
proto = "up" if intf_status["lineProtocolStatus"] in {"up", "connected"} else intf_status["lineProtocolStatus"]
# If line protocol status is provided, prioritize checking against both status and line protocol status
if interface.line_protocol_status:
if interface.status != status or interface.line_protocol_status != proto:
intf_wrong_state.append(f"{interface.name} is {status}/{proto}")
# If line protocol status is not provided and interface status is "up", expect both status and proto to be "up"
# If interface status is not "up", check only the interface status without considering line protocol status
elif (interface.status == "up" and (status != "up" or proto != "up")) or (interface.status != status):
intf_wrong_state.append(f"{interface.name} is {status}/{proto}")
if intf_not_configured:
self.result.is_failure(f"The following interface(s) are not configured: {intf_not_configured}")
if intf_wrong_state:
self.result.is_failure(f"The following interface(s) are not in the expected state: {intf_wrong_state}")
class VerifyStormControlDrops(AntaTest):
"""
Verifies the device did not drop packets due to its storm-control configuration.
Expected Results:
* success: The test will pass if there are no storm-control drop counters.
* failure: The test will fail if there is at least one storm-control drop counter.
"""
name = "VerifyStormControlDrops"
description = "Verifies there are no interface storm-control drop counters."
categories = ["interfaces"]
commands = [AntaCommand(command="show storm-control")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
storm_controlled_interfaces: dict[str, dict[str, Any]] = {}
for interface, interface_dict in command_output["interfaces"].items():
for traffic_type, traffic_type_dict in interface_dict["trafficTypes"].items():
if "drop" in traffic_type_dict and traffic_type_dict["drop"] != 0:
storm_controlled_interface_dict = storm_controlled_interfaces.setdefault(interface, {})
storm_controlled_interface_dict.update({traffic_type: traffic_type_dict["drop"]})
if not storm_controlled_interfaces:
self.result.is_success()
else:
self.result.is_failure(f"The following interfaces have none 0 storm-control drop counters {storm_controlled_interfaces}")
class VerifyPortChannels(AntaTest):
"""
Verifies there are no inactive ports in all port channels.
Expected Results:
* success: The test will pass if there are no inactive ports in all port channels.
* failure: The test will fail if there is at least one inactive port in a port channel.
"""
name = "VerifyPortChannels"
description = "Verifies there are no inactive ports in all port channels."
categories = ["interfaces"]
commands = [AntaCommand(command="show port-channel")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
po_with_inactive_ports: list[dict[str, str]] = []
for portchannel, portchannel_dict in command_output["portChannels"].items():
if len(portchannel_dict["inactivePorts"]) != 0:
po_with_inactive_ports.append({portchannel: portchannel_dict["inactivePorts"]})
if not po_with_inactive_ports:
self.result.is_success()
else:
self.result.is_failure(f"The following port-channels have inactive port(s): {po_with_inactive_ports}")
class VerifyIllegalLACP(AntaTest):
"""
Verifies there are no illegal LACP packets received.
Expected Results:
* success: The test will pass if there are no illegal LACP packets received.
* failure: The test will fail if there is at least one illegal LACP packet received.
"""
name = "VerifyIllegalLACP"
description = "Verifies there are no illegal LACP packets in all port channels."
categories = ["interfaces"]
commands = [AntaCommand(command="show lacp counters all-ports")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
po_with_illegal_lacp: list[dict[str, dict[str, int]]] = []
for portchannel, portchannel_dict in command_output["portChannels"].items():
po_with_illegal_lacp.extend(
{portchannel: interface} for interface, interface_dict in portchannel_dict["interfaces"].items() if interface_dict["illegalRxCount"] != 0
)
if not po_with_illegal_lacp:
self.result.is_success()
else:
self.result.is_failure("The following port-channels have recieved illegal lacp packets on the " f"following ports: {po_with_illegal_lacp}")
class VerifyLoopbackCount(AntaTest):
"""
Verifies that the device has the expected number of loopback interfaces and all are operational.
Expected Results:
* success: The test will pass if the device has the correct number of loopback interfaces and none are down.
* failure: The test will fail if the loopback interface count is incorrect or any are non-operational.
"""
name = "VerifyLoopbackCount"
description = "Verifies the number of loopback interfaces and their status."
categories = ["interfaces"]
commands = [AntaCommand(command="show ip interface brief")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type: ignore
"""Number of loopback interfaces expected to be present"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
loopback_count = 0
down_loopback_interfaces = []
for interface in command_output["interfaces"]:
interface_dict = command_output["interfaces"][interface]
if "Loopback" in interface:
loopback_count += 1
if not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"):
down_loopback_interfaces.append(interface)
if loopback_count == self.inputs.number and len(down_loopback_interfaces) == 0:
self.result.is_success()
else:
self.result.is_failure()
if loopback_count != self.inputs.number:
self.result.is_failure(f"Found {loopback_count} Loopbacks when expecting {self.inputs.number}")
elif len(down_loopback_interfaces) != 0:
self.result.is_failure(f"The following Loopbacks are not up: {down_loopback_interfaces}")
class VerifySVI(AntaTest):
"""
Verifies the status of all SVIs.
Expected Results:
* success: The test will pass if all SVIs are up.
* failure: The test will fail if one or many SVIs are not up.
"""
name = "VerifySVI"
description = "Verifies the status of all SVIs."
categories = ["interfaces"]
commands = [AntaCommand(command="show ip interface brief")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
down_svis = []
for interface in command_output["interfaces"]:
interface_dict = command_output["interfaces"][interface]
if "Vlan" in interface:
if not (interface_dict["lineProtocolStatus"] == "up" and interface_dict["interfaceStatus"] == "connected"):
down_svis.append(interface)
if len(down_svis) == 0:
self.result.is_success()
else:
self.result.is_failure(f"The following SVIs are not up: {down_svis}")
class VerifyL3MTU(AntaTest):
"""
Verifies the global layer 3 Maximum Transfer Unit (MTU) for all L3 interfaces.
Test that L3 interfaces are configured with the correct MTU. It supports Ethernet, Port Channel and VLAN interfaces.
You can define a global MTU to check, an MTU per interface, and a list of interfaces to ignore.
Expected Results:
* success: The test will pass if all layer 3 interfaces have the proper MTU configured.
* failure: The test will fail if one or many layer 3 interfaces have the wrong MTU configured.
"""
name = "VerifyL3MTU"
description = "Verifies the global L3 MTU of all L3 interfaces."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
mtu: int = 1500
"""Default MTU we should have configured on all non-excluded interfaces"""
ignored_interfaces: List[str] = ["Management", "Loopback", "Vxlan", "Tunnel"]
"""A list of L3 interfaces to ignore"""
specific_mtu: List[Dict[str, int]] = []
"""A list of dictionary of L3 interfaces with their specific MTU configured"""
@AntaTest.anta_test
def test(self) -> None:
# Parameter to save incorrect interface settings
wrong_l3mtu_intf: list[dict[str, int]] = []
command_output = self.instance_commands[0].json_output
# Set list of interfaces with specific settings
specific_interfaces: list[str] = []
if self.inputs.specific_mtu:
for d in self.inputs.specific_mtu:
specific_interfaces.extend(d)
for interface, values in command_output["interfaces"].items():
if re.findall(r"[a-z]+", interface, re.IGNORECASE)[0] not in self.inputs.ignored_interfaces and values["forwardingModel"] == "routed":
if interface in specific_interfaces:
wrong_l3mtu_intf.extend({interface: values["mtu"]} for custom_data in self.inputs.specific_mtu if values["mtu"] != custom_data[interface])
# Comparison with generic setting
elif values["mtu"] != self.inputs.mtu:
wrong_l3mtu_intf.append({interface: values["mtu"]})
if wrong_l3mtu_intf:
self.result.is_failure(f"Some interfaces do not have correct MTU configured:\n{wrong_l3mtu_intf}")
else:
self.result.is_success()
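# Illustrative sketch (not part of the upstream module): how the specific_mtu input is
# consumed. Each entry is a single-key mapping of interface name to its expected MTU; the
# values below are hypothetical.
_specific_mtu = [{"Ethernet1": 9214}]
_specific_interfaces = []
for _entry in _specific_mtu:
    _specific_interfaces.extend(_entry)  # extending with a dict adds its keys
assert _specific_interfaces == ["Ethernet1"]
# A routed interface listed there is compared against its own MTU; every other routed,
# non-ignored interface is compared against the global `mtu` input (1500 by default).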
class VerifyIPProxyARP(AntaTest):
"""
Verifies if Proxy-ARP is enabled for the provided list of interface(s).
Expected Results:
* success: The test will pass if Proxy-ARP is enabled on the specified interface(s).
* failure: The test will fail if Proxy-ARP is disabled on the specified interface(s).
"""
name = "VerifyIPProxyARP"
description = "Verifies if Proxy ARP is enabled."
categories = ["interfaces"]
commands = [AntaTemplate(template="show ip interface {intf}")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
interfaces: List[str]
"""list of interfaces to be tested"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(intf=intf) for intf in self.inputs.interfaces]
@AntaTest.anta_test
def test(self) -> None:
disabled_intf = []
for command in self.instance_commands:
if "intf" in command.params:
intf = command.params["intf"]
if not command.json_output["interfaces"][intf]["proxyArp"]:
disabled_intf.append(intf)
if disabled_intf:
self.result.is_failure(f"The following interface(s) have Proxy-ARP disabled: {disabled_intf}")
else:
self.result.is_success()
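# Illustrative sketch (not part of the upstream module): render() above expands the
# "show ip interface {intf}" template once per interface in the inputs, so one command is
# collected and checked per interface. Plain str.format is used here only as an analogy
# for the template expansion; it is not the AntaTemplate API.
_input_interfaces = ["Ethernet1", "Ethernet2"]  # hypothetical inputs
_rendered = ["show ip interface {intf}".format(intf=intf) for intf in _input_interfaces]
assert _rendered == ["show ip interface Ethernet1", "show ip interface Ethernet2"]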
class VerifyL2MTU(AntaTest):
"""
Verifies the global layer 2 Maximum Transfer Unit (MTU) for all L2 interfaces.
Test that L2 interfaces are configured with the correct MTU. It supports Ethernet, Port Channel and VLAN interfaces.
You can define a global MTU to check, an MTU per interface, and a list of interfaces to ignore.
Expected Results:
* success: The test will pass if all layer 2 interfaces have the proper MTU configured.
* failure: The test will fail if one or many layer 2 interfaces have the wrong MTU configured.
"""
name = "VerifyL2MTU"
description = "Verifies the global L2 MTU of all L2 interfaces."
categories = ["interfaces"]
commands = [AntaCommand(command="show interfaces")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
mtu: int = 9214
"""Default MTU we should have configured on all non-excluded interfaces"""
ignored_interfaces: List[str] = ["Management", "Loopback", "Vxlan", "Tunnel"]
"""A list of L2 interfaces to ignore"""
specific_mtu: List[Dict[str, int]] = []
"""A list of dictionary of L2 interfaces with their specific MTU configured"""
@AntaTest.anta_test
def test(self) -> None:
# Parameter to save incorrect interface settings
wrong_l2mtu_intf: list[dict[str, int]] = []
command_output = self.instance_commands[0].json_output
# Set list of interfaces with specific settings
specific_interfaces: list[str] = []
if self.inputs.specific_mtu:
for d in self.inputs.specific_mtu:
specific_interfaces.extend(d)
for interface, values in command_output["interfaces"].items():
if re.findall(r"[a-z]+", interface, re.IGNORECASE)[0] not in self.inputs.ignored_interfaces and values["forwardingModel"] == "bridged":
if interface in specific_interfaces:
wrong_l2mtu_intf.extend({interface: values["mtu"]} for custom_data in self.inputs.specific_mtu if values["mtu"] != custom_data[interface])
# Comparison with generic setting
elif values["mtu"] != self.inputs.mtu:
wrong_l2mtu_intf.append({interface: values["mtu"]})
if wrong_l2mtu_intf:
self.result.is_failure(f"Some L2 interfaces do not have correct MTU configured:\n{wrong_l2mtu_intf}")
else:
self.result.is_success()
class VerifyInterfaceIPv4(AntaTest):
"""
Verifies that an interface is configured with the correct primary IPv4 address and, optionally, a list of secondary IPv4 addresses.
Expected Results:
* success: The test will pass if the interface is configured with the correct primary and secondary IPv4 addresses.
* failure: The test will fail if the interface is not found or the primary and secondary IPv4 addresses do not match the input.
"""
name = "VerifyInterfaceIPv4"
description = "Verifies the interface IPv4 addresses."
categories = ["interfaces"]
commands = [AntaTemplate(template="show ip interface {interface}")]
class Input(AntaTest.Input):
"""Inputs for the VerifyInterfaceIPv4 test."""
interfaces: List[InterfaceDetail]
"""list of interfaces to be tested"""
class InterfaceDetail(BaseModel):
"""Detail of an interface"""
name: Interface
"""Name of the interface"""
primary_ip: IPv4Network
"""Primary IPv4 address with subnet on interface"""
secondary_ips: Optional[List[IPv4Network]] = None
"""Optional list of secondary IPv4 addresses with subnet on interface"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
# Render the template for each interface
return [
template.render(interface=interface.name, primary_ip=interface.primary_ip, secondary_ips=interface.secondary_ips) for interface in self.inputs.interfaces
]
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
for command in self.instance_commands:
intf = command.params["interface"]
input_primary_ip = str(command.params["primary_ip"])
failed_messages = []
# Check if the interface has an IP address configured
if not (interface_output := get_value(command.json_output, f"interfaces.{intf}.interfaceAddress")):
self.result.is_failure(f"For interface `{intf}`, IP address is not configured.")
continue
primary_ip = get_value(interface_output, "primaryIp")
# Combine IP address and subnet for primary IP
actual_primary_ip = f"{primary_ip['address']}/{primary_ip['maskLen']}"
# Check if the primary IP address matches the input
if actual_primary_ip != input_primary_ip:
failed_messages.append(f"The expected primary IP address is `{input_primary_ip}`, but the actual primary IP address is `{actual_primary_ip}`.")
if command.params["secondary_ips"] is not None:
input_secondary_ips = sorted([str(network) for network in command.params["secondary_ips"]])
secondary_ips = get_value(interface_output, "secondaryIpsOrderedList")
# Combine IP address and subnet for secondary IPs
actual_secondary_ips = sorted([f"{secondary_ip['address']}/{secondary_ip['maskLen']}" for secondary_ip in secondary_ips])
# Check if the secondary IP address is configured
if not actual_secondary_ips:
failed_messages.append(
f"The expected secondary IP addresses are `{input_secondary_ips}`, but the actual secondary IP address is not configured."
)
# Check if the secondary IP addresses match the input
elif actual_secondary_ips != input_secondary_ips:
failed_messages.append(
f"The expected secondary IP addresses are `{input_secondary_ips}`, but the actual secondary IP addresses are `{actual_secondary_ips}`."
)
if failed_messages:
self.result.is_failure(f"For interface `{intf}`, " + " ".join(failed_messages))
class VerifyIpVirtualRouterMac(AntaTest):
"""
Verifies the IP virtual router MAC address.
Expected Results:
* success: The test will pass if the IP virtual router MAC address matches the input.
* failure: The test will fail if the IP virtual router MAC address does not match the input.
"""
name = "VerifyIpVirtualRouterMac"
description = "Verifies the IP virtual router MAC address."
categories = ["interfaces"]
commands = [AntaCommand(command="show ip virtual-router")]
class Input(AntaTest.Input):
"""Inputs for the VerifyIpVirtualRouterMac test."""
mac_address: MacAddress
"""IP virtual router MAC address"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output["virtualMacs"]
mac_address_found = get_item(command_output, "macAddress", self.inputs.mac_address)
if mac_address_found is None:
self.result.is_failure(f"IP virtual router MAC address `{self.inputs.mac_address}` is not configured.")
else:
self.result.is_success()

34
anta/tests/lanz.py Normal file
View file

@ -0,0 +1,34 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to LANZ
"""
from __future__ import annotations
from anta.models import AntaCommand, AntaTest
class VerifyLANZ(AntaTest):
"""
Verifies if LANZ is enabled.
Expected results:
* success: The test will pass if LANZ is enabled.
* failure: The test will fail if LANZ is disabled.
"""
name = "VerifyLANZ"
description = "Verifies if LANZ is enabled."
categories = ["lanz"]
commands = [AntaCommand(command="show queue-monitor length status")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["lanzEnabled"] is not True:
self.result.is_failure("LANZ is not enabled")
else:
self.result.is_success("LANZ is enabled")

279
anta/tests/logging.py Normal file
View file

@ -0,0 +1,279 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS various logging settings
NOTE: 'show logging' does not support json output yet
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
import logging
import re
from ipaddress import IPv4Address
# Need to keep List for pydantic in python 3.8
from typing import List
from anta.models import AntaCommand, AntaTest
def _get_logging_states(logger: logging.Logger, command_output: str) -> str:
"""
Parse "show logging" output and gets operational logging states used
in the tests in this module.
Args:
command_output: The 'show logging' output
"""
log_states = command_output.partition("\n\nExternal configuration:")[0]
logger.debug(f"Device logging states:\n{log_states}")
return log_states
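# Illustrative sketch (not part of the upstream module): the helper above keeps only the
# text that precedes the "External configuration:" block of 'show logging'. The sample
# output below is a trimmed, hypothetical approximation of that text layout.
_sample_show_logging = (
    "Syslog logging: enabled\n"
    "Persistent logging: disabled\n"
    "\n"
    "External configuration:\n"
    "    active:\n"
)
_states = _sample_show_logging.partition("\n\nExternal configuration:")[0]
assert "Persistent logging: disabled" in _states
assert "External configuration" not in _states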
class VerifyLoggingPersistent(AntaTest):
"""
Verifies if logging persistent is enabled and logs are saved in flash.
Expected Results:
* success: The test will pass if logging persistent is enabled and logs are in flash.
* failure: The test will fail if logging persistent is disabled or no logs are saved in flash.
"""
name = "VerifyLoggingPersistent"
description = "Verifies if logging persistent is enabled and logs are saved in flash."
categories = ["logging"]
commands = [
AntaCommand(command="show logging", ofmt="text"),
AntaCommand(command="dir flash:/persist/messages", ofmt="text"),
]
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
log_output = self.instance_commands[0].text_output
dir_flash_output = self.instance_commands[1].text_output
if "Persistent logging: disabled" in _get_logging_states(self.logger, log_output):
self.result.is_failure("Persistent logging is disabled")
return
pattern = r"-rw-\s+(\d+)"
persist_logs = re.search(pattern, dir_flash_output)
if not persist_logs or int(persist_logs.group(1)) == 0:
self.result.is_failure("No persistent logs are saved in flash")
class VerifyLoggingSourceIntf(AntaTest):
"""
Verifies logging source-interface for a specified VRF.
Expected Results:
* success: The test will pass if the provided logging source-interface is configured in the specified VRF.
* failure: The test will fail if the provided logging source-interface is NOT configured in the specified VRF.
"""
name = "VerifyLoggingSourceInt"
description = "Verifies logging source-interface for a specified VRF."
categories = ["logging"]
commands = [AntaCommand(command="show logging", ofmt="text")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
interface: str
"""Source-interface to use as source IP of log messages"""
vrf: str = "default"
"""The name of the VRF to transport log messages"""
@AntaTest.anta_test
def test(self) -> None:
output = self.instance_commands[0].text_output
pattern = rf"Logging source-interface '{self.inputs.interface}'.*VRF {self.inputs.vrf}"
if re.search(pattern, _get_logging_states(self.logger, output)):
self.result.is_success()
else:
self.result.is_failure(f"Source-interface '{self.inputs.interface}' is not configured in VRF {self.inputs.vrf}")
class VerifyLoggingHosts(AntaTest):
"""
Verifies logging hosts (syslog servers) for a specified VRF.
Expected Results:
* success: The test will pass if the provided syslog servers are configured in the specified VRF.
* failure: The test will fail if the provided syslog servers are NOT configured in the specified VRF.
"""
name = "VerifyLoggingHosts"
description = "Verifies logging hosts (syslog servers) for a specified VRF."
categories = ["logging"]
commands = [AntaCommand(command="show logging", ofmt="text")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
hosts: List[IPv4Address]
"""List of hosts (syslog servers) IP addresses"""
vrf: str = "default"
"""The name of the VRF to transport log messages"""
@AntaTest.anta_test
def test(self) -> None:
output = self.instance_commands[0].text_output
not_configured = []
for host in self.inputs.hosts:
pattern = rf"Logging to '{str(host)}'.*VRF {self.inputs.vrf}"
if not re.search(pattern, _get_logging_states(self.logger, output)):
not_configured.append(str(host))
if not not_configured:
self.result.is_success()
else:
self.result.is_failure(f"Syslog servers {not_configured} are not configured in VRF {self.inputs.vrf}")
class VerifyLoggingLogsGeneration(AntaTest):
"""
Verifies if logs are generated.
Expected Results:
* success: The test will pass if logs are generated.
* failure: The test will fail if logs are NOT generated.
"""
name = "VerifyLoggingLogsGeneration"
description = "Verifies if logs are generated."
categories = ["logging"]
commands = [
AntaCommand(command="send log level informational message ANTA VerifyLoggingLogsGeneration validation"),
AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False),
]
@AntaTest.anta_test
def test(self) -> None:
log_pattern = r"ANTA VerifyLoggingLogsGeneration validation"
output = self.instance_commands[1].text_output
lines = output.strip().split("\n")[::-1]
for line in lines:
if re.search(log_pattern, line):
self.result.is_success()
return
self.result.is_failure("Logs are not generated")
class VerifyLoggingHostname(AntaTest):
"""
Verifies if logs are generated with the device FQDN.
Expected Results:
* success: The test will pass if logs are generated with the device FQDN.
* failure: The test will fail if logs are NOT generated with the device FQDN.
"""
name = "VerifyLoggingHostname"
description = "Verifies if logs are generated with the device FQDN."
categories = ["logging"]
commands = [
AntaCommand(command="show hostname"),
AntaCommand(command="send log level informational message ANTA VerifyLoggingHostname validation"),
AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False),
]
@AntaTest.anta_test
def test(self) -> None:
output_hostname = self.instance_commands[0].json_output
output_logging = self.instance_commands[2].text_output
fqdn = output_hostname["fqdn"]
lines = output_logging.strip().split("\n")[::-1]
log_pattern = r"ANTA VerifyLoggingHostname validation"
last_line_with_pattern = ""
for line in lines:
if re.search(log_pattern, line):
last_line_with_pattern = line
break
if fqdn in last_line_with_pattern:
self.result.is_success()
else:
self.result.is_failure("Logs are not generated with the device FQDN")
class VerifyLoggingTimestamp(AntaTest):
"""
Verifies if logs are generated with the appropriate timestamp.
Expected Results:
* success: The test will pass if logs are generated with the appropriate timestamp.
* failure: The test will fail if logs are NOT generated with the appropriate timestamp.
"""
name = "VerifyLoggingTimestamp"
description = "Verifies if logs are generated with the appropriate timestamp."
categories = ["logging"]
commands = [
AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation"),
AntaCommand(command="show logging informational last 30 seconds | grep ANTA", ofmt="text", use_cache=False),
]
@AntaTest.anta_test
def test(self) -> None:
log_pattern = r"ANTA VerifyLoggingTimestamp validation"
timestamp_pattern = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}-\d{2}:\d{2}"
output = self.instance_commands[1].text_output
lines = output.strip().split("\n")[::-1]
last_line_with_pattern = ""
for line in lines:
if re.search(log_pattern, line):
last_line_with_pattern = line
break
if re.search(timestamp_pattern, last_line_with_pattern):
self.result.is_success()
else:
self.result.is_failure("Logs are not generated with the appropriate timestamp format")
class VerifyLoggingAccounting(AntaTest):
"""
Verifies if AAA accounting logs are generated.
Expected Results:
* success: The test will pass if AAA accounting logs are generated.
* failure: The test will fail if AAA accounting logs are NOT generated.
"""
name = "VerifyLoggingAccounting"
description = "Verifies if AAA accounting logs are generated."
categories = ["logging"]
commands = [AntaCommand(command="show aaa accounting logs | tail", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
pattern = r"cmd=show aaa accounting logs"
output = self.instance_commands[0].text_output
if re.search(pattern, output):
self.result.is_success()
else:
self.result.is_failure("AAA accounting logs are not generated")
class VerifyLoggingErrors(AntaTest):
"""
This test verifies there are no syslog messages with a severity of ERRORS or higher.
Expected Results:
* success: The test will pass if there are NO syslog messages with a severity of ERRORS or higher.
* failure: The test will fail if ERRORS or higher syslog messages are present.
"""
name = "VerifyLoggingWarning"
description = "This test verifies there are no syslog messages with a severity of ERRORS or higher."
categories = ["logging"]
commands = [AntaCommand(command="show logging threshold errors", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
"""
Run VerifyLoggingErrors validation
"""
command_output = self.instance_commands[0].text_output
if len(command_output) == 0:
self.result.is_success()
else:
self.result.is_failure("Device has reported syslog messages with a severity of ERRORS or higher")

239
anta/tests/mlag.py Normal file
View file

@ -0,0 +1,239 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to Multi-chassis Link Aggregation (MLAG)
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from pydantic import conint
from anta.custom_types import MlagPriority
from anta.models import AntaCommand, AntaTest
from anta.tools.get_value import get_value
class VerifyMlagStatus(AntaTest):
"""
This test verifies the health status of the MLAG configuration.
Expected Results:
* success: The test will pass if the MLAG state is 'active', negotiation status is 'connected',
peer-link status and local interface status are 'up'.
* failure: The test will fail if the MLAG state is not 'active', negotiation status is not 'connected',
peer-link status or local interface status are not 'up'.
* skipped: The test will be skipped if MLAG is 'disabled'.
"""
name = "VerifyMlagStatus"
description = "Verifies the health status of the MLAG configuration."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["state"] == "disabled":
self.result.is_skipped("MLAG is disabled")
return
keys_to_verify = ["state", "negStatus", "localIntfStatus", "peerLinkStatus"]
verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
if (
verified_output["state"] == "active"
and verified_output["negStatus"] == "connected"
and verified_output["localIntfStatus"] == "up"
and verified_output["peerLinkStatus"] == "up"
):
self.result.is_success()
else:
self.result.is_failure(f"MLAG status is not OK: {verified_output}")
class VerifyMlagInterfaces(AntaTest):
"""
This test verifies there are no inactive or active-partial MLAG ports.
Expected Results:
* success: The test will pass if there are NO inactive or active-partial MLAG ports.
* failure: The test will fail if there are inactive or active-partial MLAG ports.
* skipped: The test will be skipped if MLAG is 'disabled'.
"""
name = "VerifyMlagInterfaces"
description = "Verifies there are no inactive or active-partial MLAG ports."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["state"] == "disabled":
self.result.is_skipped("MLAG is disabled")
return
if command_output["mlagPorts"]["Inactive"] == 0 and command_output["mlagPorts"]["Active-partial"] == 0:
self.result.is_success()
else:
self.result.is_failure(f"MLAG status is not OK: {command_output['mlagPorts']}")
class VerifyMlagConfigSanity(AntaTest):
"""
This test verifies there are no MLAG config-sanity inconsistencies.
Expected Results:
* success: The test will pass if there are NO MLAG config-sanity inconsistencies.
* failure: The test will fail if there are MLAG config-sanity inconsistencies.
* skipped: The test will be skipped if MLAG is 'disabled'.
* error: The test will give an error if 'mlagActive' is not found in the JSON response.
"""
name = "VerifyMlagConfigSanity"
description = "Verifies there are no MLAG config-sanity inconsistencies."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag config-sanity", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if (mlag_status := get_value(command_output, "mlagActive")) is None:
self.result.is_error(message="Incorrect JSON response - 'mlagActive' state was not found")
return
if mlag_status is False:
self.result.is_skipped("MLAG is disabled")
return
keys_to_verify = ["globalConfiguration", "interfaceConfiguration"]
verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
if not any(verified_output.values()):
self.result.is_success()
else:
self.result.is_failure(f"MLAG config-sanity returned inconsistencies: {verified_output}")
class VerifyMlagReloadDelay(AntaTest):
"""
This test verifies the reload-delay parameters of the MLAG configuration.
Expected Results:
* success: The test will pass if the reload-delay parameters are configured properly.
* failure: The test will fail if the reload-delay parameters are NOT configured properly.
* skipped: The test will be skipped if MLAG is 'disabled'.
"""
name = "VerifyMlagReloadDelay"
description = "Verifies the MLAG reload-delay parameters."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
reload_delay: conint(ge=0) # type: ignore
"""Delay (seconds) after reboot until non peer-link ports that are part of an MLAG are enabled"""
reload_delay_non_mlag: conint(ge=0) # type: ignore
"""Delay (seconds) after reboot until ports that are not part of an MLAG are enabled"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["state"] == "disabled":
self.result.is_skipped("MLAG is disabled")
return
keys_to_verify = ["reloadDelay", "reloadDelayNonMlag"]
verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
if verified_output["reloadDelay"] == self.inputs.reload_delay and verified_output["reloadDelayNonMlag"] == self.inputs.reload_delay_non_mlag:
self.result.is_success()
else:
self.result.is_failure(f"The reload-delay parameters are not configured properly: {verified_output}")
class VerifyMlagDualPrimary(AntaTest):
"""
This test verifies the dual-primary detection and its parameters of the MLAG configuration.
Expected Results:
* success: The test will pass if the dual-primary detection is enabled and its parameters are configured properly.
* failure: The test will fail if the dual-primary detection is NOT enabled or its parameters are NOT configured properly.
* skipped: The test will be skipped if MLAG is 'disabled'.
"""
name = "VerifyMlagDualPrimary"
description = "Verifies the MLAG dual-primary detection parameters."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag detail", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
detection_delay: conint(ge=0) # type: ignore
"""Delay detection (seconds)"""
errdisabled: bool = False
"""Errdisabled all interfaces when dual-primary is detected"""
recovery_delay: conint(ge=0) # type: ignore
"""Delay (seconds) after dual-primary detection resolves until non peer-link ports that are part of an MLAG are enabled"""
recovery_delay_non_mlag: conint(ge=0) # type: ignore
"""Delay (seconds) after dual-primary detection resolves until ports that are not part of an MLAG are enabled"""
@AntaTest.anta_test
def test(self) -> None:
errdisabled_action = "errdisableAllInterfaces" if self.inputs.errdisabled else "none"
command_output = self.instance_commands[0].json_output
if command_output["state"] == "disabled":
self.result.is_skipped("MLAG is disabled")
return
if command_output["dualPrimaryDetectionState"] == "disabled":
self.result.is_failure("Dual-primary detection is disabled")
return
keys_to_verify = ["detail.dualPrimaryDetectionDelay", "detail.dualPrimaryAction", "dualPrimaryMlagRecoveryDelay", "dualPrimaryNonMlagRecoveryDelay"]
verified_output = {key: get_value(command_output, key) for key in keys_to_verify}
if (
verified_output["detail.dualPrimaryDetectionDelay"] == self.inputs.detection_delay
and verified_output["detail.dualPrimaryAction"] == errdisabled_action
and verified_output["dualPrimaryMlagRecoveryDelay"] == self.inputs.recovery_delay
and verified_output["dualPrimaryNonMlagRecoveryDelay"] == self.inputs.recovery_delay_non_mlag
):
self.result.is_success()
else:
self.result.is_failure(f"The dual-primary parameters are not configured properly: {verified_output}")
class VerifyMlagPrimaryPriority(AntaTest):
"""
Test class to verify the MLAG (Multi-Chassis Link Aggregation) primary priority.
Expected Results:
* Success: The test will pass if the MLAG state is set as 'primary' and the priority matches the input.
* Failure: The test will fail if the MLAG state is not 'primary' or the priority doesn't match the input.
* Skipped: The test will be skipped if MLAG is 'disabled'.
"""
name = "VerifyMlagPrimaryPriority"
description = "Verifies the configuration of the MLAG primary priority."
categories = ["mlag"]
commands = [AntaCommand(command="show mlag detail")]
class Input(AntaTest.Input):
"""Inputs for the VerifyMlagPrimaryPriority test."""
primary_priority: MlagPriority
"""The expected MLAG primary priority."""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
self.result.is_success()
# Skip the test if MLAG is disabled
if command_output["state"] == "disabled":
self.result.is_skipped("MLAG is disabled")
return
mlag_state = get_value(command_output, "detail.mlagState")
primary_priority = get_value(command_output, "detail.primaryPriority")
# Check MLAG state
if mlag_state != "primary":
self.result.is_failure("The device is not set as MLAG primary.")
# Check primary priority
if primary_priority != self.inputs.primary_priority:
self.result.is_failure(
f"The primary priority does not match expected. Expected `{self.inputs.primary_priority}`, but found `{primary_priority}` instead."
)

66
anta/tests/multicast.py Normal file
View file

@ -0,0 +1,66 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to multicast
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
# Need to keep Dict for pydantic in python 3.8
from typing import Dict
from anta.custom_types import Vlan
from anta.models import AntaCommand, AntaTest
class VerifyIGMPSnoopingVlans(AntaTest):
"""
Verifies the IGMP snooping configuration for some VLANs.
"""
name = "VerifyIGMPSnoopingVlans"
description = "Verifies the IGMP snooping configuration for some VLANs."
categories = ["multicast", "igmp"]
commands = [AntaCommand(command="show ip igmp snooping")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
vlans: Dict[Vlan, bool]
"""Dictionary of VLANs with associated IGMP configuration status (True=enabled, False=disabled)"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
self.result.is_success()
for vlan, enabled in self.inputs.vlans.items():
if str(vlan) not in command_output["vlans"]:
self.result.is_failure(f"Supplied vlan {vlan} is not present on the device.")
continue
igmp_state = command_output["vlans"][str(vlan)]["igmpSnoopingState"]
if igmp_state != "enabled" if enabled else igmp_state != "disabled":
self.result.is_failure(f"IGMP state for vlan {vlan} is {igmp_state}")
class VerifyIGMPSnoopingGlobal(AntaTest):
"""
Verifies the IGMP snooping global configuration.
"""
name = "VerifyIGMPSnoopingGlobal"
description = "Verifies the IGMP snooping global configuration."
categories = ["multicast", "igmp"]
commands = [AntaCommand(command="show ip igmp snooping")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
enabled: bool
"""Expected global IGMP snooping configuration (True=enabled, False=disabled)"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
self.result.is_success()
igmp_state = command_output["igmpSnoopingState"]
if igmp_state != "enabled" if self.inputs.enabled else igmp_state != "disabled":
self.result.is_failure(f"IGMP state is not valid: {igmp_state}")

62
anta/tests/profiles.py Normal file
View file

@ -0,0 +1,62 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to ASIC profiles
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from typing import Literal
from anta.decorators import skip_on_platforms
from anta.models import AntaCommand, AntaTest
class VerifyUnifiedForwardingTableMode(AntaTest):
"""
Verifies the device is using the expected Unified Forwarding Table mode.
"""
name = "VerifyUnifiedForwardingTableMode"
description = ""
categories = ["profiles"]
commands = [AntaCommand(command="show platform trident forwarding-table partition", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
mode: Literal[0, 1, 2, 3, 4, "flexible"]
"""Expected UFT mode"""
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["uftMode"] == str(self.inputs.mode):
self.result.is_success()
else:
self.result.is_failure(f"Device is not running correct UFT mode (expected: {self.inputs.mode} / running: {command_output['uftMode']})")
class VerifyTcamProfile(AntaTest):
"""
Verifies the device is using the configured TCAM profile.
"""
name = "VerifyTcamProfile"
description = "Verify that the assigned TCAM profile is actually running on the device"
categories = ["profiles"]
commands = [AntaCommand(command="show hardware tcam profile", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
profile: str
"""Expected TCAM profile"""
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["pmfProfiles"]["FixedSystem"]["status"] == command_output["pmfProfiles"]["FixedSystem"]["config"] == self.inputs.profile:
self.result.is_success()
else:
self.result.is_failure(f"Incorrect profile running on device: {command_output['pmfProfiles']['FixedSystem']['status']}")

33
anta/tests/ptp.py Normal file
View file

@ -0,0 +1,33 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to PTP (Precision Time Protocol) in EOS
"""
from __future__ import annotations
from anta.models import AntaCommand, AntaTest
class VerifyPtpStatus(AntaTest):
"""
Verifies whether the PTP agent is enabled globally.
Expected Results:
* success: The test will pass if the PTP agent is enabled globally.
* failure: The test will fail if the PTP agent is NOT enabled globally.
"""
name = "VerifyPtpStatus"
description = "Verifies if the PTP agent is enabled."
categories = ["ptp"]
commands = [AntaCommand(command="show ptp")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if "ptpMode" in command_output.keys():
self.result.is_success()
else:
self.result.is_failure("PTP agent disabled")

3
anta/tests/routing/__init__.py Normal file
View file

@ -0,0 +1,3 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.

1003
anta/tests/routing/bgp.py Normal file

File diff suppressed because it is too large

118
anta/tests/routing/generic.py Normal file
View file

@ -0,0 +1,118 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Generic routing test functions
"""
from __future__ import annotations
from ipaddress import IPv4Address, ip_interface
# Need to keep List for pydantic in python 3.8
from typing import List, Literal
from pydantic import model_validator
from anta.models import AntaCommand, AntaTemplate, AntaTest
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
class VerifyRoutingProtocolModel(AntaTest):
"""
Verifies that the configured routing protocol model is the expected one and that
the configured and operating routing protocol models match.
"""
name = "VerifyRoutingProtocolModel"
description = "Verifies the configured routing protocol model."
categories = ["routing"]
commands = [AntaCommand(command="show ip route summary", revision=3)]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
model: Literal["multi-agent", "ribd"] = "multi-agent"
"""Expected routing protocol model"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
configured_model = command_output["protoModelStatus"]["configuredProtoModel"]
operating_model = command_output["protoModelStatus"]["operatingProtoModel"]
if configured_model == operating_model == self.inputs.model:
self.result.is_success()
else:
self.result.is_failure(f"routing model is misconfigured: configured: {configured_model} - operating: {operating_model} - expected: {self.inputs.model}")
class VerifyRoutingTableSize(AntaTest):
"""
Verifies the size of the IP routing table (default VRF).
Should be between the two provided thresholds.
"""
name = "VerifyRoutingTableSize"
description = "Verifies the size of the IP routing table (default VRF). Should be between the two provided thresholds."
categories = ["routing"]
commands = [AntaCommand(command="show ip route summary", revision=3)]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
minimum: int
"""Expected minimum routing table (default VRF) size"""
maximum: int
"""Expected maximum routing table (default VRF) size"""
@model_validator(mode="after") # type: ignore
def check_min_max(self) -> AntaTest.Input:
"""Validate that maximum is greater than minimum"""
if self.minimum > self.maximum:
raise ValueError(f"Minimum {self.minimum} is greater than maximum {self.maximum}")
return self
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
total_routes = int(command_output["vrfs"]["default"]["totalRoutes"])
if self.inputs.minimum <= total_routes <= self.inputs.maximum:
self.result.is_success()
else:
self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({self.inputs.minimum}) and maximum ({self.inputs.maximum})")
class VerifyRoutingTableEntry(AntaTest):
"""
This test verifies that the provided routes are present in the routing table of a specified VRF.
Expected Results:
* success: The test will pass if the provided routes are present in the routing table.
* failure: The test will fail if one or many provided routes are missing from the routing table.
"""
name = "VerifyRoutingTableEntry"
description = "Verifies that the provided routes are present in the routing table of a specified VRF."
categories = ["routing"]
commands = [AntaTemplate(template="show ip route vrf {vrf} {route}")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
vrf: str = "default"
"""VRF context"""
routes: List[IPv4Address]
"""Routes to verify"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(vrf=self.inputs.vrf, route=route) for route in self.inputs.routes]
@AntaTest.anta_test
def test(self) -> None:
missing_routes = []
for command in self.instance_commands:
if "vrf" in command.params and "route" in command.params:
vrf, route = command.params["vrf"], command.params["route"]
if len(routes := command.json_output["vrfs"][vrf]["routes"]) == 0 or route != ip_interface(list(routes)[0]).ip:
missing_routes.append(str(route))
if not missing_routes:
self.result.is_success()
else:
self.result.is_failure(f"The following route(s) are missing from the routing table of VRF {self.inputs.vrf}: {missing_routes}")

95
anta/tests/routing/ospf.py Normal file
View file

@ -0,0 +1,95 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
OSPF test functions
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from typing import Any
from anta.models import AntaCommand, AntaTest
def _count_ospf_neighbor(ospf_neighbor_json: dict[str, Any]) -> int:
"""
Count the number of OSPF neighbors
"""
count = 0
for _, vrf_data in ospf_neighbor_json["vrfs"].items():
for _, instance_data in vrf_data["instList"].items():
count += len(instance_data.get("ospfNeighborEntries", []))
return count
def _get_not_full_ospf_neighbors(ospf_neighbor_json: dict[str, Any]) -> list[dict[str, Any]]:
"""
Return the OSPF neighbors whose adjacency state is not "full"
"""
not_full_neighbors = []
for vrf, vrf_data in ospf_neighbor_json["vrfs"].items():
for instance, instance_data in vrf_data["instList"].items():
for neighbor_data in instance_data.get("ospfNeighborEntries", []):
if (state := neighbor_data["adjacencyState"]) != "full":
not_full_neighbors.append(
{
"vrf": vrf,
"instance": instance,
"neighbor": neighbor_data["routerId"],
"state": state,
}
)
return not_full_neighbors
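# Illustrative return value of _get_not_full_ospf_neighbors() (made-up values, shown only to
# document the structure built above):
#   [{"vrf": "default", "instance": "100", "neighbor": "10.255.0.2", "state": "2-way"}]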
class VerifyOSPFNeighborState(AntaTest):
"""
Verifies all OSPF neighbors are in FULL state.
"""
name = "VerifyOSPFNeighborState"
description = "Verifies all OSPF neighbors are in FULL state."
categories = ["ospf"]
commands = [AntaCommand(command="show ip ospf neighbor")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if _count_ospf_neighbor(command_output) == 0:
self.result.is_skipped("no OSPF neighbor found")
return
self.result.is_success()
not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
if not_full_neighbors:
self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")
class VerifyOSPFNeighborCount(AntaTest):
"""
Verifies the number of OSPF neighbors in FULL state is the one we expect.
"""
name = "VerifyOSPFNeighborCount"
description = "Verifies the number of OSPF neighbors in FULL state is the one we expect."
categories = ["ospf"]
commands = [AntaCommand(command="show ip ospf neighbor")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: int
"""The expected number of OSPF neighbors in FULL state"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if (neighbor_count := _count_ospf_neighbor(command_output)) == 0:
self.result.is_skipped("no OSPF neighbor found")
return
self.result.is_success()
if neighbor_count != self.inputs.number:
self.result.is_failure(f"device has {neighbor_count} neighbors (expected {self.inputs.number})")
not_full_neighbors = _get_not_full_ospf_neighbors(command_output)
if not_full_neighbors:
self.result.is_failure(f"Some neighbors are not correctly configured: {not_full_neighbors}.")

514
anta/tests/security.py Normal file
View file

@ -0,0 +1,514 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS various security settings
"""
from __future__ import annotations
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from datetime import datetime
from typing import List, Union
from pydantic import BaseModel, Field, conint, model_validator
from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, RsaKeySize
from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools.get_item import get_item
from anta.tools.get_value import get_value
from anta.tools.utils import get_failed_logs
class VerifySSHStatus(AntaTest):
"""
Verifies if the SSHD agent is disabled in the default VRF.
Expected Results:
* success: The test will pass if the SSHD agent is disabled in the default VRF.
* failure: The test will fail if the SSHD agent is NOT disabled in the default VRF.
"""
name = "VerifySSHStatus"
description = "Verifies if the SSHD agent is disabled in the default VRF."
categories = ["security"]
commands = [AntaCommand(command="show management ssh", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
line = [line for line in command_output.split("\n") if line.startswith("SSHD status")][0]
status = line.split("is ")[1]
if status == "disabled":
self.result.is_success()
else:
self.result.is_failure(line)
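# The text output parsed above is assumed to contain a line similar to (illustrative):
#   SSHD status for Default VRF is disabled
# from which the code keeps everything after "is ", i.e. "disabled".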
class VerifySSHIPv4Acl(AntaTest):
"""
Verifies if the SSHD agent has the right number of IPv4 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if the SSHD agent has the provided number of IPv4 ACL(s) in the specified VRF.
* failure: The test will fail if the SSHD agent does not have the right number of IPv4 ACL(s) in the specified VRF.
"""
name = "VerifySSHIPv4Acl"
description = "Verifies if the SSHD agent has IPv4 ACL(s) configured."
categories = ["security"]
commands = [AntaCommand(command="show management ssh ip access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv4 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for the SSHD agent"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv4_acl_list = command_output["ipAclList"]["aclList"]
ipv4_acl_number = len(ipv4_acl_list)
not_configured_acl_list = []
if ipv4_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} SSH IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}")
return
for ipv4_acl in ipv4_acl_list:
if self.inputs.vrf not in ipv4_acl["configuredVrfs"] or self.inputs.vrf not in ipv4_acl["activeVrfs"]:
not_configured_acl_list.append(ipv4_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"SSH IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifySSHIPv6Acl(AntaTest):
"""
Verifies if the SSHD agent has the right number of IPv6 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if the SSHD agent has the provided number of IPv6 ACL(s) in the specified VRF.
* failure: The test will fail if the SSHD agent does not have the right number of IPv6 ACL(s) in the specified VRF.
"""
name = "VerifySSHIPv6Acl"
description = "Verifies if the SSHD agent has IPv6 ACL(s) configured."
categories = ["security"]
commands = [AntaCommand(command="show management ssh ipv6 access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv6 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for the SSHD agent"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
ipv6_acl_number = len(ipv6_acl_list)
not_configured_acl_list = []
if ipv6_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} SSH IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}")
return
for ipv6_acl in ipv6_acl_list:
if self.inputs.vrf not in ipv6_acl["configuredVrfs"] or self.inputs.vrf not in ipv6_acl["activeVrfs"]:
not_configured_acl_list.append(ipv6_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"SSH IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifyTelnetStatus(AntaTest):
"""
Verifies if Telnet is disabled in the default VRF.
Expected Results:
* success: The test will pass if Telnet is disabled in the default VRF.
* failure: The test will fail if Telnet is NOT disabled in the default VRF.
"""
name = "VerifyTelnetStatus"
description = "Verifies if Telnet is disabled in the default VRF."
categories = ["security"]
commands = [AntaCommand(command="show management telnet")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["serverState"] == "disabled":
self.result.is_success()
else:
self.result.is_failure("Telnet status for Default VRF is enabled")
class VerifyAPIHttpStatus(AntaTest):
"""
Verifies if eAPI HTTP server is disabled globally.
Expected Results:
* success: The test will pass if eAPI HTTP server is disabled globally.
* failure: The test will fail if eAPI HTTP server is NOT disabled globally.
"""
name = "VerifyAPIHttpStatus"
description = "Verifies if eAPI HTTP server is disabled globally."
categories = ["security"]
commands = [AntaCommand(command="show management api http-commands")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["enabled"] and not command_output["httpServer"]["running"]:
self.result.is_success()
else:
self.result.is_failure("eAPI HTTP server is enabled globally")
class VerifyAPIHttpsSSL(AntaTest):
"""
Verifies if eAPI HTTPS server SSL profile is configured and valid.
Expected results:
* success: The test will pass if the eAPI HTTPS server SSL profile is configured and valid.
* failure: The test will fail if the eAPI HTTPS server SSL profile is NOT configured, misconfigured or invalid.
"""
name = "VerifyAPIHttpsSSL"
description = "Verifies if the eAPI has a valid SSL profile."
categories = ["security"]
commands = [AntaCommand(command="show management api http-commands")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
profile: str
"""SSL profile to verify"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
try:
if command_output["sslProfile"]["name"] == self.inputs.profile and command_output["sslProfile"]["state"] == "valid":
self.result.is_success()
else:
self.result.is_failure(f"eAPI HTTPS server SSL profile ({self.inputs.profile}) is misconfigured or invalid")
except KeyError:
self.result.is_failure(f"eAPI HTTPS server SSL profile ({self.inputs.profile}) is not configured")
class VerifyAPIIPv4Acl(AntaTest):
"""
Verifies if eAPI has the right number of IPv4 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if eAPI has the provided number of IPv4 ACL(s) in the specified VRF.
* failure: The test will fail if eAPI does not have the right number of IPv4 ACL(s) in the specified VRF.
"""
name = "VerifyAPIIPv4Acl"
description = "Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF."
categories = ["security"]
commands = [AntaCommand(command="show management api http-commands ip access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv4 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for eAPI"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv4_acl_list = command_output["ipAclList"]["aclList"]
ipv4_acl_number = len(ipv4_acl_list)
not_configured_acl_list = []
if ipv4_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} eAPI IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}")
return
for ipv4_acl in ipv4_acl_list:
if self.inputs.vrf not in ipv4_acl["configuredVrfs"] or self.inputs.vrf not in ipv4_acl["activeVrfs"]:
not_configured_acl_list.append(ipv4_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"eAPI IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifyAPIIPv6Acl(AntaTest):
"""
Verifies if eAPI has the right number of IPv6 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if eAPI has the provided number of IPv6 ACL(s) in the specified VRF.
* failure: The test will fail if eAPI does not have the right number of IPv6 ACL(s) in the specified VRF.
"""
name = "VerifyAPIIPv6Acl"
description = "Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF."
categories = ["security"]
commands = [AntaCommand(command="show management api http-commands ipv6 access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv6 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for eAPI"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
ipv6_acl_number = len(ipv6_acl_list)
not_configured_acl_list = []
if ipv6_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} eAPI IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}")
return
for ipv6_acl in ipv6_acl_list:
if self.inputs.vrf not in ipv6_acl["configuredVrfs"] or self.inputs.vrf not in ipv6_acl["activeVrfs"]:
not_configured_acl_list.append(ipv6_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"eAPI IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifyAPISSLCertificate(AntaTest):
"""
Verifies the eAPI SSL certificate expiry, common subject name, encryption algorithm and key size.
Expected Results:
* success: The test will pass if the certificate's expiry date is greater than the threshold,
and the certificate has the correct name, encryption algorithm, and key size.
* failure: The test will fail if the certificate is expired or is going to expire,
or if the certificate has an incorrect name, encryption algorithm, or key size.
"""
name = "VerifyAPISSLCertificate"
description = "Verifies the eAPI SSL certificate expiry, common subject name, encryption algorithm and key size."
categories = ["security"]
commands = [AntaCommand(command="show management security ssl certificate"), AntaCommand(command="show clock")]
class Input(AntaTest.Input):
"""
Input parameters for the VerifyAPISSLCertificate test.
"""
certificates: List[APISSLCertificates]
"""List of API SSL certificates"""
class APISSLCertificates(BaseModel):
"""
This class defines the details of an API SSL certificate.
"""
certificate_name: str
"""The name of the certificate to be verified."""
expiry_threshold: int
"""The expiry threshold of the certificate in days."""
common_name: str
"""The common subject name of the certificate."""
encryption_algorithm: EncryptionAlgorithm
"""The encryption algorithm of the certificate."""
key_size: Union[RsaKeySize, EcdsaKeySize]
"""The encryption algorithm key size of the certificate."""
@model_validator(mode="after")
def validate_inputs(self: BaseModel) -> BaseModel:
"""
Validate the key size provided to the APISSLCertificates class.
If encryption_algorithm is RSA then key_size should be in {2048, 3072, 4096}.
If encryption_algorithm is ECDSA then key_size should be in {256, 384, 521}.
"""
if self.encryption_algorithm == "RSA" and self.key_size not in RsaKeySize.__args__:
raise ValueError(f"`{self.certificate_name}` key size {self.key_size} is invalid for RSA encryption. Allowed sizes are {RsaKeySize.__args__}.")
if self.encryption_algorithm == "ECDSA" and self.key_size not in EcdsaKeySize.__args__:
raise ValueError(
f"`{self.certificate_name}` key size {self.key_size} is invalid for ECDSA encryption. Allowed sizes are {EcdsaKeySize.__args__}."
)
return self
@AntaTest.anta_test
def test(self) -> None:
# Mark the result as success by default
self.result.is_success()
# Extract certificate and clock output
certificate_output = self.instance_commands[0].json_output
clock_output = self.instance_commands[1].json_output
current_timestamp = clock_output["utcTime"]
# Iterate over each API SSL certificate
for certificate in self.inputs.certificates:
# Collecting certificate expiry time and current EOS time.
# These times are used to calculate the number of days until the certificate expires.
if not (certificate_data := get_value(certificate_output, f"certificates..{certificate.certificate_name}", separator="..")):
self.result.is_failure(f"SSL certificate '{certificate.certificate_name}', is not configured.\n")
continue
expiry_time = certificate_data["notAfter"]
day_difference = (datetime.fromtimestamp(expiry_time) - datetime.fromtimestamp(current_timestamp)).days
# Verify certificate expiry
if 0 < day_difference < certificate.expiry_threshold:
self.result.is_failure(f"SSL certificate `{certificate.certificate_name}` is about to expire in {day_difference} days.\n")
elif day_difference < 0:
self.result.is_failure(f"SSL certificate `{certificate.certificate_name}` is expired.\n")
# Verify certificate common subject name, encryption algorithm and key size
keys_to_verify = ["subject.commonName", "publicKey.encryptionAlgorithm", "publicKey.size"]
actual_certificate_details = {key: get_value(certificate_data, key) for key in keys_to_verify}
expected_certificate_details = {
"subject.commonName": certificate.common_name,
"publicKey.encryptionAlgorithm": certificate.encryption_algorithm,
"publicKey.size": certificate.key_size,
}
if actual_certificate_details != expected_certificate_details:
failed_log = f"SSL certificate `{certificate.certificate_name}` is not configured properly:"
failed_log += get_failed_logs(expected_certificate_details, actual_certificate_details)
self.result.is_failure(f"{failed_log}\n")
class VerifyBannerLogin(AntaTest):
"""
Verifies the login banner of a device.
Expected results:
* success: The test will pass if the login banner matches the provided input.
* failure: The test will fail if the login banner does not match the provided input.
"""
name = "VerifyBannerLogin"
description = "Verifies the login banner of a device."
categories = ["security"]
commands = [AntaCommand(command="show banner login")]
class Input(AntaTest.Input):
"""Defines the input parameters for this test case."""
login_banner: str
"""Expected login banner of the device."""
@AntaTest.anta_test
def test(self) -> None:
login_banner = self.instance_commands[0].json_output["loginBanner"]
# Remove leading and trailing whitespaces from each line
cleaned_banner = "\n".join(line.strip() for line in self.inputs.login_banner.split("\n"))
if login_banner != cleaned_banner:
self.result.is_failure(f"Expected `{cleaned_banner}` as the login banner, but found `{login_banner}` instead.")
else:
self.result.is_success()
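# Example of the whitespace normalization applied to the expected banner above (illustrative):
#   "  * Authorized access only *  \n  Violators will be prosecuted  "
# becomes
#   "* Authorized access only *\nViolators will be prosecuted"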
class VerifyBannerMotd(AntaTest):
"""
Verifies the motd banner of a device.
Expected results:
* success: The test will pass if the motd banner matches the provided input.
* failure: The test will fail if the motd banner does not match the provided input.
"""
name = "VerifyBannerMotd"
description = "Verifies the motd banner of a device."
categories = ["security"]
commands = [AntaCommand(command="show banner motd")]
class Input(AntaTest.Input):
"""Defines the input parameters for this test case."""
motd_banner: str
"""Expected motd banner of the device."""
@AntaTest.anta_test
def test(self) -> None:
motd_banner = self.instance_commands[0].json_output["motd"]
# Remove leading and trailing whitespaces from each line
cleaned_banner = "\n".join(line.strip() for line in self.inputs.motd_banner.split("\n"))
if motd_banner != cleaned_banner:
self.result.is_failure(f"Expected `{cleaned_banner}` as the motd banner, but found `{motd_banner}` instead.")
else:
self.result.is_success()
class VerifyIPv4ACL(AntaTest):
"""
Verifies the configuration of IPv4 ACLs.
Expected results:
* success: The test will pass if an IPv4 ACL is configured with the correct sequence entries.
* failure: The test will fail if an IPv4 ACL is not configured or entries are not in sequence.
"""
name = "VerifyIPv4ACL"
description = "Verifies the configuration of IPv4 ACLs."
categories = ["security"]
commands = [AntaTemplate(template="show ip access-lists {acl}")]
class Input(AntaTest.Input):
"""Inputs for the VerifyIPv4ACL test."""
ipv4_access_lists: List[IPv4ACL]
"""List of IPv4 ACLs to verify"""
class IPv4ACL(BaseModel):
"""Detail of IPv4 ACL"""
name: str
"""Name of IPv4 ACL"""
entries: List[IPv4ACLEntries]
"""List of IPv4 ACL entries"""
class IPv4ACLEntries(BaseModel):
"""IPv4 ACL entries details"""
sequence: int = Field(ge=1, le=4294967295)
"""Sequence number of an ACL entry"""
action: str
"""Action of an ACL entry"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(acl=acl.name, entries=acl.entries) for acl in self.inputs.ipv4_access_lists]
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
for command_output in self.instance_commands:
# Collecting input ACL details
acl_name = command_output.params["acl"]
acl_entries = command_output.params["entries"]
# Check if ACL is configured
ipv4_acl_list = command_output.json_output["aclList"]
if not ipv4_acl_list:
self.result.is_failure(f"{acl_name}: Not found")
continue
# Check if the sequence number is configured and has the correct action applied
failed_log = f"{acl_name}:\n"
for acl_entry in acl_entries:
acl_seq = acl_entry.sequence
acl_action = acl_entry.action
if (actual_entry := get_item(ipv4_acl_list[0]["sequence"], "sequenceNumber", acl_seq)) is None:
failed_log += f"Sequence number `{acl_seq}` is not found.\n"
continue
if actual_entry["text"] != acl_action:
failed_log += f"Expected `{acl_action}` as sequence number {acl_seq} action but found `{actual_entry['text']}` instead.\n"
if failed_log != f"{acl_name}:\n":
self.result.is_failure(f"{failed_log}")

199
anta/tests/services.py Normal file
View file

@ -0,0 +1,199 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS various services settings
"""
from __future__ import annotations
from ipaddress import IPv4Address, IPv6Address
from typing import List, Union
from pydantic import BaseModel, Field
from anta.custom_types import ErrDisableInterval, ErrDisableReasons
from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools.get_dict_superset import get_dict_superset
from anta.tools.get_item import get_item
from anta.tools.utils import get_failed_logs
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
class VerifyHostname(AntaTest):
"""
Verifies the hostname of a device.
Expected results:
* success: The test will pass if the hostname matches the provided input.
* failure: The test will fail if the hostname does not match the provided input.
"""
name = "VerifyHostname"
description = "Verifies the hostname of a device."
categories = ["services"]
commands = [AntaCommand(command="show hostname")]
class Input(AntaTest.Input):
"""Defines the input parameters for this test case."""
hostname: str
"""Expected hostname of the device."""
@AntaTest.anta_test
def test(self) -> None:
hostname = self.instance_commands[0].json_output["hostname"]
if hostname != self.inputs.hostname:
self.result.is_failure(f"Expected `{self.inputs.hostname}` as the hostname, but found `{hostname}` instead.")
else:
self.result.is_success()
class VerifyDNSLookup(AntaTest):
"""
This class verifies the DNS (Domain Name Service) name to IP address resolution.
Expected Results:
* success: The test will pass if a domain name is resolved to an IP address.
* failure: The test will fail if a domain name does not resolve to an IP address.
* error: This test will error out if a domain name is invalid.
"""
name = "VerifyDNSLookup"
description = "Verifies the DNS name to IP address resolution."
categories = ["services"]
commands = [AntaTemplate(template="bash timeout 10 nslookup {domain}")]
class Input(AntaTest.Input):
"""Inputs for the VerifyDNSLookup test."""
domain_names: List[str]
"""List of domain names"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(domain=domain_name) for domain_name in self.inputs.domain_names]
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
failed_domains = []
for command in self.instance_commands:
domain = command.params["domain"]
output = command.json_output["messages"][0]
if f"Can't find {domain}: No answer" in output:
failed_domains.append(domain)
if failed_domains:
self.result.is_failure(f"The following domain(s) are not resolved to an IP address: {', '.join(failed_domains)}")
class VerifyDNSServers(AntaTest):
"""
Verifies if the DNS (Domain Name Service) servers are correctly configured.
Expected Results:
* success: The test will pass if the DNS server specified in the input is configured with the correct VRF and priority.
* failure: The test will fail if the DNS server is not configured or if the VRF and priority of the DNS server do not match the input.
"""
name = "VerifyDNSServers"
description = "Verifies if the DNS servers are correctly configured."
categories = ["services"]
commands = [AntaCommand(command="show ip name-server")]
class Input(AntaTest.Input):
"""Inputs for the VerifyDNSServers test."""
dns_servers: List[DnsServers]
"""List of DNS servers to verify."""
class DnsServers(BaseModel):
"""DNS server details"""
server_address: Union[IPv4Address, IPv6Address]
"""The IPv4/IPv6 address of the DNS server."""
vrf: str = "default"
"""The VRF for the DNS server. Defaults to 'default' if not provided."""
priority: int = Field(ge=0, le=4)
"""The priority of the DNS server from 0 to 4, lower is first."""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output["nameServerConfigs"]
self.result.is_success()
for server in self.inputs.dns_servers:
address = str(server.server_address)
vrf = server.vrf
priority = server.priority
input_dict = {"ipAddr": address, "vrf": vrf}
if get_item(command_output, "ipAddr", address) is None:
self.result.is_failure(f"DNS server `{address}` is not configured with any VRF.")
continue
if (output := get_dict_superset(command_output, input_dict)) is None:
self.result.is_failure(f"DNS server `{address}` is not configured with VRF `{vrf}`.")
continue
if output["priority"] != priority:
self.result.is_failure(f"For DNS server `{address}`, the expected priority is `{priority}`, but `{output['priority']}` was found instead.")
class VerifyErrdisableRecovery(AntaTest):
"""
Verifies the errdisable recovery reason, status, and interval.
Expected Results:
* Success: The test will pass if the errdisable recovery reason status is enabled and the interval matches the input.
* Failure: The test will fail if the errdisable recovery reason is not found, the status is not enabled, or the interval does not match the input.
"""
name = "VerifyErrdisableRecovery"
description = "Verifies the errdisable recovery reason, status, and interval."
categories = ["services"]
commands = [AntaCommand(command="show errdisable recovery", ofmt="text")] # Command does not support JSON output hence using text output
class Input(AntaTest.Input):
"""Inputs for the VerifyErrdisableRecovery test."""
reasons: List[ErrDisableReason]
"""List of errdisable reasons"""
class ErrDisableReason(BaseModel):
"""Details of an errdisable reason"""
reason: ErrDisableReasons
"""Type or name of the errdisable reason"""
interval: ErrDisableInterval
"""Interval of the reason in seconds"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
self.result.is_success()
for error_reason in self.inputs.reasons:
input_reason = error_reason.reason
input_interval = error_reason.interval
reason_found = False
# Skip header and last empty line
lines = command_output.split("\n")[2:-1]
for line in lines:
# Skip empty lines
if not line.strip():
continue
# Split by first two whitespaces
reason, status, interval = line.split(None, 2)
if reason != input_reason:
continue
reason_found = True
actual_reason_data = {"interval": interval, "status": status}
expected_reason_data = {"interval": str(input_interval), "status": "Enabled"}
if actual_reason_data != expected_reason_data:
failed_log = get_failed_logs(expected_reason_data, actual_reason_data)
self.result.is_failure(f"`{input_reason}`:{failed_log}\n")
break
if not reason_found:
self.result.is_failure(f"`{input_reason}`: Not found.\n")

176
anta/tests/snmp.py Normal file
View file

@ -0,0 +1,176 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS various SNMP settings
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
from pydantic import conint
from anta.models import AntaCommand, AntaTest
class VerifySnmpStatus(AntaTest):
"""
Verifies whether the SNMP agent is enabled in a specified VRF.
Expected Results:
* success: The test will pass if the SNMP agent is enabled in the specified VRF.
* failure: The test will fail if the SNMP agent is disabled in the specified VRF.
"""
name = "VerifySnmpStatus"
description = "Verifies if the SNMP agent is enabled."
categories = ["snmp"]
commands = [AntaCommand(command="show snmp")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
vrf: str = "default"
"""The name of the VRF in which to check for the SNMP agent"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["enabled"] and self.inputs.vrf in command_output["vrfs"]["snmpVrfs"]:
self.result.is_success()
else:
self.result.is_failure(f"SNMP agent disabled in vrf {self.inputs.vrf}")
class VerifySnmpIPv4Acl(AntaTest):
"""
Verifies if the SNMP agent has the right number of IPv4 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if the SNMP agent has the provided number of IPv4 ACL(s) in the specified VRF.
* failure: The test will fail if the SNMP agent does not have the right number of IPv4 ACL(s) in the specified VRF.
"""
name = "VerifySnmpIPv4Acl"
description = "Verifies if the SNMP agent has IPv4 ACL(s) configured."
categories = ["snmp"]
commands = [AntaCommand(command="show snmp ipv4 access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv4 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for the SNMP agent"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv4_acl_list = command_output["ipAclList"]["aclList"]
ipv4_acl_number = len(ipv4_acl_list)
not_configured_acl_list = []
if ipv4_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} SNMP IPv4 ACL(s) in vrf {self.inputs.vrf} but got {ipv4_acl_number}")
return
for ipv4_acl in ipv4_acl_list:
if self.inputs.vrf not in ipv4_acl["configuredVrfs"] or self.inputs.vrf not in ipv4_acl["activeVrfs"]:
not_configured_acl_list.append(ipv4_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"SNMP IPv4 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifySnmpIPv6Acl(AntaTest):
"""
Verifies if the SNMP agent has the right number of IPv6 ACL(s) configured for a specified VRF.
Expected results:
* success: The test will pass if the SNMP agent has the provided number of IPv6 ACL(s) in the specified VRF.
* failure: The test will fail if the SNMP agent does not have the right number of IPv6 ACL(s) in the specified VRF.
"""
name = "VerifySnmpIPv6Acl"
description = "Verifies if the SNMP agent has IPv6 ACL(s) configured."
categories = ["snmp"]
commands = [AntaCommand(command="show snmp ipv6 access-list summary")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
number: conint(ge=0) # type:ignore
"""The number of expected IPv6 ACL(s)"""
vrf: str = "default"
"""The name of the VRF in which to check for the SNMP agent"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
ipv6_acl_list = command_output["ipv6AclList"]["aclList"]
ipv6_acl_number = len(ipv6_acl_list)
not_configured_acl_list = []
if ipv6_acl_number != self.inputs.number:
self.result.is_failure(f"Expected {self.inputs.number} SNMP IPv6 ACL(s) in vrf {self.inputs.vrf} but got {ipv6_acl_number}")
return
for ipv6_acl in ipv6_acl_list:
if self.inputs.vrf not in ipv6_acl["configuredVrfs"] or self.inputs.vrf not in ipv6_acl["activeVrfs"]:
not_configured_acl_list.append(ipv6_acl["name"])
if not_configured_acl_list:
self.result.is_failure(f"SNMP IPv6 ACL(s) not configured or active in vrf {self.inputs.vrf}: {not_configured_acl_list}")
else:
self.result.is_success()
class VerifySnmpLocation(AntaTest):
"""
This class verifies the SNMP location of a device.
Expected results:
* success: The test will pass if the SNMP location matches the provided input.
* failure: The test will fail if the SNMP location does not match the provided input.
"""
name = "VerifySnmpLocation"
description = "Verifies the SNMP location of a device."
categories = ["snmp"]
commands = [AntaCommand(command="show snmp")]
class Input(AntaTest.Input):
"""Defines the input parameters for this test case."""
location: str
"""Expected SNMP location of the device."""
@AntaTest.anta_test
def test(self) -> None:
location = self.instance_commands[0].json_output["location"]["location"]
if location != self.inputs.location:
self.result.is_failure(f"Expected `{self.inputs.location}` as the location, but found `{location}` instead.")
else:
self.result.is_success()
class VerifySnmpContact(AntaTest):
"""
This class verifies the SNMP contact of a device.
Expected results:
* success: The test will pass if the SNMP contact matches the provided input.
* failure: The test will fail if the SNMP contact does not match the provided input.
"""
name = "VerifySnmpContact"
description = "Verifies the SNMP contact of a device."
categories = ["snmp"]
commands = [AntaCommand(command="show snmp")]
class Input(AntaTest.Input):
"""Defines the input parameters for this test case."""
contact: str
"""Expected SNMP contact details of the device."""
@AntaTest.anta_test
def test(self) -> None:
contact = self.instance_commands[0].json_output["contact"]["contact"]
if contact != self.inputs.contact:
self.result.is_failure(f"Expected `{self.inputs.contact}` as the contact, but found `{contact}` instead.")
else:
self.result.is_success()

91
anta/tests/software.py Normal file
View file

@ -0,0 +1,91 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to the EOS software
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
# Need to keep List for pydantic in python 3.8
from typing import List
from anta.models import AntaCommand, AntaTest
class VerifyEOSVersion(AntaTest):
"""
Verifies the device is running one of the allowed EOS versions.
"""
name = "VerifyEOSVersion"
description = "Verifies the device is running one of the allowed EOS versions."
categories = ["software"]
commands = [AntaCommand(command="show version")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
versions: List[str]
"""List of allowed EOS versions"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["version"] in self.inputs.versions:
self.result.is_success()
else:
self.result.is_failure(f'device is running version "{command_output["version"]}" not in expected versions: {self.inputs.versions}')
class VerifyTerminAttrVersion(AntaTest):
"""
Verifies the device is running one of the allowed TerminAttr versions.
"""
name = "VerifyTerminAttrVersion"
description = "Verifies the device is running one of the allowed TerminAttr versions."
categories = ["software"]
commands = [AntaCommand(command="show version detail")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
versions: List[str]
"""List of allowed TerminAttr versions"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
command_output_data = command_output["details"]["packages"]["TerminAttr-core"]["version"]
if command_output_data in self.inputs.versions:
self.result.is_success()
else:
self.result.is_failure(f"device is running TerminAttr version {command_output_data} and is not in the allowed list: {self.inputs.versions}")
class VerifyEOSExtensions(AntaTest):
"""
Verifies all EOS extensions installed on the device are enabled for boot persistence.
"""
name = "VerifyEOSExtensions"
description = "Verifies all EOS extensions installed on the device are enabled for boot persistence."
categories = ["software"]
commands = [AntaCommand(command="show extensions"), AntaCommand(command="show boot-extensions")]
@AntaTest.anta_test
def test(self) -> None:
boot_extensions = []
show_extensions_command_output = self.instance_commands[0].json_output
show_boot_extensions_command_output = self.instance_commands[1].json_output
installed_extensions = [
extension for extension, extension_data in show_extensions_command_output["extensions"].items() if extension_data["status"] == "installed"
]
for extension in show_boot_extensions_command_output["extensions"]:
extension = extension.strip("\n")
if extension != "":
boot_extensions.append(extension)
installed_extensions.sort()
boot_extensions.sort()
if installed_extensions == boot_extensions:
self.result.is_success()
else:
self.result.is_failure(f"Missing EOS extensions: installed {installed_extensions} / configured: {boot_extensions}")

198
anta/tests/stp.py Normal file
View file

@ -0,0 +1,198 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to various Spanning Tree Protocol (STP) settings
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
# Need to keep List for pydantic in python 3.8
from typing import List, Literal
from anta.custom_types import Vlan
from anta.models import AntaCommand, AntaTemplate, AntaTest
from anta.tools.get_value import get_value
class VerifySTPMode(AntaTest):
"""
Verifies the configured STP mode for a provided list of VLAN(s).
Expected Results:
* success: The test will pass if the STP mode is configured properly in the specified VLAN(s).
* failure: The test will fail if the STP mode is NOT configured properly for one or more specified VLAN(s).
"""
name = "VerifySTPMode"
description = "Verifies the configured STP mode for a provided list of VLAN(s)."
categories = ["stp"]
commands = [AntaTemplate(template="show spanning-tree vlan {vlan}")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
mode: Literal["mstp", "rstp", "rapidPvst"] = "mstp"
"""STP mode to verify"""
vlans: List[Vlan]
"""List of VLAN on which to verify STP mode"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(vlan=vlan) for vlan in self.inputs.vlans]
@AntaTest.anta_test
def test(self) -> None:
not_configured = []
wrong_stp_mode = []
for command in self.instance_commands:
if "vlan" in command.params:
vlan_id = command.params["vlan"]
if not (stp_mode := get_value(command.json_output, f"spanningTreeVlanInstances.{vlan_id}.spanningTreeVlanInstance.protocol")):
not_configured.append(vlan_id)
elif stp_mode != self.inputs.mode:
wrong_stp_mode.append(vlan_id)
if not_configured:
self.result.is_failure(f"STP mode '{self.inputs.mode}' not configured for the following VLAN(s): {not_configured}")
if wrong_stp_mode:
self.result.is_failure(f"Wrong STP mode configured for the following VLAN(s): {wrong_stp_mode}")
if not not_configured and not wrong_stp_mode:
self.result.is_success()
class VerifySTPBlockedPorts(AntaTest):
"""
Verifies there are no STP blocked ports.
Expected Results:
* success: The test will pass if there are NO ports blocked by STP.
* failure: The test will fail if there are ports blocked by STP.
"""
name = "VerifySTPBlockedPorts"
description = "Verifies there is no STP blocked ports."
categories = ["stp"]
commands = [AntaCommand(command="show spanning-tree blockedports")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if not (stp_instances := command_output["spanningTreeInstances"]):
self.result.is_success()
else:
for key, value in stp_instances.items():
stp_instances[key] = value.pop("spanningTreeBlockedPorts")
self.result.is_failure(f"The following ports are blocked by STP: {stp_instances}")
class VerifySTPCounters(AntaTest):
"""
Verifies there are no errors in STP BPDU packets.
Expected Results:
* success: The test will pass if there are NO STP BPDU packet errors under all interfaces participating in STP.
* failure: The test will fail if there are STP BPDU packet errors on one or many interface(s).
"""
name = "VerifySTPCounters"
description = "Verifies there is no errors in STP BPDU packets."
categories = ["stp"]
commands = [AntaCommand(command="show spanning-tree counters")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
interfaces_with_errors = [
interface for interface, counters in command_output["interfaces"].items() if counters["bpduTaggedError"] != 0 or counters["bpduOtherError"] != 0
]
if interfaces_with_errors:
self.result.is_failure(f"The following interfaces have STP BPDU packet errors: {interfaces_with_errors}")
else:
self.result.is_success()
class VerifySTPForwardingPorts(AntaTest):
"""
Verifies that all interfaces are in a forwarding state for a provided list of VLAN(s).
Expected Results:
* success: The test will pass if all interfaces are in a forwarding state for the specified VLAN(s).
* failure: The test will fail if one or many interfaces are NOT in a forwarding state in the specified VLAN(s).
"""
name = "VerifySTPForwardingPorts"
description = "Verifies that all interfaces are forwarding for a provided list of VLAN(s)."
categories = ["stp"]
commands = [AntaTemplate(template="show spanning-tree topology vlan {vlan} status")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
vlans: List[Vlan]
"""List of VLAN on which to verify forwarding states"""
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(vlan=vlan) for vlan in self.inputs.vlans]
@AntaTest.anta_test
def test(self) -> None:
not_configured = []
not_forwarding = []
for command in self.instance_commands:
if "vlan" in command.params:
vlan_id = command.params["vlan"]
if not (topologies := get_value(command.json_output, "topologies")):
not_configured.append(vlan_id)
else:
for value in topologies.values():
if int(vlan_id) in value["vlans"]:
interfaces_not_forwarding = [interface for interface, state in value["interfaces"].items() if state["state"] != "forwarding"]
if interfaces_not_forwarding:
not_forwarding.append({f"VLAN {vlan_id}": interfaces_not_forwarding})
if not_configured:
self.result.is_failure(f"STP instance is not configured for the following VLAN(s): {not_configured}")
if not_forwarding:
self.result.is_failure(f"The following VLAN(s) have interface(s) that are not in a fowarding state: {not_forwarding}")
if not not_configured and not interfaces_not_forwarding:
self.result.is_success()
class VerifySTPRootPriority(AntaTest):
"""
Verifies the STP root priority for a provided list of VLAN or MST instance ID(s).
Expected Results:
* success: The test will pass if the STP root priority is configured properly for the specified VLAN or MST instance ID(s).
* failure: The test will fail if the STP root priority is NOT configured properly for the specified VLAN or MST instance ID(s).
"""
name = "VerifySTPRootPriority"
description = "Verifies the STP root priority for a provided list of VLAN or MST instance ID(s)."
categories = ["stp"]
commands = [AntaCommand(command="show spanning-tree root detail")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
priority: int
"""STP root priority to verify"""
instances: List[Vlan] = []
"""List of VLAN or MST instance ID(s). If empty, ALL VLAN or MST instance ID(s) will be verified."""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if not (stp_instances := command_output["instances"]):
self.result.is_failure("No STP instances configured")
return
# Checking the type of instances based on first instance
first_name = list(stp_instances)[0]
if first_name.startswith("MST"):
prefix = "MST"
elif first_name.startswith("VL"):
prefix = "VL"
else:
self.result.is_failure(f"Unsupported STP instance type: {first_name}")
return
check_instances = [f"{prefix}{instance_id}" for instance_id in self.inputs.instances] if self.inputs.instances else command_output["instances"].keys()
wrong_priority_instances = [
instance for instance in check_instances if get_value(command_output, f"instances.{instance}.rootBridge.priority") != self.inputs.priority
]
if wrong_priority_instances:
self.result.is_failure(f"The following instance(s) have the wrong STP root priority configured: {wrong_priority_instances}")
else:
self.result.is_success()
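# Illustrative expansion of check_instances above (values are examples only):
#   first instance "VL10" and inputs.instances=[10, 20] -> ["VL10", "VL20"]
#   first instance "MST0" and inputs.instances=[0]      -> ["MST0"]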

227
anta/tests/system.py Normal file
View file

@ -0,0 +1,227 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to system-level features and protocols
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from __future__ import annotations
import re
from pydantic import conint
from anta.models import AntaCommand, AntaTest
class VerifyUptime(AntaTest):
"""
This test verifies if the device uptime is higher than the provided minimum uptime value.
Expected Results:
* success: The test will pass if the device uptime is higher than the provided value.
* failure: The test will fail if the device uptime is lower than the provided value.
"""
name = "VerifyUptime"
description = "Verifies the device uptime."
categories = ["system"]
commands = [AntaCommand(command="show uptime")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
minimum: conint(ge=0) # type: ignore
"""Minimum uptime in seconds"""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if command_output["upTime"] > self.inputs.minimum:
self.result.is_success()
else:
self.result.is_failure(f"Device uptime is {command_output['upTime']} seconds")
class VerifyReloadCause(AntaTest):
"""
This test verifies the last reload cause of the device.
Expected results:
* success: The test will pass if there are NO reload causes or if the last reload was caused by the user or after an FPGA upgrade.
* failure: The test will fail if the last reload was NOT caused by the user or after an FPGA upgrade.
* error: The test will report an error if the reload cause is NOT available.
"""
name = "VerifyReloadCause"
description = "Verifies the last reload cause of the device."
categories = ["system"]
commands = [AntaCommand(command="show reload cause")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if "resetCauses" not in command_output.keys():
self.result.is_error(message="No reload causes available")
return
if len(command_output["resetCauses"]) == 0:
# No reload causes
self.result.is_success()
return
reset_causes = command_output["resetCauses"]
command_output_data = reset_causes[0].get("description")
if command_output_data in [
"Reload requested by the user.",
"Reload requested after FPGA upgrade",
]:
self.result.is_success()
else:
self.result.is_failure(f"Reload cause is: '{command_output_data}'")
class VerifyCoredump(AntaTest):
"""
This test verifies if there are core dump files in the /var/core directory.
Expected Results:
* success: The test will pass if there are NO core dump(s) in /var/core.
* failure: The test will fail if there are core dump(s) in /var/core.
Note:
* This test will NOT check for minidump(s) generated by certain agents in /var/core/minidump.
"""
name = "VerifyCoredump"
description = "Verifies there are no core dump files."
categories = ["system"]
commands = [AntaCommand(command="show system coredump", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
core_files = command_output["coreFiles"]
if "minidump" in core_files:
core_files.remove("minidump")
if not core_files:
self.result.is_success()
else:
self.result.is_failure(f"Core dump(s) have been found: {core_files}")
class VerifyAgentLogs(AntaTest):
"""
This test verifies that no agent crash reports are present on the device.
Expected Results:
* success: The test will pass if there is NO agent crash reported.
* failure: The test will fail if any agent crashes are reported.
"""
name = "VerifyAgentLogs"
description = "Verifies there are no agent crash reports."
categories = ["system"]
commands = [AntaCommand(command="show agent logs crash", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
if len(command_output) == 0:
self.result.is_success()
else:
pattern = re.compile(r"^===> (.*?) <===$", re.MULTILINE)
agents = "\n * ".join(pattern.findall(command_output))
self.result.is_failure(f"Device has reported agent crashes:\n * {agents}")
class VerifyCPUUtilization(AntaTest):
"""
This test verifies whether the CPU utilization is below 75%.
Expected Results:
* success: The test will pass if the CPU utilization is below 75%.
* failure: The test will fail if the CPU utilization is over 75%.
"""
name = "VerifyCPUUtilization"
description = "Verifies whether the CPU utilization is below 75%."
categories = ["system"]
commands = [AntaCommand(command="show processes top once")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
command_output_data = command_output["cpuInfo"]["%Cpu(s)"]["idle"]
if command_output_data > 25:
self.result.is_success()
else:
self.result.is_failure(f"Device has reported a high CPU utilization: {100 - command_output_data}%")
class VerifyMemoryUtilization(AntaTest):
"""
This test verifies whether the memory utilization is below 75%.
Expected Results:
* success: The test will pass if the memory utilization is below 75%.
* failure: The test will fail if the memory utilization is over 75%.
"""
name = "VerifyMemoryUtilization"
description = "Verifies whether the memory utilization is below 75%."
categories = ["system"]
commands = [AntaCommand(command="show version")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
memory_usage = command_output["memFree"] / command_output["memTotal"]
if memory_usage > 0.25:
self.result.is_success()
else:
self.result.is_failure(f"Device has reported a high memory usage: {(1 - memory_usage)*100:.2f}%")
class VerifyFileSystemUtilization(AntaTest):
"""
This test verifies that no partition is utilizing more than 75% of its disk space.
Expected Results:
* success: The test will pass if all partitions are using less than 75% of their disk space.
* failure: The test will fail if any partition is using more than 75% of its disk space.
"""
name = "VerifyFileSystemUtilization"
description = "Verifies that no partition is utilizing more than 75% of its disk space."
categories = ["system"]
commands = [AntaCommand(command="bash timeout 10 df -h", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
self.result.is_success()
for line in command_output.split("\n")[1:]:
if "loop" not in line and len(line) > 0 and (percentage := int(line.split()[4].replace("%", ""))) > 75:
self.result.is_failure(f"Mount point {line} is higher than 75%: reported {percentage}%")
class VerifyNTP(AntaTest):
"""
This test verifies that the Network Time Protocol (NTP) is synchronized.
Expected Results:
* success: The test will pass if the NTP is synchronised.
* failure: The test will fail if the NTP is NOT synchronised.
"""
name = "VerifyNTP"
description = "Verifies if NTP is synchronised."
categories = ["system"]
commands = [AntaCommand(command="show ntp status", ofmt="text")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].text_output
if command_output.split("\n")[0].split(" ")[0] == "synchronised":
self.result.is_success()
else:
data = command_output.split("\n")[0]
self.result.is_failure(f"The device is not synchronized with the configured NTP server(s): '{data}'")

59
anta/tests/vlan.py Normal file
View file

@ -0,0 +1,59 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to VLAN
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from typing import Literal
from anta.custom_types import Vlan
from anta.models import AntaCommand, AntaTest
from anta.tools.get_value import get_value
from anta.tools.utils import get_failed_logs
class VerifyVlanInternalPolicy(AntaTest):
"""
This class checks if the VLAN internal allocation policy is ascending or descending and
if the VLANs are within the specified range.
Expected Results:
* Success: The test will pass if the VLAN internal allocation policy is either ascending or descending
and the VLANs are within the specified range.
* Failure: The test will fail if the VLAN internal allocation policy is neither ascending nor descending
or the VLANs are outside the specified range.
"""
name = "VerifyVlanInternalPolicy"
description = "This test checks the VLAN internal allocation policy and the range of VLANs."
categories = ["vlan"]
commands = [AntaCommand(command="show vlan internal allocation policy")]
class Input(AntaTest.Input):
"""Inputs for the VerifyVlanInternalPolicy test."""
policy: Literal["ascending", "descending"]
"""The VLAN internal allocation policy."""
start_vlan_id: Vlan
"""The starting VLAN ID in the range."""
end_vlan_id: Vlan
"""The ending VLAN ID in the range."""
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
keys_to_verify = ["policy", "startVlanId", "endVlanId"]
actual_policy_output = {key: get_value(command_output, key) for key in keys_to_verify}
expected_policy_output = {"policy": self.inputs.policy, "startVlanId": self.inputs.start_vlan_id, "endVlanId": self.inputs.end_vlan_id}
# Check if the actual output matches the expected output
if actual_policy_output != expected_policy_output:
failed_log = "The VLAN internal allocation policy is not configured properly:"
failed_log += get_failed_logs(expected_policy_output, actual_policy_output)
self.result.is_failure(failed_log)
else:
self.result.is_success()

219
anta/tests/vxlan.py Normal file
View file

@ -0,0 +1,219 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Test functions related to VXLAN
"""
# Mypy does not understand AntaTest.Input typing
# mypy: disable-error-code=attr-defined
from ipaddress import IPv4Address
# Need to keep List and Dict for pydantic in python 3.8
from typing import Dict, List
from pydantic import Field
from anta.custom_types import Vlan, Vni, VxlanSrcIntf
from anta.models import AntaCommand, AntaTest
from anta.tools.get_value import get_value
class VerifyVxlan1Interface(AntaTest):
"""
This test verifies if the Vxlan1 interface is configured and 'up/up'.
!!! warning
The name of this test has been updated from 'VerifyVxlan' for better representation.
Expected Results:
* success: The test will pass if the Vxlan1 interface is configured with line protocol status and interface status 'up'.
* failure: The test will fail if the Vxlan1 interface line protocol status or interface status are not 'up'.
* skipped: The test will be skipped if the Vxlan1 interface is not configured.
"""
name = "VerifyVxlan1Interface"
description = "Verifies the Vxlan1 interface status."
categories = ["vxlan"]
commands = [AntaCommand(command="show interfaces description", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if "Vxlan1" not in command_output["interfaceDescriptions"]:
self.result.is_skipped("Vxlan1 interface is not configured")
elif (
command_output["interfaceDescriptions"]["Vxlan1"]["lineProtocolStatus"] == "up"
and command_output["interfaceDescriptions"]["Vxlan1"]["interfaceStatus"] == "up"
):
self.result.is_success()
else:
self.result.is_failure(
f"Vxlan1 interface is {command_output['interfaceDescriptions']['Vxlan1']['lineProtocolStatus']}"
f"/{command_output['interfaceDescriptions']['Vxlan1']['interfaceStatus']}"
)
class VerifyVxlanConfigSanity(AntaTest):
"""
This test verifies that no issues are detected with the VXLAN configuration.
Expected Results:
* success: The test will pass if no issues are detected with the VXLAN configuration.
* failure: The test will fail if issues are detected with the VXLAN configuration.
* skipped: The test will be skipped if VXLAN is not configured on the device.
"""
name = "VerifyVxlanConfigSanity"
description = "Verifies there are no VXLAN config-sanity inconsistencies."
categories = ["vxlan"]
commands = [AntaCommand(command="show vxlan config-sanity", ofmt="json")]
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
if "categories" not in command_output or len(command_output["categories"]) == 0:
self.result.is_skipped("VXLAN is not configured")
return
failed_categories = {
category: content
for category, content in command_output["categories"].items()
if category in ["localVtep", "mlag", "pd"] and content["allCheckPass"] is not True
}
if len(failed_categories) > 0:
self.result.is_failure(f"VXLAN config sanity check is not passing: {failed_categories}")
else:
self.result.is_success()
class VerifyVxlanVniBinding(AntaTest):
"""
This test verifies the VNI-VLAN bindings of the Vxlan1 interface.
Expected Results:
* success: The test will pass if the VNI-VLAN bindings provided are properly configured.
* failure: The test will fail if any VNI lacks bindings or if any bindings are incorrect.
* skipped: The test will be skipped if the Vxlan1 interface is not configured.
"""
name = "VerifyVxlanVniBinding"
description = "Verifies the VNI-VLAN bindings of the Vxlan1 interface."
categories = ["vxlan"]
commands = [AntaCommand(command="show vxlan vni", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
bindings: Dict[Vni, Vlan]
"""VNI to VLAN bindings to verify"""
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
no_binding = []
wrong_binding = []
if (vxlan1 := get_value(self.instance_commands[0].json_output, "vxlanIntfs.Vxlan1")) is None:
self.result.is_skipped("Vxlan1 interface is not configured")
return
for vni, vlan in self.inputs.bindings.items():
vni = str(vni)
if vni in vxlan1["vniBindings"]:
retrieved_vlan = vxlan1["vniBindings"][vni]["vlan"]
elif vni in vxlan1["vniBindingsToVrf"]:
retrieved_vlan = vxlan1["vniBindingsToVrf"][vni]["vlan"]
else:
no_binding.append(vni)
retrieved_vlan = None
if retrieved_vlan and vlan != retrieved_vlan:
wrong_binding.append({vni: retrieved_vlan})
if no_binding:
self.result.is_failure(f"The following VNI(s) have no binding: {no_binding}")
if wrong_binding:
self.result.is_failure(f"The following VNI(s) have the wrong VLAN binding: {wrong_binding}")
class VerifyVxlanVtep(AntaTest):
"""
This test verifies the VTEP peers of the Vxlan1 interface.
Expected Results:
* success: The test will pass if all provided VTEP peers are identified and matching.
* failure: The test will fail if any VTEP peer is missing or there are unexpected VTEP peers.
* skipped: The test will be skipped if the Vxlan1 interface is not configured.
"""
name = "VerifyVxlanVtep"
description = "Verifies the VTEP peers of the Vxlan1 interface"
categories = ["vxlan"]
commands = [AntaCommand(command="show vxlan vtep", ofmt="json")]
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
vteps: List[IPv4Address]
"""List of VTEP peers to verify"""
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
inputs_vteps = [str(input_vtep) for input_vtep in self.inputs.vteps]
if (vxlan1 := get_value(self.instance_commands[0].json_output, "interfaces.Vxlan1")) is None:
self.result.is_skipped("Vxlan1 interface is not configured")
return
difference1 = set(inputs_vteps).difference(set(vxlan1["vteps"]))
difference2 = set(vxlan1["vteps"]).difference(set(inputs_vteps))
if difference1:
self.result.is_failure(f"The following VTEP peer(s) are missing from the Vxlan1 interface: {sorted(difference1)}")
if difference2:
self.result.is_failure(f"Unexpected VTEP peer(s) on Vxlan1 interface: {sorted(difference2)}")
class VerifyVxlan1ConnSettings(AntaTest):
"""
Verifies the interface vxlan1 source interface and UDP port.
Expected Results:
* success: Passes if the interface vxlan1 source interface and UDP port are correct.
* failure: Fails if the interface vxlan1 source interface or UDP port are incorrect.
* skipped: Skips if the Vxlan1 interface is not configured.
"""
name = "VerifyVxlan1ConnSettings"
description = "Verifies the interface vxlan1 source interface and UDP port."
categories = ["vxlan"]
commands = [AntaCommand(command="show interfaces")]
class Input(AntaTest.Input):
"""Inputs for the VerifyVxlan1ConnSettings test."""
source_interface: VxlanSrcIntf
"""Source loopback interface of vxlan1 interface"""
        udp_port: int = Field(ge=1024, le=65535)
"""UDP port used for vxlan1 interface"""
@AntaTest.anta_test
def test(self) -> None:
self.result.is_success()
command_output = self.instance_commands[0].json_output
# Skip the test case if vxlan1 interface is not configured
vxlan_output = get_value(command_output, "interfaces.Vxlan1")
if not vxlan_output:
self.result.is_skipped("Vxlan1 interface is not configured.")
return
src_intf = vxlan_output.get("srcIpIntf")
port = vxlan_output.get("udpPort")
# Check vxlan1 source interface and udp port
if src_intf != self.inputs.source_interface:
self.result.is_failure(f"Source interface is not correct. Expected `{self.inputs.source_interface}` as source interface but found `{src_intf}` instead.")
if port != self.inputs.udp_port:
self.result.is_failure(f"UDP port is not correct. Expected `{self.inputs.udp_port}` as UDP port but found `{port}` instead.")

3
anta/tools/__init__.py Normal file
View file

@ -0,0 +1,3 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.

View file

@ -0,0 +1,64 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Get one dictionary from a list of dictionaries by matching the given key and values."""
from __future__ import annotations
from typing import Any, Optional
def get_dict_superset(
list_of_dicts: list[dict[Any, Any]],
input_dict: dict[Any, Any],
default: Optional[Any] = None,
required: bool = False,
var_name: Optional[str] = None,
custom_error_msg: Optional[str] = None,
) -> Any:
"""Get the first dictionary from a list of dictionaries that is a superset of the input dict.
Returns the supplied default value or None if there is no match and "required" is False.
Will return the first matching item if there are multiple matching items.
Parameters
----------
list_of_dicts: list(dict)
List of Dictionaries to get list items from
input_dict : dict
Dictionary to check subset with a list of dict
default: any
Default value returned if the key and value are not found
required: bool
Fail if there is no match
var_name : str
String used for raising an exception with the full variable name
custom_error_msg : str
Custom error message to raise when required is True and the value is not found
Returns
-------
any
Dict or default value
Raises
------
ValueError
If the keys and values are not found and "required" == True
"""
if not isinstance(list_of_dicts, list) or not list_of_dicts or not isinstance(input_dict, dict) or not input_dict:
if required:
error_msg = custom_error_msg or f"{var_name} not found in the provided list."
raise ValueError(error_msg)
return default
for list_item in list_of_dicts:
if isinstance(list_item, dict) and input_dict.items() <= list_item.items():
return list_item
if required:
error_msg = custom_error_msg or f"{var_name} not found in the provided list."
raise ValueError(error_msg)
return default
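if __name__ == "__main__":
    # Illustrative usage sketch (not part of the upstream module): demonstrates the
    # superset match and the default behaviour described in the docstring above.
    routes = [
        {"prefix": "10.0.0.0/24", "protocol": "eBGP", "nextHop": "10.255.0.1"},
        {"prefix": "10.0.1.0/24", "protocol": "connected"},
    ]
    # The first dictionary containing all key/value pairs of the input dict is returned
    assert get_dict_superset(routes, {"prefix": "10.0.0.0/24", "protocol": "eBGP"})["nextHop"] == "10.255.0.1"
    # No match and required is False: the default (None here) is returned
    assert get_dict_superset(routes, {"protocol": "iBGP"}) is None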

83
anta/tools/get_item.py Normal file
View file

@ -0,0 +1,83 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Get one dictionary from a list of dictionaries by matching the given key and value."""
from __future__ import annotations
from typing import Any, Optional
# pylint: disable=too-many-arguments
def get_item(
list_of_dicts: list[dict[Any, Any]],
key: Any,
value: Any,
default: Optional[Any] = None,
required: bool = False,
case_sensitive: bool = False,
var_name: Optional[str] = None,
custom_error_msg: Optional[str] = None,
) -> Any:
"""Get one dictionary from a list of dictionaries by matching the given key and value.
Returns the supplied default value or None if there is no match and "required" is False.
Will return the first matching item if there are multiple matching items.
Parameters
----------
list_of_dicts : list(dict)
List of Dictionaries to get list item from
key : any
Dictionary Key to match on
value : any
Value that must match
default : any
Default value returned if the key and value is not found
required : bool
Fail if there is no match
case_sensitive : bool
If the search value is a string, the comparison will ignore case by default
var_name : str
String used for raising exception with the full variable name
custom_error_msg : str
Custom error message to raise when required is True and the value is not found
Returns
-------
any
Dict or default value
Raises
------
ValueError
If the key and value is not found and "required" == True
"""
if var_name is None:
var_name = key
if (not isinstance(list_of_dicts, list)) or list_of_dicts == [] or value is None or key is None:
if required is True:
raise ValueError(custom_error_msg or var_name)
return default
for list_item in list_of_dicts:
if not isinstance(list_item, dict):
# List item is not a dict as required. Skip this item
continue
item_value = list_item.get(key)
# Perform case-insensitive comparison if value and item_value are strings and case_sensitive is False
if not case_sensitive and isinstance(value, str) and isinstance(item_value, str):
if item_value.casefold() == value.casefold():
return list_item
elif item_value == value:
# Match. Return this item
return list_item
# No Match
if required is True:
raise ValueError(custom_error_msg or var_name)
return default
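if __name__ == "__main__":
    # Illustrative usage sketch (not part of the upstream module): demonstrates the
    # case-insensitive match (default) and the default-value behaviour.
    interfaces = [
        {"name": "Ethernet1", "status": "up"},
        {"name": "Ethernet2", "status": "down"},
    ]
    # Case-insensitive comparison matches "Ethernet1"
    assert get_item(interfaces, "name", "ethernet1") == {"name": "Ethernet1", "status": "up"}
    # No match and required is False: the supplied default is returned
    assert get_item(interfaces, "name", "Ethernet3", default={}) == {}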

56
anta/tools/get_value.py Normal file
View file

@ -0,0 +1,56 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Get a value from a dictionary or nested dictionaries.
"""
from __future__ import annotations
from typing import Any, Optional
# pylint: disable=too-many-arguments
def get_value(
dictionary: dict[Any, Any], key: str, default: Optional[Any] = None, required: bool = False, org_key: Optional[str] = None, separator: str = "."
) -> Any:
"""
Get a value from a dictionary or nested dictionaries.
Key supports dot-notation like "foo.bar" to do deeper lookups.
Returns the supplied default value or None if the key is not found and required is False.
Parameters
----------
dictionary : dict
Dictionary to get key from
key : str
Dictionary Key - supporting dot-notation for nested dictionaries
default : any
Default value returned if the key is not found
required : bool
Fail if the key is not found
org_key : str
Internal variable used for raising exception with the full key name even when called recursively
separator: str
String to use as the separator parameter in the split function. Useful in cases when the key
can contain variables with "." inside (e.g. hostnames)
Returns
-------
any
Value or default value
Raises
------
ValueError
If the key is not found and required == True
"""
if org_key is None:
org_key = key
keys = key.split(separator)
value = dictionary.get(keys[0])
if value is None:
if required:
raise ValueError(org_key)
return default
if len(keys) > 1:
return get_value(value, separator.join(keys[1:]), default=default, required=required, org_key=org_key, separator=separator)
return value
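if __name__ == "__main__":
    # Illustrative usage sketch (not part of the upstream module): demonstrates the
    # dot-notation lookup and the default/required behaviours described above.
    data = {"interfaces": {"Vxlan1": {"srcIpIntf": "Loopback1"}}}
    assert get_value(data, "interfaces.Vxlan1.srcIpIntf") == "Loopback1"
    assert get_value(data, "interfaces.Vxlan2", default="not configured") == "not configured"
    try:
        get_value(data, "interfaces.Vxlan2", required=True)
    except ValueError as exc:
        assert str(exc) == "interfaces.Vxlan2"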

26
anta/tools/misc.py Normal file
View file

@ -0,0 +1,26 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Toolkit for ANTA.
"""
from __future__ import annotations
import logging
import traceback
logger = logging.getLogger(__name__)
def exc_to_str(exception: BaseException) -> str:
"""
Helper function that returns a human readable string from an BaseException object
"""
return f"{type(exception).__name__}{f' ({exception})' if str(exception) else ''}"
def tb_to_str(exception: BaseException) -> str:
"""
Helper function that returns a traceback string from an BaseException object
"""
return "Traceback (most recent call last):\n" + "".join(traceback.format_tb(exception.__traceback__))

34
anta/tools/utils.py Normal file
View file

@ -0,0 +1,34 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Toolkit for ANTA.
"""
from __future__ import annotations
from typing import Any
def get_failed_logs(expected_output: dict[Any, Any], actual_output: dict[Any, Any]) -> str:
"""
Get the failed log for a test.
Returns the failed log or an empty string if there is no difference between the expected and actual output.
Parameters:
expected_output (dict): Expected output of a test.
actual_output (dict): Actual output of a test
Returns:
str: Failed log of a test.
"""
failed_logs = []
for element, expected_data in expected_output.items():
actual_data = actual_output.get(element)
if actual_data is None:
failed_logs.append(f"\nExpected `{expected_data}` as the {element}, but it was not found in the actual output.")
elif actual_data != expected_data:
failed_logs.append(f"\nExpected `{expected_data}` as the {element}, but found `{actual_data}` instead.")
return "".join(failed_logs)

70
docs/README.md Executable file
View file

@ -0,0 +1,70 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
[![License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](https://github.com/arista-netdevops-community/anta/blob/main/LICENSE)
[![Linting and Testing Anta](https://github.com/arista-netdevops-community/anta/actions/workflows/code-testing.yml/badge.svg)](https://github.com/arista-netdevops-community/anta/actions/workflows/code-testing.yml)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
![GitHub commit activity (branch)](https://img.shields.io/github/commit-activity/m/arista-netdevops-community/anta)
[![github release](https://img.shields.io/github/release/arista-netdevops-community/anta.svg)](https://github.com/arista-netdevops-community/anta/releases/)
![PyPI - Downloads](https://img.shields.io/pypi/dm/anta)
![coverage](https://raw.githubusercontent.com/arista-netdevops-community/anta/coverage-badge/latest-release-coverage.svg)
# Arista Network Test Automation (ANTA) Framework
ANTA is a Python framework that automates tests for Arista devices.
- ANTA provides a [set of tests](api/tests.md) to validate the state of your network
- ANTA can be used to:
- Automate NRFU (Network Ready For Use) test on a preproduction network
- Automate tests on a live network (periodically or on demand)
- ANTA can be used with:
- The [ANTA CLI](cli/overview.md)
- As a [Python library](advanced_usages/as-python-lib.md) in your own application
![anta nrfu](https://raw.githubusercontent.com/arista-netdevops-community/anta/main/docs/imgs/anta-nrfu.svg)
```bash
# Install ANTA CLI
$ pip install anta
# Run ANTA CLI
$ anta --help
Usage: anta [OPTIONS] COMMAND [ARGS]...
Arista Network Test Automation (ANTA) CLI
Options:
--version Show the version and exit.
--log-file FILE Send the logs to a file. If logging level is
DEBUG, only INFO or higher will be sent to
stdout. [env var: ANTA_LOG_FILE]
-l, --log-level [CRITICAL|ERROR|WARNING|INFO|DEBUG]
ANTA logging level [env var:
ANTA_LOG_LEVEL; default: INFO]
--help Show this message and exit.
Commands:
check Commands to validate configuration files
debug Commands to execute EOS commands on remote devices
exec Commands to execute various scripts on EOS devices
get Commands to get information from or generate inventories
nrfu Run ANTA tests on devices
```
> [!WARNING]
> The ANTA CLI options have changed after version 0.11: global options have moved away from the top-level `anta` command and are now required at their respective subcommands (e.g. `anta nrfu`). This breaking change follows user feedback aimed at making the CLI more intuitive and should not affect the user experience when using environment variables.
## Documentation
The documentation is published on [ANTA package website](https://www.anta.ninja). Also, a [demo repository](https://github.com/titom73/atd-anta-demo) is available to facilitate your journey with ANTA.
## Contribution guide
Contributions are welcome. Please refer to the [contribution guide](contribution.md)
## Credits
Thank you to [Angélique Phillipps](https://github.com/aphillipps), [Colin MacGiollaEáin](https://github.com/colinmacgiolla), [Khelil Sator](https://github.com/ksator), [Matthieu Tache](https://github.com/mtache), [Onur Gashi](https://github.com/onurgashi), [Paul Lavelle](https://github.com/paullavelle), [Guillaume Mulocher](https://github.com/gmuloc) and [Thomas Grimonet](https://github.com/titom73) for their contributions and guidance.

View file

@ -0,0 +1,315 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
ANTA is a Python library that can be used in user applications. This section describes how you can leverage ANTA Python modules to help you create your own NRFU solution.
!!! tip
If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html
## [AntaDevice](../api/device.md#anta.device.AntaDevice) Abstract Class
A device is represented in ANTA as an instance of a subclass of the [AntaDevice](../api/device.md#anta.device.AntaDevice) abstract class.
There are a few abstract methods that need to be implemented by child classes:
- The [collect()](../api/device.md#anta.device.AntaDevice.collect) coroutine is in charge of collecting outputs of [AntaCommand](../api/models.md#anta.models.AntaCommand) instances.
- The [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutine is in charge of updating attributes of the [AntaDevice](../api/device.md#anta.device.AntaDevice) instance. These attributes are used by [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) to filter out unreachable devices or by [AntaTest](../api/models.md#anta.models.AntaTest) to skip devices based on their hardware models.
The [copy()](../api/device.md#anta.device.AntaDevice.copy) coroutine is used to copy files to and from the device. It does not need to be implemented if tests are not using it.
### [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) Class
The [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) class is an implementation of [AntaDevice](../api/device.md#anta.device.AntaDevice) for Arista EOS.
It uses the [aio-eapi](https://github.com/jeremyschulman/aio-eapi) eAPI client and the [AsyncSSH](https://github.com/ronf/asyncssh) library.
- The [collect()](../api/device.md#anta.device.AsyncEOSDevice.collect) coroutine collects [AntaCommand](../api/models.md#anta.models.AntaCommand) outputs using eAPI.
- The [refresh()](../api/device.md#anta.device.AsyncEOSDevice.refresh) coroutine tries to open a TCP connection on the eAPI port and update the `is_online` attribute accordingly. If the TCP connection succeeds, it sends a `show version` command to gather the hardware model of the device and updates the `established` and `hw_model` attributes.
- The [copy()](../api/device.md#anta.device.AsyncEOSDevice.copy) coroutine copies files to and from the device using the SCP protocol.
## [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) Class
The [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) class is a subclass of the standard Python type [dict](https://docs.python.org/3/library/stdtypes.html#dict). The keys of this dictionary are the device names, the values are [AntaDevice](../api/device.md#anta.device.AntaDevice) instances.
[AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) provides methods to interact with the ANTA inventory:
- The [add_device()](../api/inventory.md#anta.inventory.AntaInventory.add_device) method adds an [AntaDevice](../api/device.md#anta.device.AntaDevice) instance to the inventory. Adding an entry to [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) with a key different from the device name is not allowed.
- The [get_inventory()](../api/inventory.md#anta.inventory.AntaInventory.get_inventory) method returns a new [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) instance with filtered out devices based on the method inputs.
- The [connect_inventory()](../api/inventory.md#anta.inventory.AntaInventory.connect_inventory) coroutine will execute the [refresh()](../api/device.md#anta.device.AntaDevice.refresh) coroutines of all the devices in the inventory.
- The [parse()](../api/inventory.md#anta.inventory.AntaInventory.parse) static method creates an [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) instance from a YAML file and returns it. The devices are [AsyncEOSDevice](../api/device.md#anta.device.AsyncEOSDevice) instances.
To parse a YAML inventory file and print the devices connection status:
```python
"""
Example
"""
import asyncio
from anta.inventory import AntaInventory
async def main(inv: AntaInventory) -> None:
"""
Take an AntaInventory and:
1. try to connect to every device in the inventory
2. print a message for every device connection status
"""
await inv.connect_inventory()
for device in inv.values():
if device.established:
print(f"Device {device.name} is online")
else:
print(f"Could not connect to device {device.name}")
if __name__ == "__main__":
# Create the AntaInventory instance
inventory = AntaInventory.parse(
filename="inv.yml",
username="arista",
password="@rista123",
timeout=15,
)
# Run the main coroutine
res = asyncio.run(main(inventory))
```
??? note "How to create your inventory file"
Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files.
To run a list of EOS commands on the reachable devices from the inventory:
```python
"""
Example
"""
# This is needed to run the script for python < 3.10 for typing annotations
from __future__ import annotations
import asyncio
from pprint import pprint
from anta.inventory import AntaInventory
from anta.models import AntaCommand
async def main(inv: AntaInventory, commands: list[str]) -> dict[str, list[AntaCommand]]:
"""
Take an AntaInventory and a list of commands as string and:
1. try to connect to every device in the inventory
2. collect the results of the commands from each device
Returns:
      a dictionary where the key is the device name and the value is the list of AntaCommand run against the device
"""
await inv.connect_inventory()
    # Make a list of coroutines to run commands against each connected device
coros = []
# dict to keep track of the commands per device
result_dict = {}
for name, device in inv.get_inventory(established_only=True).items():
anta_commands = [AntaCommand(command=command, ofmt="json") for command in commands]
result_dict[name] = anta_commands
coros.append(device.collect_commands(anta_commands))
# Run the coroutines
await asyncio.gather(*coros)
return result_dict
if __name__ == "__main__":
# Create the AntaInventory instance
inventory = AntaInventory.parse(
filename="inv.yml",
username="arista",
password="@rista123",
timeout=15,
)
# Create a list of commands with json output
commands = ["show version", "show ip bgp summary"]
# Run the main asyncio entry point
res = asyncio.run(main(inventory, commands))
pprint(res)
```
## Use tests from ANTA
All the test classes inherit from the same abstract base class, AntaTest. The class definition indicates which commands are required for the test, and the user should focus only on writing the `test` function with optional keyword arguments. Upon creation, an instance of the class instantiates a TestResult object that can be accessed later on to check the status of the test (unset, skipped, success, failure or error).
### Test structure
All tests are built on a class named `AntaTest` which provides a complete toolset for a test:
- Object creation
- Test definition
- TestResult definition
- Abstracted method to collect data
This approach means each time you create a test it will be based on this `AntaTest` class. Besides that, you will have to provide some elements:
- `name`: Name of the test
- `description`: A human readable description of your test
- `categories`: a list of categories used to sort tests.
- `commands`: a list of commands to run. This list _must_ be a list of `AntaCommand` instances, which are described in the next part of this document.
Here is an example of a hardware test related to device temperature:
```python
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional, cast
from anta.models import AntaTest, AntaCommand
class VerifyTemperature(AntaTest):
"""
    Verifies device temperature is currently OK.
"""
# The test name
name = "VerifyTemperature"
# A small description of the test, usually the first line of the class docstring
description = "Verifies device temparture is currently OK"
# The category of the test, usually the module name
categories = ["hardware"]
# The command(s) used for the test. Could be a template instead
commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
# Decorator
@AntaTest.anta_test
# abstract method that must be defined by the child Test class
def test(self) -> None:
"""Run VerifyTemperature validation"""
command_output = cast(Dict[str, Dict[Any, Any]], self.instance_commands[0].output)
temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
if temperature_status == "temperatureOk":
self.result.is_success()
else:
self.result.is_failure(f"Device temperature is not OK, systemStatus: {temperature_status }")
```
When you run the test, the object will automatically call its `anta.models.AntaTest.collect()` method to get the device output for each command if no pre-collected data was given to the test. This method loops over the commands and calls the `anta.inventory.models.InventoryDevice.collect()` method, which is in charge of managing the device connection and retrieving the data.
??? info "run test offline"
You can also pass eos data directly to your test if you want to validate data collected in a different workflow. An example is provided below just for information:
```python
test = VerifyTemperature(device, eos_data=test_data["eos_data"])
asyncio.run(test.test())
```
The `test` function is always the same and __must__ be defined with the `@AntaTest.anta_test` decorator. This function takes at least one argument, which is an `anta.inventory.models.InventoryDevice` object.
In some cases a test relies on additional inputs from the user, for instance the number of expected peers or other expected values. All parameters __must__ come with a default value and the test function __should__ validate the parameter values (at this stage this is the only place where validation can be done, but there are future plans to make this better).
```python
class VerifyTemperature(AntaTest):
...
@AntaTest.anta_test
def test(self) -> None:
pass
class VerifyTransceiversManufacturers(AntaTest):
...
@AntaTest.anta_test
def test(self, manufacturers: Optional[List[str]] = None) -> None:
        # validate the manufacturers parameter
pass
```
The test itself does not return any value; the result is directly available from your AntaTest object, which exposes an `anta.result_manager.models.TestResult` object with the result, the name of the test and optional messages:
- `name` (str): Device name where the test has run.
- `test` (str): Name of the test run on the device.
- `categories` (List[str]): List of categories the TestResult belongs to, by default the AntaTest categories.
- `description` (str): TestResult description, by default the AntaTest description.
- `results` (str): Result of the test. Can be one of ["unset", "success", "failure", "error", "skipped"].
- `message` (str, optional): Message to report after the test if any.
- `custom_field` (str, optional): Custom field to store a string for flexibility in integrating with ANTA
```python
from anta.tests.hardware import VerifyTemperature
test = VerifyTemperature(device, eos_data=test_data["eos_data"])
asyncio.run(test.test())
assert test.result.result == "success"
```
### Classes for commands
To make it easier to get data, ANTA defines 2 different classes to manage commands to send to devices:
#### [AntaCommand](../api/models.md#anta.models.AntaCommand) Class
Represents a command with the following information:
- Command to run
- Output format expected
- eAPI version
- Output of the command
Usage example:
```python
from anta.models import AntaCommand
cmd1 = AntaCommand(command="show zerotouch")
cmd2 = AntaCommand(command="show running-config diffs", ofmt="text")
```
!!! tip "Command revision and version"
    * Most EOS commands return a JSON structure according to a model (some commands may not be modeled, hence the need to sometimes use the `text` output format).
    * The model can change over time (adding features, etc.) and when the model is changed in a non-backward-compatible way, the __revision__ number is bumped. The initial model starts with __revision__ 1.
    * A __revision__ applies to a particular CLI command whereas a __version__ is global to an eAPI call. The __version__ is internally translated to a specific __revision__ for each CLI command in the RPC call. The currently supported __version__ values are `1` and `latest`.
    * A __revision takes precedence over a version__ (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned)
    * By default eAPI returns the first revision of each model to ensure that, when upgrading, integration with existing tools is not broken. This is done by using `version=1` by default in eAPI calls.
    ANTA uses `version="latest"` by default in AntaCommand. For some commands, you may want to run them with a different revision or version.
For instance the `VerifyRoutingTableSize` test leverages the first revision of `show bfd peers`:
```
# revision 1 as later revision introduce additional nesting for type
commands = [AntaCommand(command="show bfd peers", revision=1)]
```
#### [AntaTemplate](../api/models.md#anta.models.AntaTemplate) Class
Because some commands require more dynamic input than a static command with no parameters, ANTA supports command templates: you define a template in your test class and the user provides parameters when creating the test object.
```python
class RunArbitraryTemplateCommand(AntaTest):
"""
Run an EOS command and return result
Based on AntaTest to build relevant output for pytest
"""
name = "Run aributrary EOS command"
description = "To be used only with anta debug commands"
template = AntaTemplate(template="show interfaces {ifd}")
categories = ["debug"]
@AntaTest.anta_test
def test(self) -> None:
        for command in self.instance_commands:
            response = command.json_output
            errdisabled_interfaces = [interface for interface, value in response["interfaceStatuses"].items() if value["linkStatus"] == "errdisabled"]
            ...
params = [{"ifd": "Ethernet2"}, {"ifd": "Ethernet49/1"}]
run_command1 = RunArbitraryTemplateCommand(device_anta, params)
```
In this example, the test expects the interfaces to check from the user and will only run the command for the interfaces provided in `params`.

View file

@ -0,0 +1,87 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
ANTA is a streamlined Python framework designed for efficient interaction with network devices. This section outlines how ANTA incorporates caching mechanisms to collect command outputs from network devices.
## Configuration
By default, ANTA utilizes [aiocache](https://github.com/aio-libs/aiocache)'s memory cache backend, also called [`SimpleMemoryCache`](https://aiocache.aio-libs.org/en/v0.12.2/caches.html#simplememorycache). This library aims for simplicity and supports asynchronous operations to go along with Python `asyncio` used in ANTA.
The `_init_cache()` method of the [AntaDevice](../advanced_usages/as-python-lib.md#antadevice-abstract-class) abstract class initializes the cache. Child classes can override this method to tweak the cache configuration:
```python
def _init_cache(self) -> None:
"""
Initialize cache for the device, can be overridden by subclasses to manipulate how it works
"""
self.cache = Cache(cache_class=Cache.MEMORY, ttl=60, namespace=self.name, plugins=[HitMissRatioPlugin()])
self.cache_locks = defaultdict(asyncio.Lock)
```
The cache is also configured with `aiocache`'s [`HitMissRatioPlugin`](https://aiocache.aio-libs.org/en/v0.12.2/plugins.html#hitmissratioplugin) plugin to calculate the ratio of hits the cache has and give useful statistics for logging purposes in ANTA.
## Cache key design
The cache is initialized per `AntaDevice` and uses the following cache key design:
`<device_name>:<uid>`
The `uid` is an attribute of [AntaCommand](../advanced_usages/as-python-lib.md#antacommand-class), which is a unique identifier generated from the command, version, revision and output format.
Each UID has its own asyncio lock. This design allows coroutines that need to access the cache for different UIDs to do so concurrently. The locks are managed by the `self.cache_locks` dictionary.
## Mechanisms
By default, once the cache is initialized, it is used in the `collect()` method of `AntaDevice`. The `collect()` method prioritizes retrieving the output of the command from the cache. If the output is not in the cache, the private `_collect()` method will retrieve and then store it for future access.
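Put together, a minimal sketch of this logic could look like the snippet below. This is only an illustration of the cache key and per-UID lock described above, not the actual `AntaDevice.collect()` implementation; it assumes the `cache`, `cache_locks` and `_collect()` members introduced earlier.
```python
async def collect(self, command: AntaCommand) -> None:
    # Serialize access per command UID so identical commands are only collected once
    async with self.cache_locks[command.uid]:
        cached_output = await self.cache.get(command.uid)  # the cache is namespaced per device
        if cached_output is not None:
            command.output = cached_output
        else:
            await self._collect(command)
            await self.cache.set(command.uid, command.output)
```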
## How to disable caching
Caching is enabled by default in ANTA following the previous configuration and mechanisms.
There might be scenarios where caching is not wanted. You can disable caching in multiple ways in ANTA:
1. Caching can be disabled globally, for **ALL** commands on **ALL** devices, using the `--disable-cache` global flag when invoking anta at the [CLI](../cli/overview.md#invoking-anta-cli):
```bash
anta --disable-cache --username arista --password arista nrfu table
```
2. Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` when defining the ANTA [Inventory](../usage-inventory-catalog.md#create-an-inventory-file) file:
```yaml
anta_inventory:
hosts:
- host: 172.20.20.101
name: DC1-SPINE1
tags: ["SPINE", "DC1"]
disable_cache: True # Set this key to True
- host: 172.20.20.102
name: DC1-SPINE2
tags: ["SPINE", "DC1"]
disable_cache: False # Optional since it's the default
networks:
- network: "172.21.21.0/24"
disable_cache: True
ranges:
- start: 172.22.22.10
end: 172.22.22.19
disable_cache: True
```
This approach effectively disables caching for **ALL** commands sent to devices targeted by the `disable_cache` key.
3. For test developers, caching can be disabled for a specific [`AntaCommand`](../advanced_usages/as-python-lib.md#antacommand-class) or [`AntaTemplate`](../advanced_usages/as-python-lib.md#antatemplate-class) by setting the `use_cache` attribute to `False`, as shown in the snippet below. That means the command output will always be collected on the device and, therefore, never use caching.
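    A minimal illustration (the command string is arbitrary; `use_cache` is the attribute documented above):
    ```python
    from anta.models import AntaCommand

    # This command's output will never be cached and will always be collected from the device
    cmd = AntaCommand(command="show clock", use_cache=False)
    ```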
### Disable caching in a child class of `AntaDevice`
Since caching is implemented at the `AntaDevice` abstract class level, all subclasses will inherit that default behavior. As a result, if you need to disable caching in any custom implementation of `AntaDevice` outside of the ANTA framework, you must initialize `AntaDevice` with `disable_cache` set to `True`:
```python
class AnsibleEOSDevice(AntaDevice):
"""
Implementation of an AntaDevice using Ansible HttpApi plugin for EOS.
"""
def __init__(self, name: str, connection: ConnectionBase, tags: list = None) -> None:
super().__init__(name, tags, disable_cache=True)
```

View file

@ -0,0 +1,324 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
!!! info ""
This documentation applies for both creating tests in ANTA or creating your own test package.
ANTA is not only a Python library with a CLI and a collection of built-in tests, it is also a framework you can extend by building your own tests.
## Generic approach
A test is a Python class where a test function is defined and will be run by the framework.
ANTA provides an abstract class [AntaTest](../api/models.md#anta.models.AntaTest). This class does the heavy lifting and provides the logic to define, collect and test data. The code below is an example of a simple test in ANTA, which is an [AntaTest](../api/models.md#anta.models.AntaTest) subclass:
```python
from anta.models import AntaTest, AntaCommand
from anta.decorators import skip_on_platforms
class VerifyTemperature(AntaTest):
"""
This test verifies if the device temperature is within acceptable limits.
Expected Results:
* success: The test will pass if the device temperature is currently OK: 'temperatureOk'.
* failure: The test will fail if the device temperature is NOT OK.
"""
name = "VerifyTemperature"
description = "Verifies if the device temperature is within the acceptable range."
categories = ["hardware"]
commands = [AntaCommand(command="show system environment temperature", ofmt="json")]
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
command_output = self.instance_commands[0].json_output
temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
if temperature_status == "temperatureOk":
self.result.is_success()
else:
self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'")
```
[AntaTest](../api/models.md#anta.models.AntaTest) also provides more advanced capabilities like [AntaCommand](../api/models.md#anta.models.AntaCommand) templating using the [AntaTemplate](../api/models.md#anta.models.AntaTemplate) class or test input definition and validation using the [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) [pydantic](https://docs.pydantic.dev/latest/) model. These are discussed in the sections below.
## [AntaTest](../api/models.md#anta.models.AntaTest) structure
### Class Attributes
- `name` (`str`): Name of the test. Used during reporting.
- `description` (`str`): A human readable description of your test.
- `categories` (`list[str]`): A list of categories in which the test belongs.
- `commands` (`list[Union[AntaTemplate, AntaCommand]]`): A list of commands to collect from devices. This list __must__ be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later.
!!! info
All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation.
### Instance Attributes
!!! info
You can access an instance attribute in your code using the `self` reference. E.g. you can access the test input values using `self.inputs`.
::: anta.models.AntaTest
options:
show_docstring_attributes: true
show_root_heading: false
show_bases: false
show_docstring_description: false
show_docstring_examples: false
show_docstring_parameters: false
members: false
show_source: false
show_root_toc_entry: false
heading_level: 10
!!! note "Logger object"
    ANTA already provides comprehensive logging at every step of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See the [Python documentation](https://docs.python.org/3/library/logging.html) for more information.
!!! note "AntaDevice object"
Even if `device` is not a private attribute, you should not need to access this object in your code.
### Test Inputs
[AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) is a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that allows test developers to define their test inputs. [pydantic](https://docs.pydantic.dev/latest/) provides out-of-the-box [error handling](https://docs.pydantic.dev/latest/usage/models/#error-handling) for test input validation based on the type hints defined by the test developer.
The base definition of [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) provides common test inputs for all [AntaTest](../api/models.md#anta.models.AntaTest) instances:
#### [Input](../api/models.md#anta.models.AntaTest.Input) model
::: anta.models.AntaTest.Input
options:
show_docstring_attributes: true
show_root_heading: false
show_category_heading: false
show_bases: false
show_docstring_description: false
show_docstring_examples: false
show_docstring_parameters: false
show_source: false
members: false
show_root_toc_entry: false
heading_level: 10
#### [ResultOverwrite](../api/models.md#anta.models.AntaTest.Input.ResultOverwrite) model
::: anta.models.AntaTest.Input.ResultOverwrite
options:
show_docstring_attributes: true
show_root_heading: false
show_category_heading: false
show_bases: false
show_docstring_description: false
show_docstring_examples: false
show_docstring_parameters: false
show_source: false
show_root_toc_entry: false
heading_level: 10
!!! note
    The pydantic model is configured with [`extra=forbid`](https://docs.pydantic.dev/latest/usage/model_config/#extra-attributes), which will fail input validation if extra fields are provided.
### Methods
- [test(self) -> None](../api/models.md#anta.models.AntaTest.test): This is an abstract method that __must__ be implemented. It contains the test logic that can access the collected command outputs using the `instance_commands` instance attribute, access the test inputs using the `inputs` instance attribute and __must__ set the `result` instance attribute accordingly. It must be implemented using the `AntaTest.anta_test` decorator that provides logging and will collect commands before executing the `test()` method.
- [render(self, template: AntaTemplate) -> list[AntaCommand]](../api/models.md#anta.models.AntaTest.render): This method only needs to be implemented if [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances are present in the `commands` class attribute. It will be called for every [AntaTemplate](../api/models.md#anta.models.AntaTemplate) occurrence and __must__ return a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) using the [AntaTemplate.render()](../api/models.md#anta.models.AntaTemplate.render) method. It can access test inputs using the `inputs` instance attribute.
## Test execution
Below is a high level description of the test execution flow in ANTA:
1. ANTA will parse the test catalog to get the list of [AntaTest](../api/models.md#anta.models.AntaTest) subclasses to instantiate and their associated input values. We consider a single [AntaTest](../api/models.md#anta.models.AntaTest) subclass in the following steps.
2. ANTA will instantiate the [AntaTest](../api/models.md#anta.models.AntaTest) subclass and a single device will be provided to the test instance. The `Input` model defined in the class will also be instantiated at this moment. If any [ValidationError](https://docs.pydantic.dev/latest/errors/errors/) is raised, the test execution will be stopped.
3. If there is any [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instance in the `commands` class attribute, [render()](../api/models.md#anta.models.AntaTest.render) will be called for every occurrence. At this moment, the `instance_commands` attribute has been initialized. If any rendering error occurs, the test execution will be stopped.
4. The `AntaTest.anta_test` decorator will collect the commands from the device and update the `instance_commands` attribute with the outputs. If any collection error occurs, the test execution will be stopped.
5. The [test()](../api/models.md#anta.models.AntaTest.test) method is executed.
## Writing an AntaTest subclass
In this section, we will go into all the details of writing an [AntaTest](../api/models.md#anta.models.AntaTest) subclass.
### Class definition
Import [anta.models.AntaTest](../api/models.md#anta.models.AntaTest) and define your own class.
Define the mandatory class attributes using [anta.models.AntaCommand](../api/models.md#anta.models.AntaCommand), [anta.models.AntaTemplate](../api/models.md#anta.models.AntaTemplate) or both.
!!! info
Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md).
```python
from anta.models import AntaTest, AntaCommand, AntaTemplate
class <YourTestName>(AntaTest):
"""
<a docstring description of your test>
"""
name = "YourTestName" # should be your class name
description = "<test description in human reading format>"
categories = ["<arbitrary category>", "<another arbitrary category>"]
commands = [
AntaCommand(
command="<EOS command to run>",
ofmt="<command format output>",
version="<eAPI version to use>",
revision="<revision to use for the command>", # revision has precedence over version
use_cache="<Use cache for the command>",
),
AntaTemplate(
template="<Python f-string to render an EOS command>",
ofmt="<command format output>",
version="<eAPI version to use>",
revision="<revision to use for the command>", # revision has precedence over version
use_cache="<Use cache for the command>",
)
]
```
### Inputs definition
If the user needs to provide inputs for your test, you need to define a [pydantic model](https://docs.pydantic.dev/latest/usage/models/) that defines the schema of the test inputs:
```python
class <YourTestName>(AntaTest):
...
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
<input field name>: <input field type>
"""<input field docstring>"""
```
To define an input field type, refer to the [pydantic documentation](https://docs.pydantic.dev/latest/usage/types/types/) about types.
You can also leverage [anta.custom_types](../api/types.md) that provides reusable types defined in ANTA tests.
Regarding required, optional and nullable fields, refer to this [documentation](https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields) on how to define them.
!!! note
All the `pydantic` features are supported. For instance you can define [validators](https://docs.pydantic.dev/latest/usage/validators/) for complex input validation.
### Template rendering
Define the `render()` method if you have [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances in your `commands` class attribute:
```python
class <YourTestName>(AntaTest):
...
def render(self, template: AntaTemplate) -> list[AntaCommand]:
return [template.render(<template param>=input_value) for input_value in self.inputs.<input_field>]
```
You can access test inputs and render as many [AntaCommand](../api/models.md#anta.models.AntaCommand) as desired.
### Test definition
Implement the `test()` method with your test logic:
```python
class <YourTestName>(AntaTest):
...
@AntaTest.anta_test
def test(self) -> None:
pass
```
The logic usually includes the following different stages:
1. Parse the command outputs using the `self.instance_commands` instance attribute.
2. If needed, access the test inputs using the `self.inputs` instance attribute and write your conditional logic.
3. Set the `result` instance attribute to reflect the test result by either calling `self.result.is_success()` or `self.result.is_failure("<FAILURE REASON>")`. Sometimes, setting the test result to `skipped` using `self.result.is_skipped("<SKIPPED REASON>")` can make sense (e.g. testing the OSPF neighbor states but no neighbor was found). However, you should not need to catch any exception and set the test result to `error` since the error handling is done by the framework, see below.
The example below is based on the [VerifyTemperature](../api/tests.hardware.md#anta.tests.hardware.VerifyTemperature) test.
```python
class VerifyTemperature(AntaTest):
...
@AntaTest.anta_test
def test(self) -> None:
# Grab output of the collected command
command_output = self.instance_commands[0].json_output
# Do your test: In this example we check a specific field of the JSON output from EOS
temperature_status = command_output["systemStatus"] if "systemStatus" in command_output.keys() else ""
if temperature_status == "temperatureOk":
self.result.is_success()
else:
self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'")
```
As you can see, there is no error handling to do in your code. Everything is packaged in the `AntaTest.anta_test` decorator, and below is a simple example of an error captured when trying to access a dictionary with an incorrect key:
```python
class VerifyTemperature(AntaTest):
...
@AntaTest.anta_test
def test(self) -> None:
# Grab output of the collected command
command_output = self.instance_commands[0].json_output
# Access the dictionary with an incorrect key
command_output['incorrectKey']
```
```bash
ERROR Exception raised for test VerifyTemperature (on device 192.168.0.10) - KeyError ('incorrectKey')
```
!!! info "Get stack trace for debugging"
If you want to access to the full exception stack, you can run ANTA in debug mode by setting the `ANTA_DEBUG` environment variable to `true`. Example:
```bash
$ ANTA_DEBUG=true anta nrfu --catalog test_custom.yml text
```
### Test decorators
In addition to the required `AntaTest.anta_test` decorator, ANTA offers a set of optional decorators for further test customization:
- `anta.decorators.deprecated_test`: Use this to log a message of WARNING severity when a test is deprecated.
- `anta.decorators.skip_on_platforms`: Use this to skip tests for functionalities that are not supported on specific platforms.
```python
from anta.decorators import skip_on_platforms
class VerifyTemperature(AntaTest):
...
@skip_on_platforms(["cEOSLab", "vEOS-lab"])
@AntaTest.anta_test
def test(self) -> None:
pass
```
## Access your custom tests in the test catalog
!!! warning ""
This section is required only if you are not merging your development into ANTA. Otherwise, just follow [contribution guide](../contribution.md).
For that, you need to create your own Python package as described in this [hitchhiker's guide](https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/) to packaging Python code. We assume it is well known and we won't focus on this aspect. Thus, your package must be importable by ANTA and hence available in the module search path `sys.path` (you can use `PYTHONPATH`, for example).
It is very similar to what is documented in the [catalog section](../usage-inventory-catalog.md), but you have to use your own package name.
Let's say the custom Python package is `anta_titom73` and the test is defined in the `anta_titom73.dc_project` Python module; the test catalog would look like:
```yaml
anta_titom73.dc_project:
- VerifyFeatureX:
minimum: 1
```
And now you can run your NRFU tests with the CLI:
```bash
anta nrfu text --catalog test_custom.yml
spine01 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)
spine02 :: verify_dynamic_vlan :: FAILURE (Device has 0 configured, we expect at least 1)
leaf01 :: verify_dynamic_vlan :: SUCCESS
leaf02 :: verify_dynamic_vlan :: SUCCESS
leaf03 :: verify_dynamic_vlan :: SUCCESS
leaf04 :: verify_dynamic_vlan :: SUCCESS
```

13
docs/api/catalog.md Normal file
View file

@ -0,0 +1,13 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
### ::: anta.catalog.AntaCatalog
options:
filters: ["!^_[^_]", "!__str__"]
### ::: anta.catalog.AntaTestDefinition
### ::: anta.catalog.AntaCatalogFile

25
docs/api/device.md Normal file
View file

@ -0,0 +1,25 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
# AntaDevice base class
## UML representation
![](../imgs/uml/anta.device.AntaDevice.jpeg)
### ::: anta.device.AntaDevice
options:
filters: ["!^_[^_]", "!__(eq|rich_repr)__"]
# Async EOS device class
## UML representation
![](../imgs/uml/anta.device.AsyncEOSDevice.jpeg)
### ::: anta.device.AsyncEOSDevice
options:
filters: ["!^_[^_]", "!__(eq|rich_repr)__"]

11
docs/api/inventory.md Normal file
View file

@ -0,0 +1,11 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
### ::: anta.inventory.AntaInventory
options:
filters: ["!^_[^_]", "!__str__"]
### ::: anta.inventory.exceptions

View file

@ -0,0 +1,13 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
### ::: anta.inventory.models.AntaInventoryInput
### ::: anta.inventory.models.AntaInventoryHost
### ::: anta.inventory.models.AntaInventoryNetwork
### ::: anta.inventory.models.AntaInventoryRange

37
docs/api/models.md Normal file
View file

@ -0,0 +1,37 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
# Test definition
## UML Diagram
![](../imgs/uml/anta.models.AntaTest.jpeg)
### ::: anta.models.AntaTest
options:
filters: ["!^_[^_]", "!__init_subclass__", "!update_progress"]
# Command definition
## UML Diagram
![](../imgs/uml/anta.models.AntaCommand.jpeg)
### ::: anta.models.AntaCommand
!!! warning
CLI commands are protected to avoid execution of critical commands such as `reload` or `write erase`.
- Reload command: `^reload\s*\w*`
- Configure mode: `^conf\w*\s*(terminal|session)*`
- Write: `^wr\w*\s*\w+`
# Template definition
## UML Diagram
![](../imgs/uml/anta.models.AntaTemplate.jpeg)
### ::: anta.models.AntaTemplate

View file

@ -0,0 +1,7 @@
<!--
~ Copyright (c) 2023-2024 Arista Networks, Inc.
~ Use of this source code is governed by the Apache License 2.0
~ that can be found in the LICENSE file.
-->
### ::: anta.reporter.ReportTable

Some files were not shown because too many files have changed in this diff.