Merging upstream version 0.14.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent 082ce481df
commit 2265bd9c67

211 changed files with 12174 additions and 6401 deletions
@@ -1,3 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
+"""Unit tests for anta."""
@@ -1,3 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
+"""Test for anta.tests submodule."""
@@ -1,3 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
+"""Test for anta.tests.routing submodule."""
File diff suppressed because it is too large
@@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""
-Tests for anta.tests.routing.generic.py
-"""
+"""Tests for anta.tests.routing.generic.py."""

from __future__ import annotations

from typing import Any
@@ -43,9 +42,9 @@ DATA: list[dict[str, Any]] = [
# Output truncated
"maskLen": {"8": 2},
"totalRoutes": 123,
"inputs": {"minimum": 42, "maximum": 666},
"expected": {"result": "success"},
@@ -60,9 +59,9 @@ DATA: list[dict[str, Any]] = [
# Output truncated
"maskLen": {"8": 2},
"totalRoutes": 1000,
"inputs": {"minimum": 42, "maximum": 666},
"expected": {"result": "failure", "messages": ["routing-table has 1000 routes and not between min (42) and maximum (666)"]},
@@ -99,10 +98,10 @@ DATA: list[dict[str, Any]] = [
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.4", "interface": "Ethernet1"}],
@@ -122,10 +121,10 @@ DATA: list[dict[str, Any]] = [
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.6", "interface": "Ethernet2"}],
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"]},
@@ -143,8 +142,8 @@ DATA: list[dict[str, Any]] = [
"allRoutesProgrammedKernel": True,
"defaultRouteState": "notSet",
"routes": {},
@@ -164,10 +163,10 @@ DATA: list[dict[str, Any]] = [
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.6", "interface": "Ethernet2"}],
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"]},
@@ -195,10 +194,10 @@ DATA: list[dict[str, Any]] = [
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.4", "interface": "Ethernet1"}],
@@ -218,10 +217,10 @@ DATA: list[dict[str, Any]] = [
"preference": 20,
"metric": 0,
"vias": [{"nexthopAddr": "10.1.255.6", "interface": "Ethernet2"}],
"inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"]},
@@ -1,14 +1,13 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""
-Tests for anta.tests.routing.ospf.py
-"""
+"""Tests for anta.tests.routing.ospf.py."""

from __future__ import annotations

from typing import Any

-from anta.tests.routing.ospf import VerifyOSPFNeighborCount, VerifyOSPFNeighborState
+from anta.tests.routing.ospf import VerifyOSPFMaxLSA, VerifyOSPFNeighborCount, VerifyOSPFNeighborState
from tests.lib.anta import test  # noqa: F401; pylint: disable=W0611

DATA: list[dict[str, Any]] = [
@@ -40,9 +39,9 @@ DATA: list[dict[str, Any]] = [
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"BLAH": {
"instList": {
@@ -56,13 +55,13 @@ DATA: list[dict[str, Any]] = [
"adjacencyState": "full",
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"inputs": None,
"expected": {"result": "success"},
@@ -95,9 +94,9 @@ DATA: list[dict[str, Any]] = [
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"BLAH": {
"instList": {
@@ -111,20 +110,20 @@ DATA: list[dict[str, Any]] = [
"adjacencyState": "down",
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"inputs": None,
"expected": {
"result": "failure",
"messages": [
"Some neighbors are not correctly configured: [{'vrf': 'default', 'instance': '666', 'neighbor': '7.7.7.7', 'state': '2-way'},"
-" {'vrf': 'BLAH', 'instance': '777', 'neighbor': '8.8.8.8', 'state': 'down'}]."
+" {'vrf': 'BLAH', 'instance': '777', 'neighbor': '8.8.8.8', 'state': 'down'}].",
],
},
@@ -134,7 +133,7 @@ DATA: list[dict[str, Any]] = [
"eos_data": [
{
"vrfs": {},
-}
+},
],
"inputs": None,
"expected": {"result": "skipped", "messages": ["no OSPF neighbor found"]},
@@ -167,9 +166,9 @@ DATA: list[dict[str, Any]] = [
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"BLAH": {
"instList": {
@@ -183,13 +182,13 @@ DATA: list[dict[str, Any]] = [
"adjacencyState": "full",
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"inputs": {"number": 3},
"expected": {"result": "success"},
@@ -213,12 +212,12 @@ DATA: list[dict[str, Any]] = [
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"inputs": {"number": 3},
"expected": {"result": "failure", "messages": ["device has 1 neighbors (expected 3)"]},
@@ -251,9 +250,9 @@ DATA: list[dict[str, Any]] = [
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"BLAH": {
"instList": {
@@ -267,20 +266,20 @@ DATA: list[dict[str, Any]] = [
"adjacencyState": "down",
"inactivity": 1683298014.844345,
"interfaceAddress": "10.3.0.1",
"inputs": {"number": 3},
"expected": {
"result": "failure",
"messages": [
"Some neighbors are not correctly configured: [{'vrf': 'default', 'instance': '666', 'neighbor': '7.7.7.7', 'state': '2-way'},"
-" {'vrf': 'BLAH', 'instance': '777', 'neighbor': '8.8.8.8', 'state': 'down'}]."
+" {'vrf': 'BLAH', 'instance': '777', 'neighbor': '8.8.8.8', 'state': 'down'}].",
],
},
@@ -290,9 +289,123 @@ DATA: list[dict[str, Any]] = [
"eos_data": [
{
"vrfs": {},
-}
+},
],
"inputs": {"number": 3},
"expected": {"result": "skipped", "messages": ["no OSPF neighbor found"]},
},
+{
+"name": "success",
+"test": VerifyOSPFMaxLSA,
+"eos_data": [
+{
+"vrfs": {
+"default": {
+"instList": {
+"1": {
+"instanceId": 1,
+"maxLsaInformation": {
+"maxLsa": 12000,
+"maxLsaThreshold": 75,
+},
+"routerId": "1.1.1.1",
+"lsaInformation": {
+"lsaArrivalInterval": 1000,
+"lsaStartInterval": 1000,
+"lsaHoldInterval": 5000,
+"lsaMaxWaitInterval": 5000,
+"numLsa": 9,
+},
+},
+},
+},
+"TEST": {
+"instList": {
+"10": {
+"instanceId": 10,
+"maxLsaInformation": {
+"maxLsa": 1000,
+"maxLsaThreshold": 75,
+},
+"routerId": "20.20.20.20",
+"lsaInformation": {
+"lsaArrivalInterval": 1000,
+"lsaStartInterval": 1000,
+"lsaHoldInterval": 5000,
+"lsaMaxWaitInterval": 5000,
+"numLsa": 5,
+},
+},
+},
+},
+},
+},
+],
+"inputs": None,
+"expected": {"result": "success"},
+},
+{
+"name": "failure",
+"test": VerifyOSPFMaxLSA,
+"eos_data": [
+{
+"vrfs": {
+"default": {
+"instList": {
+"1": {
+"instanceId": 1,
+"maxLsaInformation": {
+"maxLsa": 12000,
+"maxLsaThreshold": 75,
+},
+"routerId": "1.1.1.1",
+"lsaInformation": {
+"lsaArrivalInterval": 1000,
+"lsaStartInterval": 1000,
+"lsaHoldInterval": 5000,
+"lsaMaxWaitInterval": 5000,
+"numLsa": 11500,
+},
+},
+},
+},
+"TEST": {
+"instList": {
+"10": {
+"instanceId": 10,
+"maxLsaInformation": {
+"maxLsa": 1000,
+"maxLsaThreshold": 75,
+},
+"routerId": "20.20.20.20",
+"lsaInformation": {
+"lsaArrivalInterval": 1000,
+"lsaStartInterval": 1000,
+"lsaHoldInterval": 5000,
+"lsaMaxWaitInterval": 5000,
+"numLsa": 1500,
+},
+},
+},
+},
+},
+},
+],
+"inputs": None,
+"expected": {
+"result": "failure",
+"messages": ["OSPF Instances ['1', '10'] crossed the maximum LSA threshold."],
+},
+},
+{
+"name": "skipped",
+"test": VerifyOSPFMaxLSA,
+"eos_data": [
+{
+"vrfs": {},
+},
+],
+"inputs": None,
+"expected": {"result": "skipped", "messages": ["No OSPF instance found."]},
+},
]
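Every entry in these DATA lists has the same shape -- "name", "test", "eos_data", "inputs", "expected" -- and is exercised through the shared test helper imported above from tests.lib.anta. A minimal sketch of that pattern, assuming the usual pytest-style parametrization over DATA (the helper name, constructor signature, and result attributes below are illustrative assumptions, not the project's exact harness):

# Illustrative only: run one DATA entry against a device whose eAPI replies
# are replaced by the canned "eos_data" payloads, then compare the outcome.
import asyncio

def run_entry(entry, mocked_device):
    # Build the unit test with its declared inputs and pre-seeded command output.
    test_instance = entry["test"](mocked_device, inputs=entry["inputs"], eos_data=entry["eos_data"])
    # The test coroutine evaluates the canned data instead of calling the device.
    asyncio.run(test_instance.test())
    # The expected block pins both the verdict and (optionally) the messages.
    assert test_instance.result.result == entry["expected"]["result"]
    for message in entry["expected"].get("messages", []):
        assert any(message in reported for reported in test_instance.result.messages)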
@@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""
-Tests for anta.tests.aaa.py
-"""
+"""Tests for anta.tests.aaa.py."""

from __future__ import annotations

from typing import Any
@@ -28,11 +27,11 @@ DATA: list[dict[str, Any]] = [
"tacacsServers": [
{
"serverInfo": {"hostname": "10.22.10.91", "authport": 49, "vrf": "MGMT"},
},
],
"groups": {"GROUP1": {"serverGroup": "TACACS+", "members": [{"hostname": "SERVER1", "authport": 49, "vrf": "MGMT"}]}},
"srcIntf": {"MGMT": "Management0"},
"inputs": {"intf": "Management0", "vrf": "MGMT"},
"expected": {"result": "success"},
@@ -45,7 +44,7 @@ DATA: list[dict[str, Any]] = [
"tacacsServers": [],
"groups": {},
"srcIntf": {},
"inputs": {"intf": "Management0", "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["Source-interface Management0 is not configured in VRF MGMT"]},
@@ -58,11 +57,11 @@ DATA: list[dict[str, Any]] = [
"srcIntf": {"MGMT": "Management1"},
"inputs": {"intf": "Management0", "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["Wrong source-interface configured in VRF MGMT"]},
@@ -75,11 +74,11 @@ DATA: list[dict[str, Any]] = [
"srcIntf": {"PROD": "Management0"},
"inputs": {"intf": "Management0", "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["Source-interface Management0 is not configured in VRF MGMT"]},
@@ -92,11 +91,11 @@ DATA: list[dict[str, Any]] = [
"srcIntf": {"MGMT": "Management0"},
"inputs": {"servers": ["10.22.10.91"], "vrf": "MGMT"},
"expected": {"result": "success"},
@@ -109,7 +108,7 @@ DATA: list[dict[str, Any]] = [
"tacacsServers": [],
"groups": {},
"srcIntf": {},
"inputs": {"servers": ["10.22.10.91"], "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["No TACACS servers are configured"]},
@@ -122,11 +121,11 @@ DATA: list[dict[str, Any]] = [
"inputs": {"servers": ["10.22.10.91", "10.22.10.92"], "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["TACACS servers ['10.22.10.92'] are not configured in VRF MGMT"]},
@@ -139,11 +138,11 @@ DATA: list[dict[str, Any]] = [
"serverInfo": {"hostname": "10.22.10.91", "authport": 49, "vrf": "PROD"},
"inputs": {"servers": ["10.22.10.91"], "vrf": "MGMT"},
"expected": {"result": "failure", "messages": ["TACACS servers ['10.22.10.91'] are not configured in VRF MGMT"]},
@@ -156,11 +155,11 @@ DATA: list[dict[str, Any]] = [
"groups": {"GROUP1": {"serverGroup": "TACACS+", "members": [{"hostname": "SERVER1", "authport": 49, "vrf": "MGMT"}]}},
"inputs": {"groups": ["GROUP1"]},
"expected": {"result": "success"},
@@ -173,7 +172,7 @@ DATA: list[dict[str, Any]] = [
"tacacsServers": [],
"groups": {},
"srcIntf": {},
"inputs": {"groups": ["GROUP1"]},
"expected": {"result": "failure", "messages": ["No TACACS server group(s) are configured"]},
@@ -186,11 +185,11 @@ DATA: list[dict[str, Any]] = [
"groups": {"GROUP2": {"serverGroup": "TACACS+", "members": [{"hostname": "SERVER1", "authport": 49, "vrf": "MGMT"}]}},
"inputs": {"groups": ["GROUP1"]},
"expected": {"result": "failure", "messages": ["TACACS server group(s) ['GROUP1'] are not configured"]},
@@ -203,7 +202,7 @@ DATA: list[dict[str, Any]] = [
"loginAuthenMethods": {"default": {"methods": ["group tacacs+", "local"]}, "login": {"methods": ["group tacacs+", "local"]}},
"enableAuthenMethods": {"default": {"methods": ["group tacacs+", "local"]}},
"dot1xAuthenMethods": {"default": {"methods": ["group radius"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["login", "enable"]},
"expected": {"result": "success"},
@@ -216,7 +215,7 @@ DATA: list[dict[str, Any]] = [
"inputs": {"methods": ["radius"], "types": ["dot1x"]},
"expected": {"result": "success"},
@@ -229,7 +228,7 @@ DATA: list[dict[str, Any]] = [
"loginAuthenMethods": {"default": {"methods": ["group tacacs+", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["login", "enable"]},
"expected": {"result": "failure", "messages": ["AAA authentication methods are not configured for login console"]},
@@ -242,7 +241,7 @@ DATA: list[dict[str, Any]] = [
"loginAuthenMethods": {"default": {"methods": ["group tacacs+", "local"]}, "login": {"methods": ["group radius", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["login", "enable"]},
"expected": {"result": "failure", "messages": ["AAA authentication methods ['group tacacs+', 'local'] are not matching for login console"]},
@@ -255,7 +254,7 @@ DATA: list[dict[str, Any]] = [
"loginAuthenMethods": {"default": {"methods": ["group radius", "local"]}, "login": {"methods": ["group tacacs+", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["login", "enable"]},
"expected": {"result": "failure", "messages": ["AAA authentication methods ['group tacacs+', 'local'] are not matching for ['login']"]},
@@ -267,7 +266,7 @@ DATA: list[dict[str, Any]] = [
"commandsAuthzMethods": {"privilege0-15": {"methods": ["group tacacs+", "local"]}},
"execAuthzMethods": {"exec": {"methods": ["group tacacs+", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["commands", "exec"]},
"expected": {"result": "success"},
@@ -279,7 +278,7 @@ DATA: list[dict[str, Any]] = [
"commandsAuthzMethods": {"privilege0-15": {"methods": ["group radius", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["commands", "exec"]},
"expected": {"result": "failure", "messages": ["AAA authorization methods ['group tacacs+', 'local'] are not matching for ['commands']"]},
@@ -291,7 +290,7 @@ DATA: list[dict[str, Any]] = [
"execAuthzMethods": {"exec": {"methods": ["group radius", "local"]}},
"inputs": {"methods": ["tacacs+", "local"], "types": ["commands", "exec"]},
"expected": {"result": "failure", "messages": ["AAA authorization methods ['group tacacs+', 'local'] are not matching for ['exec']"]},
@@ -305,7 +304,7 @@ DATA: list[dict[str, Any]] = [
"execAcctMethods": {"exec": {"defaultAction": "startStop", "defaultMethods": ["group tacacs+", "logging"], "consoleMethods": []}},
"systemAcctMethods": {"system": {"defaultAction": "startStop", "defaultMethods": ["group tacacs+", "logging"], "consoleMethods": []}},
"dot1xAcctMethods": {"dot1x": {"defaultMethods": [], "consoleMethods": []}},
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "success"},
@@ -319,7 +318,7 @@ DATA: list[dict[str, Any]] = [
"dot1xAcctMethods": {"dot1x": {"defaultAction": "startStop", "defaultMethods": ["group radius", "logging"], "consoleMethods": []}},
"inputs": {"methods": ["radius", "logging"], "types": ["dot1x"]},
"expected": {"result": "success"},
@@ -333,7 +332,7 @@ DATA: list[dict[str, Any]] = [
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA default accounting is not configured for ['commands']"]},
@@ -347,7 +346,7 @@ DATA: list[dict[str, Any]] = [
"execAcctMethods": {"exec": {"defaultMethods": [], "consoleMethods": []}},
"commandsAcctMethods": {"privilege0-15": {"defaultMethods": [], "consoleMethods": []}},
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA default accounting is not configured for ['system', 'exec', 'commands']"]},
@@ -361,7 +360,7 @@ DATA: list[dict[str, Any]] = [
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA accounting default methods ['group tacacs+', 'logging'] are not matching for ['commands']"]},
@@ -376,24 +375,24 @@ DATA: list[dict[str, Any]] = [
"defaultMethods": [],
"consoleAction": "startStop",
"consoleMethods": ["group tacacs+", "logging"],
"dot1xAcctMethods": {"dot1x": {"defaultMethods": [], "consoleMethods": []}},
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "success"},
@@ -408,30 +407,30 @@ DATA: list[dict[str, Any]] = [
"dot1xAcctMethods": {
"dot1x": {
"defaultMethods": [],
"consoleAction": "startStop",
"consoleMethods": ["group tacacs+", "logging"],
"inputs": {"methods": ["tacacs+", "logging"], "types": ["dot1x"]},
"expected": {"result": "success"},
@@ -445,24 +444,24 @@ DATA: list[dict[str, Any]] = [
"privilege0-15": {
"defaultMethods": [],
"consoleMethods": [],
"dot1xAcctMethods": {"dot1x": {"defaultMethods": [], "consoleMethods": []}},
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA console accounting is not configured for ['commands']"]},
@@ -476,7 +475,7 @@ DATA: list[dict[str, Any]] = [
"execAcctMethods": {"exec": {"defaultMethods": [], "consoleMethods": []}},
"commandsAcctMethods": {"privilege0-15": {"defaultMethods": [], "consoleMethods": []}},
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA console accounting is not configured for ['system', 'exec', 'commands']"]},
@@ -491,24 +490,24 @@ DATA: list[dict[str, Any]] = [
"consoleMethods": ["group radius", "logging"],
"inputs": {"methods": ["tacacs+", "logging"], "types": ["commands", "exec", "system"]},
"expected": {"result": "failure", "messages": ["AAA accounting console methods ['group tacacs+', 'logging'] are not matching for ['commands']"]},
@@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""
-Tests for anta.tests.bfd.py
-"""
+"""Tests for anta.tests.bfd.py."""

# pylint: disable=C0302
from __future__ import annotations

@@ -11,7 +10,7 @@ from typing import Any

# pylint: disable=C0413
# because of the patch above
-from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDSpecificPeers  # noqa: E402
+from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDSpecificPeers
from tests.lib.anta import test  # noqa: F401; pylint: disable=W0611

DATA: list[dict[str, Any]] = [
@@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""Data for testing anta.tests.configuration"""
+"""Data for testing anta.tests.configuration."""

from __future__ import annotations

from typing import Any
@@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""
-Tests for anta.tests.connectivity.py
-"""
+"""Tests for anta.tests.connectivity.py."""

from __future__ import annotations

from typing import Any
@@ -27,8 +26,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
@@ -40,8 +39,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
"expected": {"result": "success"},
@@ -61,8 +60,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
@@ -74,8 +73,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
"expected": {"result": "success"},
@@ -94,8 +93,8 @@ DATA: list[dict[str, Any]] = [
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
"expected": {"result": "success"},
@@ -115,8 +114,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 0 received, 100% packet loss, time 10ms
-"""
-]
+""",
+],
@@ -128,8 +127,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
"expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('10.0.0.5', '10.0.0.11')]"]},
@@ -149,8 +148,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 0 received, 100% packet loss, time 10ms
-"""
-]
+""",
+],
@@ -162,8 +161,8 @@ DATA: list[dict[str, Any]] = [
2 packets transmitted, 2 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms
-"""
-]
+""",
+],
"expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('Management0', '10.0.0.11')]"]},
@@ -175,7 +174,7 @@ DATA: list[dict[str, Any]] = [
"neighbors": [
{"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"},
{"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
-]
+],
},
"eos_data": [
@@ -192,8 +191,8 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet1",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
"Ethernet2": {
"lldpNeighborInfo": [
@@ -207,11 +206,53 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet1",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet2",
"expected": {"result": "success"},
},
+{
+"name": "success-multiple-neighbors",
+"test": VerifyLLDPNeighbors,
+"inputs": {
+"neighbors": [
+{"port": "Ethernet1", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
+],
+},
+"eos_data": [
+{
+"lldpNeighbors": {
+"Ethernet1": {
+"lldpNeighborInfo": [
+{
+"chassisIdType": "macAddress",
+"chassisId": "001c.73a0.fc18",
+"systemName": "DC1-SPINE1",
+"neighborInterfaceInfo": {
+"interfaceIdType": "interfaceName",
+"interfaceId": '"Ethernet1"',
+"interfaceId_v2": "Ethernet1",
+"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
+},
+},
+{
+"chassisIdType": "macAddress",
+"chassisId": "001c.73f7.d138",
+"systemName": "DC1-SPINE2",
+"neighborInterfaceInfo": {
+"interfaceIdType": "interfaceName",
+"interfaceId": '"Ethernet1"',
+"interfaceId_v2": "Ethernet1",
+"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet2",
+},
+},
+],
+},
+},
+},
+],
+"expected": {"result": "success"},
+},
@@ -222,7 +263,7 @@ DATA: list[dict[str, Any]] = [
"neighbors": [
{"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"},
{"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
-]
+],
@@ -239,13 +280,13 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet1",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
-"expected": {"result": "failure", "messages": ["The following port(s) have issues: {'port_not_configured': ['Ethernet2']}"]},
+"expected": {"result": "failure", "messages": ["Port(s) not configured:\n Ethernet2"]},
},
{
"name": "failure-no-neighbor",
@@ -254,7 +295,7 @@ DATA: list[dict[str, Any]] = [
"neighbors": [
{"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"},
{"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
-]
+],
@@ -271,14 +312,14 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet1",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
"Ethernet2": {"lldpNeighborInfo": []},
-"expected": {"result": "failure", "messages": ["The following port(s) have issues: {'no_lldp_neighbor': ['Ethernet2']}"]},
+"expected": {"result": "failure", "messages": ["No LLDP neighbor(s) on port(s):\n Ethernet2"]},
},
{
"name": "failure-wrong-neighbor",
@@ -287,7 +328,7 @@ DATA: list[dict[str, Any]] = [
"neighbors": [
{"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"},
{"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
-]
+],
@@ -304,8 +345,8 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet1",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
"Ethernet2": {
"lldpNeighborInfo": [
@@ -319,13 +360,13 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet2",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet2",
-"expected": {"result": "failure", "messages": ["The following port(s) have issues: {'wrong_lldp_neighbor': ['Ethernet2']}"]},
+"expected": {"result": "failure", "messages": ["Wrong LLDP neighbor(s) on port(s):\n Ethernet2\n DC1-SPINE2_Ethernet2"]},
},
{
"name": "failure-multiple",
@@ -335,7 +376,7 @@ DATA: list[dict[str, Any]] = [
{"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"},
{"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"},
{"port": "Ethernet3", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"},
-]
+],
@@ -352,18 +393,62 @@ DATA: list[dict[str, Any]] = [
"interfaceId_v2": "Ethernet2",
"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
"Ethernet2": {"lldpNeighborInfo": []},
"expected": {
"result": "failure",
"messages": [
-"The following port(s) have issues: {'wrong_lldp_neighbor': ['Ethernet1'], 'no_lldp_neighbor': ['Ethernet2'], 'port_not_configured': ['Ethernet3']}"
+"Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet2\n"
+"No LLDP neighbor(s) on port(s):\n Ethernet2\n"
+"Port(s) not configured:\n Ethernet3"
],
},
},
+{
+"name": "failure-multiple-neighbors",
+"test": VerifyLLDPNeighbors,
+"inputs": {
+"neighbors": [
+{"port": "Ethernet1", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"},
+],
+},
+"eos_data": [
+{
+"lldpNeighbors": {
+"Ethernet1": {
+"lldpNeighborInfo": [
+{
+"chassisIdType": "macAddress",
+"chassisId": "001c.73a0.fc18",
+"systemName": "DC1-SPINE1",
+"neighborInterfaceInfo": {
+"interfaceIdType": "interfaceName",
+"interfaceId": '"Ethernet1"',
+"interfaceId_v2": "Ethernet1",
+"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet1",
+},
+},
+{
+"chassisIdType": "macAddress",
+"chassisId": "001c.73f7.d138",
+"systemName": "DC1-SPINE2",
+"neighborInterfaceInfo": {
+"interfaceIdType": "interfaceName",
+"interfaceId": '"Ethernet1"',
+"interfaceId_v2": "Ethernet1",
+"interfaceDescription": "P2P_LINK_TO_DC1-LEAF1A_Ethernet2",
+},
+},
+],
+},
+},
+},
+],
+"expected": {"result": "failure", "messages": ["Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet1\n DC1-SPINE2_Ethernet1"]},
+},
]
@@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""Test inputs for anta.tests.field_notices"""
+"""Test inputs for anta.tests.field_notices."""

from __future__ import annotations

from typing import Any
@@ -22,7 +23,7 @@ DATA: list[dict[str, Any]] = [
"deviations": [],
"components": [{"name": "Aboot", "version": "Aboot-veos-8.0.0-3255441"}],
"inputs": None,
"expected": {"result": "success"},
@@ -39,10 +40,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "Aboot", "version": "Aboot-veos-4.0.1-3255441"}],
"inputs": None,
-"expected": {"result": "failure", "messages": ["device is running incorrect version of aboot (4.0.1)"]},
+"expected": {
+"result": "failure",
+"messages": ["device is running incorrect version of aboot (4.0.1)"],
+},
},
{
"name": "failure-4.1",
@@ -56,10 +60,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "Aboot", "version": "Aboot-veos-4.1.0-3255441"}],
"inputs": None,
-"expected": {"result": "failure", "messages": ["device is running incorrect version of aboot (4.1.0)"]},
+"expected": {
+"result": "failure",
+"messages": ["device is running incorrect version of aboot (4.1.0)"],
+},
},
{
"name": "failure-6.0",
@@ -73,10 +80,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "Aboot", "version": "Aboot-veos-6.0.1-3255441"}],
"inputs": None,
-"expected": {"result": "failure", "messages": ["device is running incorrect version of aboot (6.0.1)"]},
+"expected": {
+"result": "failure",
+"messages": ["device is running incorrect version of aboot (6.0.1)"],
+},
},
{
"name": "failure-6.1",
@@ -90,10 +100,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "Aboot", "version": "Aboot-veos-6.1.1-3255441"}],
"inputs": None,
-"expected": {"result": "failure", "messages": ["device is running incorrect version of aboot (6.1.1)"]},
+"expected": {
+"result": "failure",
+"messages": ["device is running incorrect version of aboot (6.1.1)"],
+},
},
{
"name": "skipped-model",
@@ -107,10 +120,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "Aboot", "version": "Aboot-veos-8.0.0-3255441"}],
"inputs": None,
-"expected": {"result": "skipped", "messages": ["device is not impacted by FN044"]},
+"expected": {
+"result": "skipped",
+"messages": ["device is not impacted by FN044"],
+},
},
{
"name": "success-JPE",
@@ -123,7 +139,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "7"}],
"inputs": None,
"expected": {"result": "success", "messages": ["FN72 is mitigated"]},
@@ -139,7 +155,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "7"}],
"inputs": None,
"expected": {"result": "success", "messages": ["FN72 is mitigated"]},
@@ -155,7 +171,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "7"}],
"inputs": None,
"expected": {"result": "success", "messages": ["FN72 is mitigated"]},
@@ -171,7 +187,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "7"}],
"inputs": None,
"expected": {"result": "success", "messages": ["FN72 is mitigated"]},
@@ -187,7 +203,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "7"}],
"inputs": None,
"expected": {"result": "skipped", "messages": ["Device not exposed"]},
@@ -203,10 +219,13 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "5"}],
"inputs": None,
-"expected": {"result": "skipped", "messages": ["Platform is not impacted by FN072"]},
+"expected": {
+"result": "skipped",
+"messages": ["Platform is not impacted by FN072"],
+},
},
{
"name": "skipped-range-JPE",
@@ -219,7 +238,39 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "5"}],
"inputs": None,
"expected": {"result": "skipped", "messages": ["Device not exposed"]},
},
+{
+"name": "skipped-range-K-JPE",
+"test": VerifyFieldNotice72Resolution,
+"eos_data": [
+{
+"modelName": "DCS-7280SR3K-48YC8",
+"serialNumber": "JPE2134000",
+"details": {
+"deviations": [],
+"components": [{"name": "FixedSystemvrm1", "version": "5"}],
+},
+},
+],
+"inputs": None,
+"expected": {"result": "skipped", "messages": ["Device not exposed"]},
+},
+{
+"name": "skipped-range-JAS",
+"test": VerifyFieldNotice72Resolution,
+"eos_data": [
+{
+"modelName": "DCS-7280SR3-48YC8",
+"serialNumber": "JAS2041000",
+"details": {
+"deviations": [],
+"components": [{"name": "FixedSystemvrm1", "version": "5"}],
+},
+},
+],
+"inputs": None,
+"expected": {"result": "skipped", "messages": ["Device not exposed"]},
+},
@@ -235,7 +286,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "5"}],
"inputs": None,
"expected": {"result": "skipped", "messages": ["Device not exposed"]},
@@ -251,7 +302,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "5"}],
"inputs": None,
"expected": {"result": "failure", "messages": ["Device is exposed to FN72"]},
@@ -267,7 +318,7 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm1", "version": "5"}],
"inputs": None,
"expected": {"result": "failure", "messages": ["Device is exposed to FN72"]},
@@ -283,9 +334,12 @@ DATA: list[dict[str, Any]] = [
"components": [{"name": "FixedSystemvrm2", "version": "5"}],
"inputs": None,
-"expected": {"result": "error", "messages": ["Error in running test - FixedSystemvrm1 not found"]},
+"expected": {
+"result": "error",
+"messages": ["Error in running test - FixedSystemvrm1 not found"],
+},
},
]
@@ -1,12 +1,14 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""Data for testing anta.tests.configuration"""
+"""Data for testing anta.tests.configuration."""

from __future__ import annotations

from typing import Any

from anta.tests.greent import VerifyGreenT, VerifyGreenTCounters
from tests.lib.anta import test  # noqa: F401; pylint: disable=W0611

DATA: list[dict[str, Any]] = [
{
@@ -21,12 +23,19 @@ DATA: list[dict[str, Any]] = [
"test": VerifyGreenTCounters,
"eos_data": [{"sampleRcvd": 0, "sampleDiscarded": 0, "multiDstSampleRcvd": 0, "grePktSent": 0, "sampleSent": 0}],
"inputs": None,
-"expected": {"result": "failure"},
+"expected": {"result": "failure", "messages": ["GreenT counters are not incremented"]},
},
{
"name": "success",
"test": VerifyGreenT,
-"eos_data": [{"sampleRcvd": 0, "sampleDiscarded": 0, "multiDstSampleRcvd": 0, "grePktSent": 1, "sampleSent": 0}],
+"eos_data": [
+{
+"profiles": {
+"default": {"interfaces": [], "appliedInterfaces": [], "samplePolicy": "default", "failures": {}, "appliedInterfaces6": [], "failures6": {}},
+"testProfile": {"interfaces": [], "appliedInterfaces": [], "samplePolicy": "default", "failures": {}, "appliedInterfaces6": [], "failures6": {}},
+},
+},
+],
"inputs": None,
"expected": {"result": "success"},
},
@@ -37,11 +46,10 @@ DATA: list[dict[str, Any]] = [
{
"profiles": {
"default": {"interfaces": [], "appliedInterfaces": [], "samplePolicy": "default", "failures": {}, "appliedInterfaces6": [], "failures6": {}},
"testProfile": {"interfaces": [], "appliedInterfaces": [], "samplePolicy": "default", "failures": {}, "appliedInterfaces6": [], "failures6": {}},
-}
-}
+},
+},
],
"inputs": None,
-"expected": {"result": "failure"},
+"expected": {"result": "failure", "messages": ["No GreenT policy is created"]},
},
]
@@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
-"""Test inputs for anta.tests.hardware"""
+"""Test inputs for anta.tests.hardware."""

from __future__ import annotations

from typing import Any
@@ -26,8 +27,8 @@ DATA: list[dict[str, Any]] = [
"xcvrSlots": {
"1": {"mfgName": "Arista Networks", "modelName": "QSFP-100G-DR", "serialNum": "XKT203501340", "hardwareRev": "21"},
"2": {"mfgName": "Arista Networks", "modelName": "QSFP-100G-DR", "serialNum": "XKT203501337", "hardwareRev": "21"},
"inputs": {"manufacturers": ["Arista Networks"]},
"expected": {"result": "success"},
@@ -40,8 +41,8 @@ DATA: list[dict[str, Any]] = [
"xcvrSlots": {
"1": {"mfgName": "Arista Networks", "modelName": "QSFP-100G-DR", "serialNum": "XKT203501340", "hardwareRev": "21"},
"2": {"mfgName": "Arista Networks", "modelName": "QSFP-100G-DR", "serialNum": "XKT203501337", "hardwareRev": "21"},
"inputs": {"manufacturers": ["Arista"]},
"expected": {"result": "failure", "messages": ["Some transceivers are from unapproved manufacturers: {'1': 'Arista Networks', '2': 'Arista Networks'}"]},
@@ -57,7 +58,7 @@ DATA: list[dict[str, Any]] = [
"shutdownOnOverheat": "True",
"systemStatus": "temperatureOk",
"recoveryModeOnOverheat": "recoveryModeNA",
"inputs": None,
"expected": {"result": "success"},
@@ -73,7 +74,7 @@ DATA: list[dict[str, Any]] = [
"shutdownOnOverheat": "True",
"systemStatus": "temperatureKO",
"recoveryModeOnOverheat": "recoveryModeNA",
"inputs": None,
"expected": {"result": "failure", "messages": ["Device temperature exceeds acceptable limits. Current system status: 'temperatureKO'"]},
@@ -100,10 +101,10 @@ DATA: list[dict[str, Any]] = [
"pidDriverCount": 0,
"isPidDriver": False,
"name": "DomTemperatureSensor54",
"cardSlots": [],
"inputs": None,
"expected": {"result": "success"},
@@ -130,10 +131,10 @@ DATA: list[dict[str, Any]] = [
"pidDriverCount": 0,
"isPidDriver": False,
"name": "DomTemperatureSensor54",
"cardSlots": [],
"inputs": None,
@@ -141,7 +142,7 @@ DATA: list[dict[str, Any]] = [
"messages": [
"The following sensors are operating outside the acceptable temperature range or have raised alerts: "
"{'DomTemperatureSensor54': "
-"{'hwStatus': 'ko', 'alertCount': 0}}"
+"{'hwStatus': 'ko', 'alertCount': 0}}",
],
},
@@ -167,10 +168,10 @@ DATA: list[dict[str, Any]] = [
"pidDriverCount": 0,
"isPidDriver": False,
"name": "DomTemperatureSensor54",
"cardSlots": [],
"inputs": None,
@@ -178,7 +179,7 @@ DATA: list[dict[str, Any]] = [
"messages": [
"The following sensors are operating outside the acceptable temperature range or have raised alerts: "
"{'DomTemperatureSensor54': "
-"{'hwStatus': 'ok', 'alertCount': 1}}"
+"{'hwStatus': 'ok', 'alertCount': 1}}",
],
},
@@ -200,7 +201,7 @@ DATA: list[dict[str, Any]] = [
"currentZones": 1,
"configuredZones": 0,
"systemStatus": "coolingOk",
"inputs": None,
"expected": {"result": "success"},
@@ -223,7 +224,7 @@ DATA: list[dict[str, Any]] = [
"currentZones": 1,
"configuredZones": 0,
"systemStatus": "coolingKo",
"inputs": None,
"expected": {"result": "failure", "messages": ["Device system cooling is not OK: 'coolingKo'"]},
@@ -254,7 +255,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply1/1",
"speed": 30,
"label": "PowerSupply1",
@@ -272,7 +273,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply2/1",
"speed": 30,
"label": "PowerSupply2",
@@ -292,7 +293,7 @@ DATA: list[dict[str, Any]] = [
"label": "1/1",
"speed": 30,
"label": "1",
@@ -310,7 +311,7 @@ DATA: list[dict[str, Any]] = [
"label": "2/1",
"speed": 30,
"label": "2",
@@ -328,7 +329,7 @@ DATA: list[dict[str, Any]] = [
"label": "3/1",
"speed": 30,
"label": "3",
@@ -346,7 +347,7 @@ DATA: list[dict[str, Any]] = [
"label": "4/1",
"speed": 30,
"label": "4",
@@ -356,7 +357,7 @@ DATA: list[dict[str, Any]] = [
"currentZones": 1,
"configuredZones": 0,
"systemStatus": "coolingOk",
"inputs": {"states": ["ok"]},
"expected": {"result": "success"},
@@ -387,7 +388,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply1/1",
@@ -405,7 +406,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply2/1",
@@ -425,7 +426,7 @@ DATA: list[dict[str, Any]] = [
"label": "1/1",
@@ -443,7 +444,7 @@ DATA: list[dict[str, Any]] = [
"label": "2/1",
@@ -461,7 +462,7 @@ DATA: list[dict[str, Any]] = [
"label": "3/1",
@@ -479,7 +480,7 @@ DATA: list[dict[str, Any]] = [
"label": "4/1",
@@ -489,7 +490,7 @@ DATA: list[dict[str, Any]] = [
"systemStatus": "coolingOk",
"inputs": {"states": ["ok", "Not Inserted"]},
"expected": {"result": "success"},
@@ -520,7 +521,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply1/1",
@@ -538,7 +539,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply2/1",
@@ -558,7 +559,7 @@ DATA: list[dict[str, Any]] = [
"label": "1/1",
@@ -576,7 +577,7 @@ DATA: list[dict[str, Any]] = [
"label": "2/1",
@@ -594,7 +595,7 @@ DATA: list[dict[str, Any]] = [
"label": "3/1",
@@ -612,7 +613,7 @@ DATA: list[dict[str, Any]] = [
"label": "4/1",
@@ -622,7 +623,7 @@ DATA: list[dict[str, Any]] = [
"systemStatus": "CoolingKo",
"inputs": {"states": ["ok", "Not Inserted"]},
"expected": {"result": "failure", "messages": ["Fan 1/1 on Fan Tray 1 is: 'down'"]},
@@ -653,7 +654,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply1/1",
@@ -671,7 +672,7 @@ DATA: list[dict[str, Any]] = [
"label": "PowerSupply2/1",
@@ -691,7 +692,7 @@ DATA: list[dict[str, Any]] = [
"label": "1/1",
@@ -709,7 +710,7 @@ DATA: list[dict[str, Any]] = [
"label": "2/1",
@@ -727,7 +728,7 @@ DATA: list[dict[str, Any]] = [
"speedHwOverride": False,
"speedStable": True,
|
||||
"label": "3/1",
|
||||
}
|
||||
},
|
||||
],
|
||||
"speed": 30,
|
||||
"label": "3",
|
||||
|
@ -745,7 +746,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"speedHwOverride": False,
|
||||
"speedStable": True,
|
||||
"label": "4/1",
|
||||
}
|
||||
},
|
||||
],
|
||||
"speed": 30,
|
||||
"label": "4",
|
||||
|
@ -755,7 +756,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"currentZones": 1,
|
||||
"configuredZones": 0,
|
||||
"systemStatus": "CoolingKo",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"states": ["ok", "Not Inserted"]},
|
||||
"expected": {"result": "failure", "messages": ["Fan PowerSupply1/1 on PowerSupply PowerSupply1 is: 'down'"]},
|
||||
|
@ -801,8 +802,8 @@ DATA: list[dict[str, Any]] = [
"outputCurrent": 9.828125,
"managed": True,
},
}
}
},
},
],
"inputs": {"states": ["ok"]},
"expected": {"result": "success"},
@ -848,8 +849,8 @@ DATA: list[dict[str, Any]] = [
"outputCurrent": 9.828125,
"managed": True,
},
}
}
},
},
],
"inputs": {"states": ["ok", "Not Inserted"]},
"expected": {"result": "success"},
@ -895,8 +896,8 @@ DATA: list[dict[str, Any]] = [
"outputCurrent": 9.828125,
"managed": True,
},
}
}
},
},
],
"inputs": {"states": ["ok"]},
"expected": {"result": "failure", "messages": ["The following power supplies status are not in the accepted states list: {'1': {'state': 'powerLoss'}}"]},

File diff suppressed because it is too large

@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Data for testing anta.tests.configuration"""
"""Data for testing anta.tests.lanz."""

from __future__ import annotations

from typing import Any

@ -15,7 +16,7 @@ DATA: list[dict[str, Any]] = [
"test": VerifyLANZ,
"eos_data": [{"lanzEnabled": True}],
"inputs": None,
"expected": {"result": "success", "messages": ["LANZ is enabled"]},
"expected": {"result": "success"},
},
{
"name": "failure",

@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Data for testing anta.tests.logging"""
"""Data for testing anta.tests.logging."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -77,7 +78,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.93' port 514 in VRF MGMT via tcp
|
||||
Logging to '10.22.10.94' port 911 in VRF MGMT via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"interface": "Management0", "vrf": "MGMT"},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -92,7 +93,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.93' port 514 in VRF MGMT via tcp
|
||||
Logging to '10.22.10.94' port 911 in VRF MGMT via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"interface": "Management0", "vrf": "MGMT"},
|
||||
"expected": {"result": "failure", "messages": ["Source-interface 'Management0' is not configured in VRF MGMT"]},
|
||||
|
@ -107,7 +108,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.93' port 514 in VRF MGMT via tcp
|
||||
Logging to '10.22.10.94' port 911 in VRF MGMT via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"interface": "Management0", "vrf": "MGMT"},
|
||||
"expected": {"result": "failure", "messages": ["Source-interface 'Management0' is not configured in VRF MGMT"]},
|
||||
|
@ -122,7 +123,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.93' port 514 in VRF MGMT via tcp
|
||||
Logging to '10.22.10.94' port 911 in VRF MGMT via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"hosts": ["10.22.10.92", "10.22.10.93", "10.22.10.94"], "vrf": "MGMT"},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -137,7 +138,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.103' port 514 in VRF MGMT via tcp
|
||||
Logging to '10.22.10.104' port 911 in VRF MGMT via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"hosts": ["10.22.10.92", "10.22.10.93", "10.22.10.94"], "vrf": "MGMT"},
|
||||
"expected": {"result": "failure", "messages": ["Syslog servers ['10.22.10.93', '10.22.10.94'] are not configured in VRF MGMT"]},
|
||||
|
@ -152,7 +153,7 @@ DATA: list[dict[str, Any]] = [
|
|||
Logging to '10.22.10.93' port 514 in VRF default via tcp
|
||||
Logging to '10.22.10.94' port 911 in VRF default via udp
|
||||
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": {"hosts": ["10.22.10.92", "10.22.10.93", "10.22.10.94"], "vrf": "MGMT"},
|
||||
"expected": {"result": "failure", "messages": ["Syslog servers ['10.22.10.93', '10.22.10.94'] are not configured in VRF MGMT"]},
|
||||
|
@ -246,7 +247,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"name": "failure",
|
||||
"test": VerifyLoggingErrors,
|
||||
"eos_data": [
|
||||
"Aug 2 19:57:42 DC1-LEAF1A Mlag: %FWK-3-SOCKET_CLOSE_REMOTE: Connection to Mlag (pid:27200) at tbt://192.168.0.1:4432/+n closed by peer (EOF)"
|
||||
"Aug 2 19:57:42 DC1-LEAF1A Mlag: %FWK-3-SOCKET_CLOSE_REMOTE: Connection to Mlag (pid:27200) at tbt://192.168.0.1:4432/+n closed by peer (EOF)",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["Device has reported syslog messages with a severity of ERRORS or higher"]},
|
||||
|
|
|
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.mlag.py
"""
"""Tests for anta.tests.mlag.py."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -25,7 +24,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"state": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
|
||||
|
@ -47,7 +46,7 @@ DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"state": "active",
|
||||
"mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 0, "Active-partial": 0, "Active-full": 1},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -58,7 +57,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"state": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
|
||||
|
@ -70,7 +69,7 @@ DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"state": "active",
|
||||
"mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 0, "Active-partial": 1, "Active-full": 1},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -85,7 +84,7 @@ DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"state": "active",
|
||||
"mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 1, "Active-partial": 1, "Active-full": 1},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -106,7 +105,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"mlagActive": False,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
|
||||
|
@ -117,7 +116,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"dummy": False,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "error", "messages": ["Incorrect JSON response - 'mlagActive' state was not found"]},
|
||||
|
@ -131,7 +130,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"interfaceConfiguration": {},
|
||||
"mlagActive": True,
|
||||
"mlagConnected": True,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -140,7 +139,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"MLAG config-sanity returned inconsistencies: "
|
||||
"{'globalConfiguration': {'mlag': {'globalParameters': "
|
||||
"{'dual-primary-detection-delay': {'localValue': '0', 'peerValue': '200'}}}}, "
|
||||
"'interfaceConfiguration': {}}"
|
||||
"'interfaceConfiguration': {}}",
|
||||
],
|
||||
},
|
||||
},
|
||||
|
@ -153,7 +152,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"interfaceConfiguration": {"trunk-native-vlan mlag30": {"interface": {"Port-Channel30": {"localValue": "123", "peerValue": "3700"}}}},
|
||||
"mlagActive": True,
|
||||
"mlagConnected": True,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -162,7 +161,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"MLAG config-sanity returned inconsistencies: "
|
||||
"{'globalConfiguration': {}, "
|
||||
"'interfaceConfiguration': {'trunk-native-vlan mlag30': "
|
||||
"{'interface': {'Port-Channel30': {'localValue': '123', 'peerValue': '3700'}}}}}"
|
||||
"{'interface': {'Port-Channel30': {'localValue': '123', 'peerValue': '3700'}}}}}",
|
||||
],
|
||||
},
|
||||
},
|
||||
|
@ -179,7 +178,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"state": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"reload_delay": 300, "reload_delay_non_mlag": 330},
|
||||
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
|
||||
|
@ -202,7 +201,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"dualPrimaryMlagRecoveryDelay": 60,
|
||||
"dualPrimaryNonMlagRecoveryDelay": 0,
|
||||
"detail": {"dualPrimaryDetectionDelay": 200, "dualPrimaryAction": "none"},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"detection_delay": 200, "errdisabled": False, "recovery_delay": 60, "recovery_delay_non_mlag": 0},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -213,7 +212,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"state": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"detection_delay": 200, "errdisabled": False, "recovery_delay": 60, "recovery_delay_non_mlag": 0},
|
||||
"expected": {"result": "skipped", "messages": ["MLAG is disabled"]},
|
||||
|
@ -226,7 +225,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"state": "active",
|
||||
"dualPrimaryDetectionState": "disabled",
|
||||
"dualPrimaryPortsErrdisabled": False,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"detection_delay": 200, "errdisabled": False, "recovery_delay": 60, "recovery_delay_non_mlag": 0},
|
||||
"expected": {"result": "failure", "messages": ["Dual-primary detection is disabled"]},
|
||||
|
@ -242,7 +241,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"dualPrimaryMlagRecoveryDelay": 160,
|
||||
"dualPrimaryNonMlagRecoveryDelay": 0,
|
||||
"detail": {"dualPrimaryDetectionDelay": 300, "dualPrimaryAction": "none"},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"detection_delay": 200, "errdisabled": False, "recovery_delay": 60, "recovery_delay_non_mlag": 0},
|
||||
"expected": {
|
||||
|
@ -254,7 +253,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"'detail.dualPrimaryAction': 'none', "
|
||||
"'dualPrimaryMlagRecoveryDelay': 160, "
|
||||
"'dualPrimaryNonMlagRecoveryDelay': 0}"
|
||||
)
|
||||
),
|
||||
],
|
||||
},
|
||||
},
|
||||
|
@ -269,7 +268,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"dualPrimaryMlagRecoveryDelay": 60,
|
||||
"dualPrimaryNonMlagRecoveryDelay": 0,
|
||||
"detail": {"dualPrimaryDetectionDelay": 200, "dualPrimaryAction": "none"},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"detection_delay": 200, "errdisabled": True, "recovery_delay": 60, "recovery_delay_non_mlag": 0},
|
||||
"expected": {
|
||||
|
@ -281,7 +280,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"'detail.dualPrimaryAction': 'none', "
|
||||
"'dualPrimaryMlagRecoveryDelay': 60, "
|
||||
"'dualPrimaryNonMlagRecoveryDelay': 0}"
|
||||
)
|
||||
),
|
||||
],
|
||||
},
|
||||
},
|
||||
|
|
|
@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test inputs for anta.tests.multicast"""
"""Test inputs for anta.tests.multicast."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -44,7 +45,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"robustness": 2,
|
||||
"immediateLeave": "enabled",
|
||||
"reportFloodingSwitchPorts": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"vlans": {1: True, 42: True}},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -67,12 +68,12 @@ DATA: list[dict[str, Any]] = [
|
|||
"maxGroups": 65534,
|
||||
"immediateLeave": "default",
|
||||
"floodingTraffic": True,
|
||||
}
|
||||
},
|
||||
},
|
||||
"robustness": 2,
|
||||
"immediateLeave": "enabled",
|
||||
"reportFloodingSwitchPorts": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"vlans": {42: False}},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -100,7 +101,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"robustness": 2,
|
||||
"immediateLeave": "enabled",
|
||||
"reportFloodingSwitchPorts": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"vlans": {1: False, 42: False}},
|
||||
"expected": {"result": "failure", "messages": ["IGMP state for vlan 1 is enabled", "Supplied vlan 42 is not present on the device."]},
|
||||
|
@ -128,7 +129,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"robustness": 2,
|
||||
"immediateLeave": "enabled",
|
||||
"reportFloodingSwitchPorts": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"vlans": {1: True}},
|
||||
"expected": {"result": "failure", "messages": ["IGMP state for vlan 1 is disabled"]},
|
||||
|
@ -143,7 +144,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"robustness": 2,
|
||||
"immediateLeave": "enabled",
|
||||
"reportFloodingSwitchPorts": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"enabled": True},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -155,7 +156,7 @@ DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"reportFlooding": "disabled",
|
||||
"igmpSnoopingState": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"enabled": False},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -167,7 +168,7 @@ DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"reportFlooding": "disabled",
|
||||
"igmpSnoopingState": "disabled",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"enabled": True},
|
||||
"expected": {"result": "failure", "messages": ["IGMP state is not valid: disabled"]},
|
||||
|
|
|
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.profiles.py
"""
"""Tests for anta.tests.profiles.py."""

from __future__ import annotations

from typing import Any

@ -30,7 +29,7 @@ DATA: list[dict[str, Any]] = [
"name": "success",
"test": VerifyTcamProfile,
"eos_data": [
{"pmfProfiles": {"FixedSystem": {"config": "test", "configType": "System Profile", "status": "test", "mode": "tcam"}}, "lastProgrammingStatus": {}}
{"pmfProfiles": {"FixedSystem": {"config": "test", "configType": "System Profile", "status": "test", "mode": "tcam"}}, "lastProgrammingStatus": {}},
],
"inputs": {"profile": "test"},
"expected": {"result": "success"},
@ -39,7 +38,7 @@ DATA: list[dict[str, Any]] = [
"name": "failure",
"test": VerifyTcamProfile,
"eos_data": [
{"pmfProfiles": {"FixedSystem": {"config": "test", "configType": "System Profile", "status": "default", "mode": "tcam"}}, "lastProgrammingStatus": {}}
{"pmfProfiles": {"FixedSystem": {"config": "test", "configType": "System Profile", "status": "default", "mode": "tcam"}}, "lastProgrammingStatus": {}},
],
"inputs": {"profile": "test"},
"expected": {"result": "failure", "messages": ["Incorrect profile running on device: default"]},

@ -1,17 +1,19 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Data for testing anta.tests.configuration"""
"""Data for testing anta.tests.ptp."""

from __future__ import annotations

from typing import Any

from anta.tests.ptp import VerifyPtpStatus
from anta.tests.ptp import VerifyPtpGMStatus, VerifyPtpLockStatus, VerifyPtpModeStatus, VerifyPtpOffset, VerifyPtpPortModeStatus
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611

DATA: list[dict[str, Any]] = [
{
"name": "success",
"test": VerifyPtpStatus,
"test": VerifyPtpModeStatus,
"eos_data": [
{
"ptpMode": "ptpBoundaryClock",
|
||||
|
@ -34,9 +36,305 @@ DATA: list[dict[str, Any]] = [
|
|||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpStatus,
|
||||
"test": VerifyPtpModeStatus,
|
||||
"eos_data": [{"ptpMode": "ptpDisabled", "ptpIntfSummaries": {}}],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["The device is not configured as a PTP Boundary Clock: 'ptpDisabled'"]},
|
||||
},
|
||||
{
|
||||
"name": "error",
|
||||
"test": VerifyPtpModeStatus,
|
||||
"eos_data": [{"ptpIntfSummaries": {}}],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure"},
|
||||
"expected": {"result": "error", "messages": ["'ptpMode' variable is not present in the command output"]},
|
||||
},
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyPtpGMStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:14:00:01",
|
||||
"gmClockIdentity": "0xec:46:70:ff:fe:00:ff:a8",
|
||||
"numberOfSlavePorts": 1,
|
||||
"numberOfMasterPorts": 8,
|
||||
"slavePort": "Ethernet27/1",
|
||||
"slaveVlanId": 0,
|
||||
"offsetFromMaster": -11,
|
||||
"meanPathDelay": 105,
|
||||
"stepsRemoved": 2,
|
||||
"skew": 1.0000015265007687,
|
||||
"lastSyncTime": 1708599750,
|
||||
"currentPtpSystemTime": 1708599750,
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": {"gmid": "0xec:46:70:ff:fe:00:ff:a8"},
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpGMStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"gmClockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"numberOfSlavePorts": 0,
|
||||
"numberOfMasterPorts": 4,
|
||||
"offsetFromMaster": 3,
|
||||
"meanPathDelay": 496,
|
||||
"stepsRemoved": 0,
|
||||
"skew": 1.0000074628720317,
|
||||
"lastSyncTime": 1708600129,
|
||||
"currentPtpSystemTime": 1708600153,
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": {"gmid": "0xec:46:70:ff:fe:00:ff:a8"},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"The device is locked to the following Grandmaster: '0x00:1c:73:ff:ff:0a:00:01', which differ from the expected one.",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "error",
|
||||
"test": VerifyPtpGMStatus,
|
||||
"eos_data": [{"ptpIntfSummaries": {}}],
|
||||
"inputs": {"gmid": "0xec:46:70:ff:fe:00:ff:a8"},
|
||||
"expected": {"result": "error", "messages": ["'ptpClockSummary' variable is not present in the command output"]},
|
||||
},
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyPtpLockStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:14:00:01",
|
||||
"gmClockIdentity": "0xec:46:70:ff:fe:00:ff:a8",
|
||||
"numberOfSlavePorts": 1,
|
||||
"numberOfMasterPorts": 8,
|
||||
"slavePort": "Ethernet27/1",
|
||||
"slaveVlanId": 0,
|
||||
"offsetFromMaster": -11,
|
||||
"meanPathDelay": 105,
|
||||
"stepsRemoved": 2,
|
||||
"skew": 1.0000015265007687,
|
||||
"lastSyncTime": 1708599750,
|
||||
"currentPtpSystemTime": 1708599750,
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpLockStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"gmClockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"numberOfSlavePorts": 0,
|
||||
"numberOfMasterPorts": 4,
|
||||
"offsetFromMaster": 3,
|
||||
"meanPathDelay": 496,
|
||||
"stepsRemoved": 0,
|
||||
"skew": 1.0000074628720317,
|
||||
"lastSyncTime": 1708600129,
|
||||
"currentPtpSystemTime": 1708600286,
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["The device lock is more than 60s old: 157s"]},
|
||||
},
|
||||
{
|
||||
"name": "error",
|
||||
"test": VerifyPtpLockStatus,
|
||||
"eos_data": [{"ptpIntfSummaries": {}}],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
"result": "error",
|
||||
"messages": [
|
||||
"'ptpClockSummary' variable is not present in the command output",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyPtpOffset,
|
||||
"eos_data": [
|
||||
{
|
||||
"monitorEnabled": True,
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"offsetFromMasterThreshold": 250,
|
||||
"meanPathDelayThreshold": 1500,
|
||||
"ptpMonitorData": [
|
||||
{
|
||||
"intf": "Ethernet27/1",
|
||||
"realLastSyncTime": 1708599815611398400,
|
||||
"lastSyncSeqId": 44413,
|
||||
"offsetFromMaster": 2,
|
||||
"meanPathDelay": 105,
|
||||
"skew": 1.000001614,
|
||||
},
|
||||
{
|
||||
"intf": "Ethernet27/1",
|
||||
"realLastSyncTime": 1708599815486101500,
|
||||
"lastSyncSeqId": 44412,
|
||||
"offsetFromMaster": -13,
|
||||
"meanPathDelay": 105,
|
||||
"skew": 1.000001614,
|
||||
},
|
||||
],
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpOffset,
|
||||
"eos_data": [
|
||||
{
|
||||
"monitorEnabled": True,
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"offsetFromMasterThreshold": 250,
|
||||
"meanPathDelayThreshold": 1500,
|
||||
"ptpMonitorData": [
|
||||
{
|
||||
"intf": "Ethernet27/1",
|
||||
"realLastSyncTime": 1708599815611398400,
|
||||
"lastSyncSeqId": 44413,
|
||||
"offsetFromMaster": 1200,
|
||||
"meanPathDelay": 105,
|
||||
"skew": 1.000001614,
|
||||
},
|
||||
{
|
||||
"intf": "Ethernet27/1",
|
||||
"realLastSyncTime": 1708599815486101500,
|
||||
"lastSyncSeqId": 44412,
|
||||
"offsetFromMaster": -1300,
|
||||
"meanPathDelay": 105,
|
||||
"skew": 1.000001614,
|
||||
},
|
||||
],
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [("The device timing offset from master is greater than +/- 1000ns: {'Ethernet27/1': [1200, -1300]}")],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "skipped",
|
||||
"test": VerifyPtpOffset,
|
||||
"eos_data": [
|
||||
{
|
||||
"monitorEnabled": True,
|
||||
"ptpMonitorData": [],
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "skipped", "messages": ["PTP is not configured"]},
|
||||
},
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyPtpPortModeStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"gmClockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"numberOfSlavePorts": 0,
|
||||
"numberOfMasterPorts": 4,
|
||||
"offsetFromMaster": 0,
|
||||
"meanPathDelay": 0,
|
||||
"stepsRemoved": 0,
|
||||
"skew": 1.0,
|
||||
},
|
||||
"ptpIntfSummaries": {
|
||||
"Ethernet53": {
|
||||
"interface": "Ethernet53",
|
||||
"ptpIntfVlanSummaries": [
|
||||
{
|
||||
"vlanId": 0,
|
||||
"portState": "psDisabled",
|
||||
"delayMechanism": "e2e",
|
||||
"transportMode": "ipv4",
|
||||
"mpassEnabled": False,
|
||||
"mpassStatus": "active",
|
||||
}
|
||||
],
|
||||
},
|
||||
"Ethernet1": {
|
||||
"interface": "Ethernet1",
|
||||
"ptpIntfVlanSummaries": [
|
||||
{"vlanId": 0, "portState": "psMaster", "delayMechanism": "e2e", "transportMode": "ipv4", "mpassEnabled": False, "mpassStatus": "active"}
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpPortModeStatus,
|
||||
"eos_data": [{"ptpIntfSummaries": {}}],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["No interfaces are PTP enabled"]},
|
||||
},
|
||||
{
|
||||
"name": "failure",
|
||||
"test": VerifyPtpPortModeStatus,
|
||||
"eos_data": [
|
||||
{
|
||||
"ptpMode": "ptpBoundaryClock",
|
||||
"ptpProfile": "ptpDefaultProfile",
|
||||
"ptpClockSummary": {
|
||||
"clockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"gmClockIdentity": "0x00:1c:73:ff:ff:0a:00:01",
|
||||
"numberOfSlavePorts": 0,
|
||||
"numberOfMasterPorts": 4,
|
||||
"offsetFromMaster": 0,
|
||||
"meanPathDelay": 0,
|
||||
"stepsRemoved": 0,
|
||||
"skew": 1.0,
|
||||
},
|
||||
"ptpIntfSummaries": {
|
||||
"Ethernet53": {
|
||||
"interface": "Ethernet53",
|
||||
"ptpIntfVlanSummaries": [
|
||||
{"vlanId": 0, "portState": "none", "delayMechanism": "e2e", "transportMode": "ipv4", "mpassEnabled": False, "mpassStatus": "active"}
|
||||
],
|
||||
},
|
||||
"Ethernet1": {
|
||||
"interface": "Ethernet1",
|
||||
"ptpIntfVlanSummaries": [
|
||||
{"vlanId": 0, "portState": "none", "delayMechanism": "e2e", "transportMode": "ipv4", "mpassEnabled": False, "mpassStatus": "active"}
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["The following interface(s) are not in a valid PTP state: '['Ethernet53', 'Ethernet1']'"]},
|
||||
},
|
||||
]
|
||||
|
|
|
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.security.py
"""
"""Tests for anta.tests.security.py."""

from __future__ import annotations

from typing import Any

@ -16,7 +15,9 @@ from anta.tests.security import (
VerifyAPISSLCertificate,
VerifyBannerLogin,
VerifyBannerMotd,
VerifyIPSecConnHealth,
VerifyIPv4ACL,
VerifySpecificIPSecConn,
VerifySSHIPv4Acl,
VerifySSHIPv6Acl,
VerifySSHStatus,
|
||||
|
@ -107,7 +108,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"unixSocketServer": {"configured": False, "running": False},
|
||||
"sslProfile": {"name": "API_SSL_Profile", "configured": True, "state": "valid"},
|
||||
"tlsProtocol": ["1.2"],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -124,7 +125,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"unixSocketServer": {"configured": False, "running": False},
|
||||
"sslProfile": {"name": "API_SSL_Profile", "configured": True, "state": "valid"},
|
||||
"tlsProtocol": ["1.2"],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["eAPI HTTP server is enabled globally"]},
|
||||
|
@ -141,7 +142,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"unixSocketServer": {"configured": False, "running": False},
|
||||
"sslProfile": {"name": "API_SSL_Profile", "configured": True, "state": "valid"},
|
||||
"tlsProtocol": ["1.2"],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"profile": "API_SSL_Profile"},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -157,7 +158,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"httpsServer": {"configured": True, "running": True, "port": 443},
|
||||
"unixSocketServer": {"configured": False, "running": False},
|
||||
"tlsProtocol": ["1.2"],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"profile": "API_SSL_Profile"},
|
||||
"expected": {"result": "failure", "messages": ["eAPI HTTPS server SSL profile (API_SSL_Profile) is not configured"]},
|
||||
|
@ -174,7 +175,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"unixSocketServer": {"configured": False, "running": False},
|
||||
"sslProfile": {"name": "Wrong_SSL_Profile", "configured": True, "state": "valid"},
|
||||
"tlsProtocol": ["1.2"],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"profile": "API_SSL_Profile"},
|
||||
"expected": {"result": "failure", "messages": ["eAPI HTTPS server SSL profile (API_SSL_Profile) is misconfigured or invalid"]},
|
||||
|
@ -897,4 +898,278 @@ DATA: list[dict[str, Any]] = [
|
|||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyIPSecConnHealth,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.3.2-172.18.5.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Established"},
|
||||
},
|
||||
"default-100.64.3.2-100.64.5.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Established"},
|
||||
},
|
||||
}
|
||||
}
|
||||
],
|
||||
"inputs": {},
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure-no-connection",
|
||||
"test": VerifyIPSecConnHealth,
|
||||
"eos_data": [{"connections": {}}],
|
||||
"inputs": {},
|
||||
"expected": {"result": "failure", "messages": ["No IPv4 security connection configured."]},
|
||||
},
|
||||
{
|
||||
"name": "failure-not-established",
|
||||
"test": VerifyIPSecConnHealth,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.3.2-172.18.5.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Idle"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
"Guest-100.64.3.2-100.64.5.2-srcUnused-0": {"pathDict": {"path10": "Idle"}, "saddr": "100.64.3.2", "daddr": "100.64.5.2", "tunnelNs": "Guest"},
|
||||
}
|
||||
}
|
||||
],
|
||||
"inputs": {},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"The following IPv4 security connections are not established:\n"
|
||||
"source:172.18.3.2 destination:172.18.2.2 vrf:default\n"
|
||||
"source:100.64.3.2 destination:100.64.5.2 vrf:Guest."
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "success-with-connection",
|
||||
"test": VerifySpecificIPSecConn,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"Guest-172.18.3.2-172.18.2.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Established"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "Guest",
|
||||
},
|
||||
"Guest-100.64.3.2-100.64.2.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Established"},
|
||||
"saddr": "100.64.3.2",
|
||||
"daddr": "100.64.2.2",
|
||||
"tunnelNs": "Guest",
|
||||
},
|
||||
}
|
||||
}
|
||||
],
|
||||
"inputs": {
|
||||
"ip_security_connections": [
|
||||
{
|
||||
"peer": "10.255.0.1",
|
||||
"vrf": "Guest",
|
||||
"connections": [
|
||||
{"source_address": "100.64.3.2", "destination_address": "100.64.2.2"},
|
||||
{"source_address": "172.18.3.2", "destination_address": "172.18.2.2"},
|
||||
],
|
||||
},
|
||||
]
|
||||
},
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "success-without-connection",
|
||||
"test": VerifySpecificIPSecConn,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.3.2-172.18.2.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Established"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
"default-100.64.3.2-100.64.2.2-srcUnused-0": {"pathDict": {"path10": "Established"}, "saddr": "100.64.3.2", "daddr": "100.64.2.2"},
|
||||
}
|
||||
}
|
||||
],
|
||||
"inputs": {
|
||||
"ip_security_connections": [
|
||||
{
|
||||
"peer": "10.255.0.1",
|
||||
"vrf": "default",
|
||||
},
|
||||
]
|
||||
},
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure-no-connection",
|
||||
"test": VerifySpecificIPSecConn,
|
||||
"eos_data": [
|
||||
{"connections": {}},
|
||||
{
|
||||
"connections": {
|
||||
"DATA-172.18.3.2-172.18.2.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Established"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "DATA",
|
||||
},
|
||||
"DATA-100.64.3.2-100.64.2.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Established"},
|
||||
"saddr": "100.64.3.2",
|
||||
"daddr": "100.64.2.2",
|
||||
"tunnelNs": "DATA",
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"ip_security_connections": [
|
||||
{
|
||||
"peer": "10.255.0.1",
|
||||
"vrf": "default",
|
||||
},
|
||||
{
|
||||
"peer": "10.255.0.2",
|
||||
"vrf": "DATA",
|
||||
"connections": [
|
||||
{"source_address": "100.64.3.2", "destination_address": "100.64.2.2"},
|
||||
{"source_address": "172.18.3.2", "destination_address": "172.18.2.2"},
|
||||
],
|
||||
},
|
||||
]
|
||||
},
|
||||
"expected": {"result": "failure", "messages": ["No IPv4 security connection configured for peer `10.255.0.1`."]},
|
||||
},
|
||||
{
|
||||
"name": "failure-not-established",
|
||||
"test": VerifySpecificIPSecConn,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.3.2-172.18.5.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Idle"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
"default-100.64.3.2-100.64.5.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Idle"},
|
||||
"saddr": "100.64.2.2",
|
||||
"daddr": "100.64.1.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"connections": {
|
||||
"MGMT-172.18.2.2-172.18.1.2-srcUnused-0": {"pathDict": {"path9": "Idle"}, "saddr": "172.18.2.2", "daddr": "172.18.1.2", "tunnelNs": "MGMT"},
|
||||
"MGMT-100.64.2.2-100.64.1.2-srcUnused-0": {"pathDict": {"path10": "Idle"}, "saddr": "100.64.2.2", "daddr": "100.64.1.2", "tunnelNs": "MGMT"},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"ip_security_connections": [
|
||||
{
|
||||
"peer": "10.255.0.1",
|
||||
"vrf": "default",
|
||||
},
|
||||
{
|
||||
"peer": "10.255.0.2",
|
||||
"vrf": "MGMT",
|
||||
"connections": [
|
||||
{"source_address": "100.64.2.2", "destination_address": "100.64.1.2"},
|
||||
{"source_address": "172.18.2.2", "destination_address": "172.18.1.2"},
|
||||
],
|
||||
},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
"Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.1` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
"Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
"Expected state of IPv4 security connection `source:172.18.2.2 destination:172.18.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "failure-missing-connection",
|
||||
"test": VerifySpecificIPSecConn,
|
||||
"eos_data": [
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.3.2-172.18.5.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Idle"},
|
||||
"saddr": "172.18.3.2",
|
||||
"daddr": "172.18.2.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
"default-100.64.3.2-100.64.5.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Idle"},
|
||||
"saddr": "100.64.3.2",
|
||||
"daddr": "100.64.2.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"connections": {
|
||||
"default-172.18.2.2-172.18.1.2-srcUnused-0": {
|
||||
"pathDict": {"path9": "Idle"},
|
||||
"saddr": "172.18.2.2",
|
||||
"daddr": "172.18.1.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
"default-100.64.2.2-100.64.1.2-srcUnused-0": {
|
||||
"pathDict": {"path10": "Idle"},
|
||||
"saddr": "100.64.2.2",
|
||||
"daddr": "100.64.1.2",
|
||||
"tunnelNs": "default",
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"ip_security_connections": [
|
||||
{
|
||||
"peer": "10.255.0.1",
|
||||
"vrf": "default",
|
||||
},
|
||||
{
|
||||
"peer": "10.255.0.2",
|
||||
"vrf": "default",
|
||||
"connections": [
|
||||
{"source_address": "100.64.4.2", "destination_address": "100.64.1.2"},
|
||||
{"source_address": "172.18.4.2", "destination_address": "172.18.1.2"},
|
||||
],
|
||||
},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
"Expected state of IPv4 security connection `source:100.64.3.2 destination:100.64.2.2 vrf:default` for peer `10.255.0.1` is `Established` "
|
||||
"but found `Idle` instead.",
|
||||
"IPv4 security connection `source:100.64.4.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.2` is not found.",
|
||||
"IPv4 security connection `source:172.18.4.2 destination:172.18.1.2 vrf:default` for peer `10.255.0.2` is not found.",
|
||||
],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
|
|
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.services.py
"""
"""Tests for anta.tests.services.py."""

from __future__ import annotations

from typing import Any

@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.snmp.py
"""
"""Tests for anta.tests.snmp.py."""

from __future__ import annotations

from typing import Any

@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test inputs for anta.tests.hardware"""
"""Test inputs for anta.tests.hardware."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -18,7 +19,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"modelName": "vEOS-lab",
|
||||
"internalVersion": "4.27.0F-24305004.4270F",
|
||||
"version": "4.27.0F",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"versions": ["4.27.0F", "4.28.0F"]},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -31,7 +32,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"modelName": "vEOS-lab",
|
||||
"internalVersion": "4.27.0F-24305004.4270F",
|
||||
"version": "4.27.0F",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"versions": ["4.27.1F"]},
|
||||
"expected": {"result": "failure", "messages": ["device is running version \"4.27.0F\" not in expected versions: ['4.27.1F']"]},
|
||||
|
@ -52,7 +53,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"TerminAttr-core": {"release": "1", "version": "v1.17.0"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"versions": ["v1.17.0", "v1.18.1"]},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -73,7 +74,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"TerminAttr-core": {"release": "1", "version": "v1.17.0"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {"versions": ["v1.17.1", "v1.18.1"]},
|
||||
"expected": {"result": "failure", "messages": ["device is running TerminAttr version v1.17.0 and is not in the allowed list: ['v1.17.1', 'v1.18.1']"]},
|
||||
|
|
|
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tests.stp.py
"""
"""Tests for anta.tests.stp.py."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -84,8 +83,8 @@ DATA: list[dict[str, Any]] = [
|
|||
"interfaces": {
|
||||
"Ethernet10": {"bpduSent": 201, "bpduReceived": 0, "bpduTaggedError": 3, "bpduOtherError": 0, "bpduRateLimitCount": 0},
|
||||
"Ethernet11": {"bpduSent": 99, "bpduReceived": 0, "bpduTaggedError": 0, "bpduOtherError": 6, "bpduRateLimitCount": 0},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["The following interfaces have STP BPDU packet errors: ['Ethernet10', 'Ethernet11']"]},
|
||||
|
@ -145,7 +144,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"inputs": {"vlans": [10, 20]},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": ["The following VLAN(s) have interface(s) that are not in a fowarding state: [{'VLAN 10': ['Ethernet10']}, {'VLAN 20': ['Ethernet10']}]"],
|
||||
"messages": ["The following VLAN(s) have interface(s) that are not in a forwarding state: [{'VLAN 10': ['Ethernet10']}, {'VLAN 20': ['Ethernet10']}]"],
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -162,7 +161,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL20": {
|
||||
"rootBridge": {
|
||||
|
@ -172,7 +171,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL30": {
|
||||
"rootBridge": {
|
||||
|
@ -182,10 +181,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"priority": 32768, "instances": [10, 20]},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -204,7 +203,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL20": {
|
||||
"rootBridge": {
|
||||
|
@ -214,7 +213,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL30": {
|
||||
"rootBridge": {
|
||||
|
@ -224,10 +223,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"priority": 32768},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -246,10 +245,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"priority": 16384, "instances": [0]},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -268,10 +267,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"priority": 32768, "instances": [0]},
|
||||
"expected": {"result": "failure", "messages": ["Unsupported STP instance type: WRONG0"]},
|
||||
|
@ -297,7 +296,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL20": {
|
||||
"rootBridge": {
|
||||
|
@ -307,7 +306,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
"VL30": {
|
||||
"rootBridge": {
|
||||
|
@ -317,10 +316,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"helloTime": 2.0,
|
||||
"maxAge": 20,
|
||||
"forwardDelay": 15,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"priority": 32768, "instances": [10, 20, 30]},
|
||||
"expected": {"result": "failure", "messages": ["The following instance(s) have the wrong STP root priority configured: ['VL20', 'VL30']"]},
|
||||
|
|
176 tests/units/anta_tests/test_stun.py Normal file

@ -0,0 +1,176 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test inputs for anta.tests.stun.py."""

from __future__ import annotations

from typing import Any

from anta.tests.stun import VerifyStunClient
from tests.lib.anta import test # noqa: F401; pylint: disable=W0611
|
||||
|
||||
DATA: list[dict[str, Any]] = [
|
||||
{
|
||||
"name": "success",
|
||||
"test": VerifyStunClient,
|
||||
"eos_data": [
|
||||
{
|
||||
"bindings": {
|
||||
"000000010a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "100.64.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.64.3.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.3.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.4.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.4.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.6.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.6.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"stun_clients": [
|
||||
{"source_address": "100.64.3.2", "public_address": "192.64.3.2", "source_port": 4500, "public_port": 6006},
|
||||
{"source_address": "172.18.3.2"},
|
||||
{"source_address": "172.18.4.2", "source_port": 4500, "public_address": "192.18.4.2"},
|
||||
{"source_address": "172.18.6.2", "source_port": 4500, "public_port": 6006},
|
||||
]
|
||||
},
|
||||
"expected": {"result": "success"},
|
||||
},
|
||||
{
|
||||
"name": "failure-incorrect-public-ip",
|
||||
"test": VerifyStunClient,
|
||||
"eos_data": [
|
||||
{
|
||||
"bindings": {
|
||||
"000000010a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "100.64.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.64.3.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.3.2", "port": 6006},
|
||||
}
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"stun_clients": [
|
||||
{"source_address": "100.64.3.2", "public_address": "192.164.3.2", "source_port": 4500, "public_port": 6006},
|
||||
{"source_address": "172.18.3.2", "public_address": "192.118.3.2", "source_port": 4500, "public_port": 6006},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"For STUN source `100.64.3.2:4500`:\nExpected `192.164.3.2` as the public ip, but found `192.64.3.2` instead.",
|
||||
"For STUN source `172.18.3.2:4500`:\nExpected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "failure-no-client",
|
||||
"test": VerifyStunClient,
|
||||
"eos_data": [
|
||||
{"bindings": {}},
|
||||
{"bindings": {}},
|
||||
],
|
||||
"inputs": {
|
||||
"stun_clients": [
|
||||
{"source_address": "100.64.3.2", "public_address": "192.164.3.2", "source_port": 4500, "public_port": 6006},
|
||||
{"source_address": "172.18.3.2", "public_address": "192.118.3.2", "source_port": 4500, "public_port": 6006},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": ["STUN client transaction for source `100.64.3.2:4500` is not found.", "STUN client transaction for source `172.18.3.2:4500` is not found."],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "failure-incorrect-public-port",
|
||||
"test": VerifyStunClient,
|
||||
"eos_data": [
|
||||
{"bindings": {}},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.3.2", "port": 4800},
|
||||
}
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"stun_clients": [
|
||||
{"source_address": "100.64.3.2", "public_address": "192.164.3.2", "source_port": 4500, "public_port": 6006},
|
||||
{"source_address": "172.18.3.2", "public_address": "192.118.3.2", "source_port": 4500, "public_port": 6006},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"STUN client transaction for source `100.64.3.2:4500` is not found.",
|
||||
"For STUN source `172.18.3.2:4500`:\n"
|
||||
"Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n"
|
||||
"Expected `6006` as the public port, but found `4800` instead.",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "failure-all-type",
|
||||
"test": VerifyStunClient,
|
||||
"eos_data": [
|
||||
{"bindings": {}},
|
||||
{
|
||||
"bindings": {
|
||||
"000000040a64ff0100000000": {
|
||||
"sourceAddress": {"ip": "172.18.3.2", "port": 4500},
|
||||
"publicAddress": {"ip": "192.18.3.2", "port": 4800},
|
||||
}
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": {
|
||||
"stun_clients": [
|
||||
{"source_address": "100.64.3.2", "public_address": "192.164.3.2", "source_port": 4500, "public_port": 6006},
|
||||
{"source_address": "172.18.4.2", "public_address": "192.118.3.2", "source_port": 4800, "public_port": 6006},
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"result": "failure",
|
||||
"messages": [
|
||||
"STUN client transaction for source `100.64.3.2:4500` is not found.",
|
||||
"For STUN source `172.18.4.2:4800`:\n"
|
||||
"Expected `172.18.4.2` as the source ip, but found `172.18.3.2` instead.\n"
|
||||
"Expected `4800` as the source port, but found `4500` instead.\n"
|
||||
"Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n"
|
||||
"Expected `6006` as the public port, but found `4800` instead.",
|
||||
],
|
||||
},
|
||||
},
|
||||
]
|
|
@ -1,7 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Test inputs for anta.tests.system"""
"""Test inputs for anta.tests.system."""

from __future__ import annotations

from typing import Any
|
||||
|
@ -46,10 +47,15 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"resetCauses": [
|
||||
{"recommendedAction": "No action necessary.", "description": "Reload requested by the user.", "timestamp": 1683186892.0, "debugInfoIsDir": False}
|
||||
{
|
||||
"recommendedAction": "No action necessary.",
|
||||
"description": "Reload requested by the user.",
|
||||
"timestamp": 1683186892.0,
|
||||
"debugInfoIsDir": False,
|
||||
},
|
||||
],
|
||||
"full": False,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -61,10 +67,10 @@ DATA: list[dict[str, Any]] = [
|
|||
"eos_data": [
|
||||
{
|
||||
"resetCauses": [
|
||||
{"recommendedAction": "No action necessary.", "description": "Reload after crash.", "timestamp": 1683186892.0, "debugInfoIsDir": False}
|
||||
{"recommendedAction": "No action necessary.", "description": "Reload after crash.", "timestamp": 1683186892.0, "debugInfoIsDir": False},
|
||||
],
|
||||
"full": False,
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["Reload cause is: 'Reload after crash.'"]},
|
||||
|
@ -125,7 +131,7 @@ EntityManager::doBackoff waiting for remote sysdb version ....ok
|
|||
===> /var/log/agents/Acl-830 Fri Jul 7 15:07:00 2023 <===
|
||||
===== Output from /usr/bin/Acl [] (PID=830) started Jul 7 15:06:10.871700 ===
|
||||
EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -158,9 +164,9 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
|||
"activeTime": 360,
|
||||
"virtMem": "6644",
|
||||
"sharedMem": "3996",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -185,9 +191,9 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
|||
"activeTime": 360,
|
||||
"virtMem": "6644",
|
||||
"sharedMem": "3996",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["Device has reported a high CPU utilization: 75.2%"]},
|
||||
|
@ -203,7 +209,7 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
|||
"memTotal": 2004568,
|
||||
"memFree": 879004,
|
||||
"version": "4.27.3F",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -219,7 +225,7 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
|||
"memTotal": 2004568,
|
||||
"memFree": 89004,
|
||||
"version": "4.27.3F",
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["Device has reported a high memory usage: 95.56%"]},
|
||||
|
@ -233,7 +239,7 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok
|
|||
none 294M 78M 217M 27% /
|
||||
none 294M 78M 217M 27% /.overlay
|
||||
/dev/loop0 461M 461M 0 100% /rootfs-i386
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -247,7 +253,7 @@ none 294M 78M 217M 27% /.overlay
|
|||
none 294M 78M 217M 27% /
|
||||
none 294M 78M 217M 84% /.overlay
|
||||
/dev/loop0 461M 461M 0 100% /rootfs-i386
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -264,7 +270,7 @@ none 294M 78M 217M 84% /.overlay
|
|||
"eos_data": [
|
||||
"""synchronised
|
||||
poll interval unknown
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -275,7 +281,7 @@ poll interval unknown
|
|||
"eos_data": [
|
||||
"""unsynchronised
|
||||
poll interval unknown
|
||||
"""
|
||||
""",
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "failure", "messages": ["The device is not synchronized with the configured NTP server(s): 'unsynchronised'"]},
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.tests.vlan.py
|
||||
"""
|
||||
"""Tests for anta.tests.vlan.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.tests.vxlan.py
|
||||
"""
|
||||
"""Tests for anta.tests.vxlan.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
@ -107,7 +106,7 @@ DATA: list[dict[str, Any]] = [
|
|||
},
|
||||
},
|
||||
"warnings": [],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {"result": "success"},
|
||||
|
@ -172,7 +171,7 @@ DATA: list[dict[str, Any]] = [
|
|||
},
|
||||
},
|
||||
"warnings": ["Your configuration contains warnings. This does not mean misconfigurations. But you may wish to re-check your configurations."],
|
||||
}
|
||||
},
|
||||
],
|
||||
"inputs": None,
|
||||
"expected": {
|
||||
|
@ -184,7 +183,7 @@ DATA: list[dict[str, Any]] = [
|
|||
"'No VLAN-VNI mapping in Vxlan1'}, {'name': 'Flood List', 'checkPass': False, 'hasWarning': True, 'detail': "
|
||||
"'No VXLAN VLANs in Vxlan1'}, {'name': 'Routing', 'checkPass': True, 'hasWarning': False, 'detail': ''}, {'name': "
|
||||
"'VNI VRF ACL', 'checkPass': True, 'hasWarning': False, 'detail': ''}, {'name': 'VRF-VNI Dynamic VLAN', 'checkPass': True, "
|
||||
"'hasWarning': False, 'detail': ''}, {'name': 'Decap VRF-VNI Map', 'checkPass': True, 'hasWarning': False, 'detail': ''}]}}"
|
||||
"'hasWarning': False, 'detail': ''}, {'name': 'Decap VRF-VNI Map', 'checkPass': True, 'hasWarning': False, 'detail': ''}]}}",
|
||||
],
|
||||
},
|
||||
},
|
||||
|
@ -203,12 +202,12 @@ DATA: list[dict[str, Any]] = [
|
|||
"vxlanIntfs": {
|
||||
"Vxlan1": {
|
||||
"vniBindings": {
|
||||
"10020": {"vlan": 20, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}}
|
||||
"10020": {"vlan": 20, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}},
|
||||
},
|
||||
"vniBindingsToVrf": {"500": {"vrfName": "PROD", "vlan": 1199, "source": "evpn"}},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"bindings": {10020: 20, 500: 1199}},
|
||||
"expected": {"result": "success"},
|
||||
|
@ -221,12 +220,12 @@ DATA: list[dict[str, Any]] = [
|
|||
"vxlanIntfs": {
|
||||
"Vxlan1": {
|
||||
"vniBindings": {
|
||||
"10020": {"vlan": 20, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}}
|
||||
"10020": {"vlan": 20, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}},
|
||||
},
|
||||
"vniBindingsToVrf": {"500": {"vrfName": "PROD", "vlan": 1199, "source": "evpn"}},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"bindings": {10010: 10, 10020: 20, 500: 1199}},
|
||||
"expected": {"result": "failure", "messages": ["The following VNI(s) have no binding: ['10010']"]},
|
||||
|
@ -239,12 +238,12 @@ DATA: list[dict[str, Any]] = [
|
|||
"vxlanIntfs": {
|
||||
"Vxlan1": {
|
||||
"vniBindings": {
|
||||
"10020": {"vlan": 30, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}}
|
||||
"10020": {"vlan": 30, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}},
|
||||
},
|
||||
"vniBindingsToVrf": {"500": {"vrfName": "PROD", "vlan": 1199, "source": "evpn"}},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"bindings": {10020: 20, 500: 1199}},
|
||||
"expected": {"result": "failure", "messages": ["The following VNI(s) have the wrong VLAN binding: [{'10020': 30}]"]},
|
||||
|
@ -257,12 +256,12 @@ DATA: list[dict[str, Any]] = [
|
|||
"vxlanIntfs": {
|
||||
"Vxlan1": {
|
||||
"vniBindings": {
|
||||
"10020": {"vlan": 30, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}}
|
||||
"10020": {"vlan": 30, "dynamicVlan": False, "source": "static", "interfaces": {"Ethernet31": {"dot1q": 0}, "Vxlan1": {"dot1q": 20}}},
|
||||
},
|
||||
"vniBindingsToVrf": {"500": {"vrfName": "PROD", "vlan": 1199, "source": "evpn"}},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"inputs": {"bindings": {10010: 10, 10020: 20, 500: 1199}},
|
||||
"expected": {
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli submodule."""
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli.check submodule."""
|
||||
|
|
|
@ -1,30 +1,28 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.check
|
||||
"""
|
||||
"""Tests for anta.cli.check."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
|
||||
def test_anta_check(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta check
|
||||
"""
|
||||
"""Test anta check."""
|
||||
result = click_runner.invoke(anta, ["check"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta check" in result.output
|
||||
|
||||
|
||||
def test_anta_check_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta check --help
|
||||
"""
|
||||
"""Test anta check --help."""
|
||||
result = click_runner.invoke(anta, ["check", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta check" in result.output
|
||||
|
|
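The import shuffle above, repeated across the CLI test modules, moves names that are only needed for type annotations (such as click.testing.CliRunner) behind an `if TYPE_CHECKING:` guard so they carry no runtime import cost. A small self-contained sketch of the pattern; the `describe` function is illustrative only:

from __future__ import annotations  # annotations stay strings, so the guarded import suffices

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by type checkers; the module has no runtime dependency on click.
    from click.testing import CliRunner


def describe(runner: CliRunner) -> str:
    """Return the runner's class name (illustrative use of the annotation-only import)."""
    return type(runner).__name__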
|
@ -1,9 +1,8 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.check.commands
|
||||
"""
|
||||
"""Tests for anta.cli.check.commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
|
@ -21,7 +20,7 @@ DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data"
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"catalog_path, expected_exit, expected_output",
|
||||
("catalog_path", "expected_exit", "expected_output"),
|
||||
[
|
||||
pytest.param("ghost_catalog.yml", ExitCode.USAGE_ERROR, "Error: Invalid value for '--catalog'", id="catalog does not exist"),
|
||||
pytest.param("test_catalog_with_undefined_module.yml", ExitCode.USAGE_ERROR, "Test catalog is invalid!", id="catalog is not valid"),
|
||||
|
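The parametrize change above (mirrored in the other CLI test files) swaps the comma-separated argnames string for a tuple of individual names, the spelling preferred by flake8-pytest-style (PT006) as enforced by ruff; pytest accepts both forms. A runnable sketch with a hypothetical add() helper:

import pytest


def add(left: int, right: int) -> int:
    """Tiny helper used only for the demonstration."""
    return left + right


# Equivalent to @pytest.mark.parametrize("left, right, expected", ...) but with one
# explicit name per element, which linters can validate more easily.
@pytest.mark.parametrize(
    ("left", "right", "expected"),
    [
        pytest.param(1, 2, 3, id="small numbers"),
        pytest.param(10, -4, 6, id="mixed signs"),
    ],
)
def test_add(left: int, right: int, expected: int) -> None:
    """Check that add() returns the expected sum."""
    assert add(left, right) == expected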
@ -29,9 +28,7 @@ DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data"
|
|||
],
|
||||
)
|
||||
def test_catalog(click_runner: CliRunner, catalog_path: Path, expected_exit: int, expected_output: str) -> None:
|
||||
"""
|
||||
Test `anta check catalog -c catalog
|
||||
"""
|
||||
"""Test `anta check catalog -c catalog."""
|
||||
result = click_runner.invoke(anta, ["check", "catalog", "-c", str(DATA_DIR / catalog_path)])
|
||||
assert result.exit_code == expected_exit
|
||||
assert expected_output in result.output
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli.debug submodule."""
|
||||
|
|
|
@ -1,30 +1,28 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.debug
|
||||
"""
|
||||
"""Tests for anta.cli.debug."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
|
||||
def test_anta_debug(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta debug
|
||||
"""
|
||||
"""Test anta debug."""
|
||||
result = click_runner.invoke(anta, ["debug"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta debug" in result.output
|
||||
|
||||
|
||||
def test_anta_debug_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta debug --help
|
||||
"""
|
||||
"""Test anta debug --help."""
|
||||
result = click_runner.invoke(anta, ["debug", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta debug" in result.output
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.debug.commands
|
||||
"""
|
||||
"""Tests for anta.cli.debug.commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Literal
|
||||
|
@ -18,7 +17,7 @@ if TYPE_CHECKING:
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"command, ofmt, version, revision, device, failed",
|
||||
("command", "ofmt", "version", "revision", "device", "failed"),
|
||||
[
|
||||
pytest.param("show version", "json", None, None, "dummy", False, id="json command"),
|
||||
pytest.param("show version", "text", None, None, "dummy", False, id="text command"),
|
||||
|
@ -29,11 +28,15 @@ if TYPE_CHECKING:
|
|||
],
|
||||
)
|
||||
def test_run_cmd(
|
||||
click_runner: CliRunner, command: str, ofmt: Literal["json", "text"], version: Literal["1", "latest"] | None, revision: int | None, device: str, failed: bool
|
||||
click_runner: CliRunner,
|
||||
command: str,
|
||||
ofmt: Literal["json", "text"],
|
||||
version: Literal["1", "latest"] | None,
|
||||
revision: int | None,
|
||||
device: str,
|
||||
failed: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Test `anta debug run-cmd`
|
||||
"""
|
||||
"""Test `anta debug run-cmd`."""
|
||||
# pylint: disable=too-many-arguments
|
||||
cli_args = ["-l", "debug", "debug", "run-cmd", "--command", command, "--device", device]
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli.exec submodule."""
|
||||
|
|
|
@ -1,30 +1,28 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.exec
|
||||
"""
|
||||
"""Tests for anta.cli.exec."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
|
||||
def test_anta_exec(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta exec
|
||||
"""
|
||||
"""Test anta exec."""
|
||||
result = click_runner.invoke(anta, ["exec"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta exec" in result.output
|
||||
|
||||
|
||||
def test_anta_exec_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta exec --help
|
||||
"""
|
||||
"""Test anta exec --help."""
|
||||
result = click_runner.invoke(anta, ["exec", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta exec" in result.output
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.exec.commands
|
||||
"""
|
||||
"""Tests for anta.cli.exec.commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
@ -21,27 +19,21 @@ if TYPE_CHECKING:
|
|||
|
||||
|
||||
def test_clear_counters_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test `anta exec clear-counters --help`
|
||||
"""
|
||||
"""Test `anta exec clear-counters --help`."""
|
||||
result = click_runner.invoke(clear_counters, ["--help"])
|
||||
assert result.exit_code == 0
|
||||
assert "Usage" in result.output
|
||||
|
||||
|
||||
def test_snapshot_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test `anta exec snapshot --help`
|
||||
"""
|
||||
"""Test `anta exec snapshot --help`."""
|
||||
result = click_runner.invoke(snapshot, ["--help"])
|
||||
assert result.exit_code == 0
|
||||
assert "Usage" in result.output
|
||||
|
||||
|
||||
def test_collect_tech_support_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test `anta exec collect-tech-support --help`
|
||||
"""
|
||||
"""Test `anta exec collect-tech-support --help`."""
|
||||
result = click_runner.invoke(collect_tech_support, ["--help"])
|
||||
assert result.exit_code == 0
|
||||
assert "Usage" in result.output
|
||||
|
@ -55,9 +47,7 @@ def test_collect_tech_support_help(click_runner: CliRunner) -> None:
|
|||
],
|
||||
)
|
||||
def test_clear_counters(click_runner: CliRunner, tags: str | None) -> None:
|
||||
"""
|
||||
Test `anta exec clear-counters`
|
||||
"""
|
||||
"""Test `anta exec clear-counters`."""
|
||||
cli_args = ["exec", "clear-counters"]
|
||||
if tags is not None:
|
||||
cli_args.extend(["--tags", tags])
|
||||
|
@ -69,7 +59,7 @@ COMMAND_LIST_PATH_FILE = Path(__file__).parent.parent.parent.parent / "data" / "
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"commands_path, tags",
|
||||
("commands_path", "tags"),
|
||||
[
|
||||
pytest.param(None, None, id="missing command list"),
|
||||
pytest.param(Path("/I/do/not/exist"), None, id="wrong path for command_list"),
|
||||
|
@ -78,9 +68,7 @@ COMMAND_LIST_PATH_FILE = Path(__file__).parent.parent.parent.parent / "data" / "
|
|||
],
|
||||
)
|
||||
def test_snapshot(tmp_path: Path, click_runner: CliRunner, commands_path: Path | None, tags: str | None) -> None:
|
||||
"""
|
||||
Test `anta exec snapshot`
|
||||
"""
|
||||
"""Test `anta exec snapshot`."""
|
||||
cli_args = ["exec", "snapshot", "--output", str(tmp_path)]
|
||||
# Need to mock datetetime
|
||||
if commands_path is not None:
|
||||
|
@ -99,7 +87,7 @@ def test_snapshot(tmp_path: Path, click_runner: CliRunner, commands_path: Path |
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"output, latest, configure, tags",
|
||||
("output", "latest", "configure", "tags"),
|
||||
[
|
||||
pytest.param(None, None, False, None, id="no params"),
|
||||
pytest.param("/tmp/dummy", None, False, None, id="with output"),
|
||||
|
@ -109,9 +97,7 @@ def test_snapshot(tmp_path: Path, click_runner: CliRunner, commands_path: Path |
|
|||
],
|
||||
)
|
||||
def test_collect_tech_support(click_runner: CliRunner, output: str | None, latest: str | None, configure: bool | None, tags: str | None) -> None:
|
||||
"""
|
||||
Test `anta exec collect-tech-support`
|
||||
"""
|
||||
"""Test `anta exec collect-tech-support`."""
|
||||
cli_args = ["exec", "collect-tech-support"]
|
||||
if output is not None:
|
||||
cli_args.extend(["--output", output])
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.exec.utils
|
||||
"""
|
||||
"""Tests for anta.cli.exec.utils."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
@ -12,40 +10,59 @@ from unittest.mock import call, patch
|
|||
|
||||
import pytest
|
||||
|
||||
from anta.cli.exec.utils import clear_counters_utils # , collect_commands, collect_scheduled_show_tech
|
||||
from anta.device import AntaDevice
|
||||
from anta.inventory import AntaInventory
|
||||
from anta.cli.exec.utils import (
|
||||
clear_counters_utils,
|
||||
)
|
||||
from anta.models import AntaCommand
|
||||
|
||||
# , collect_commands, collect_scheduled_show_tech
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pytest import LogCaptureFixture
|
||||
from anta.device import AntaDevice
|
||||
from anta.inventory import AntaInventory
|
||||
|
||||
|
||||
# TODO complete test cases
|
||||
@pytest.mark.asyncio
|
||||
# TODO: complete test cases
|
||||
@pytest.mark.asyncio()
|
||||
@pytest.mark.parametrize(
|
||||
"inventory_state, per_device_command_output, tags",
|
||||
("inventory_state", "per_device_command_output", "tags"),
|
||||
[
|
||||
pytest.param(
|
||||
{"dummy": {"is_online": False}, "dummy2": {"is_online": False}, "dummy3": {"is_online": False}},
|
||||
{
|
||||
"dummy": {"is_online": False},
|
||||
"dummy2": {"is_online": False},
|
||||
"dummy3": {"is_online": False},
|
||||
},
|
||||
{},
|
||||
None,
|
||||
id="no_connected_device",
|
||||
),
|
||||
pytest.param(
|
||||
{"dummy": {"is_online": True, "hw_model": "cEOSLab"}, "dummy2": {"is_online": True, "hw_model": "vEOS-lab"}, "dummy3": {"is_online": False}},
|
||||
{
|
||||
"dummy": {"is_online": True, "hw_model": "cEOSLab"},
|
||||
"dummy2": {"is_online": True, "hw_model": "vEOS-lab"},
|
||||
"dummy3": {"is_online": False},
|
||||
},
|
||||
{},
|
||||
None,
|
||||
id="cEOSLab and vEOS-lab devices",
|
||||
),
|
||||
pytest.param(
|
||||
{"dummy": {"is_online": True}, "dummy2": {"is_online": True}, "dummy3": {"is_online": False}},
|
||||
{
|
||||
"dummy": {"is_online": True},
|
||||
"dummy2": {"is_online": True},
|
||||
"dummy3": {"is_online": False},
|
||||
},
|
||||
{"dummy": None}, # None means the command failed to collect
|
||||
None,
|
||||
id="device with error",
|
||||
),
|
||||
pytest.param(
|
||||
{"dummy": {"is_online": True}, "dummy2": {"is_online": True}, "dummy3": {"is_online": True}},
|
||||
{
|
||||
"dummy": {"is_online": True},
|
||||
"dummy2": {"is_online": True},
|
||||
"dummy3": {"is_online": True},
|
||||
},
|
||||
{},
|
||||
["spine"],
|
||||
id="tags",
|
||||
|
@ -53,42 +70,38 @@ if TYPE_CHECKING:
|
|||
],
|
||||
)
|
||||
async def test_clear_counters_utils(
|
||||
caplog: LogCaptureFixture,
|
||||
caplog: pytest.LogCaptureFixture,
|
||||
test_inventory: AntaInventory,
|
||||
inventory_state: dict[str, Any],
|
||||
per_device_command_output: dict[str, Any],
|
||||
tags: list[str] | None,
|
||||
tags: set[str] | None,
|
||||
) -> None:
|
||||
"""
|
||||
Test anta.cli.exec.utils.clear_counters_utils
|
||||
"""
|
||||
"""Test anta.cli.exec.utils.clear_counters_utils."""
|
||||
|
||||
async def mock_connect_inventory() -> None:
|
||||
"""
|
||||
mocking connect_inventory coroutine
|
||||
"""
|
||||
"""Mock connect_inventory coroutine."""
|
||||
for name, device in test_inventory.items():
|
||||
device.is_online = inventory_state[name].get("is_online", True)
|
||||
device.established = inventory_state[name].get("established", device.is_online)
|
||||
device.hw_model = inventory_state[name].get("hw_model", "dummy")
|
||||
|
||||
async def dummy_collect(self: AntaDevice, command: AntaCommand) -> None:
|
||||
"""
|
||||
mocking collect coroutine
|
||||
"""
|
||||
"""Mock collect coroutine."""
|
||||
command.output = per_device_command_output.get(self.name, "")
|
||||
|
||||
# Need to patch the child device class
|
||||
with patch("anta.device.AsyncEOSDevice.collect", side_effect=dummy_collect, autospec=True) as mocked_collect, patch(
|
||||
"anta.inventory.AntaInventory.connect_inventory",
|
||||
side_effect=mock_connect_inventory,
|
||||
) as mocked_connect_inventory:
|
||||
print(mocked_collect)
|
||||
with (
|
||||
patch("anta.device.AsyncEOSDevice.collect", side_effect=dummy_collect, autospec=True) as mocked_collect,
|
||||
patch(
|
||||
"anta.inventory.AntaInventory.connect_inventory",
|
||||
side_effect=mock_connect_inventory,
|
||||
) as mocked_connect_inventory,
|
||||
):
|
||||
mocked_collect.side_effect = dummy_collect
|
||||
await clear_counters_utils(test_inventory, tags=tags)
|
||||
|
||||
mocked_connect_inventory.assert_awaited_once()
|
||||
devices_established = list(test_inventory.get_inventory(established_only=True, tags=tags).values())
|
||||
devices_established = test_inventory.get_inventory(established_only=True, tags=tags).devices
|
||||
if devices_established:
|
||||
# Building the list of calls
|
||||
calls = []
|
||||
|
@ -96,32 +109,28 @@ async def test_clear_counters_utils(
|
|||
calls.append(
|
||||
call(
|
||||
device,
|
||||
**{
|
||||
"command": AntaCommand(
|
||||
command="clear counters",
|
||||
version="latest",
|
||||
revision=None,
|
||||
ofmt="json",
|
||||
output=per_device_command_output.get(device.name, ""),
|
||||
errors=[],
|
||||
)
|
||||
},
|
||||
)
|
||||
command=AntaCommand(
|
||||
command="clear counters",
|
||||
version="latest",
|
||||
revision=None,
|
||||
ofmt="json",
|
||||
output=per_device_command_output.get(device.name, ""),
|
||||
errors=[],
|
||||
),
|
||||
),
|
||||
)
|
||||
if device.hw_model not in ["cEOSLab", "vEOS-lab"]:
|
||||
calls.append(
|
||||
call(
|
||||
device,
|
||||
**{
|
||||
"command": AntaCommand(
|
||||
command="clear hardware counter drop",
|
||||
version="latest",
|
||||
revision=None,
|
||||
ofmt="json",
|
||||
output=per_device_command_output.get(device.name, ""),
|
||||
)
|
||||
},
|
||||
)
|
||||
command=AntaCommand(
|
||||
command="clear hardware counter drop",
|
||||
version="latest",
|
||||
revision=None,
|
||||
ofmt="json",
|
||||
output=per_device_command_output.get(device.name, ""),
|
||||
),
|
||||
),
|
||||
)
|
||||
mocked_collect.assert_has_awaits(calls)
|
||||
# Check error
|
||||
|
|
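The expected-call construction in this file also drops the `**{"command": ...}` dictionary unpacking in favour of a plain keyword argument; the two are equivalent at runtime, but the direct form is clearer and lets static checkers see the parameter name. A small sketch using `unittest.mock.call` with an arbitrary command string:

from unittest.mock import call

# Indirect: the keyword is hidden inside an unpacked dict literal.
old_call = call("device-1", **{"command": "clear counters"})

# Direct: the same call expressed with an explicit keyword argument.
new_call = call("device-1", command="clear counters")

# mock.call objects compare equal when their args and kwargs match.
assert old_call == new_call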
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli.get submodule."""
|
||||
|
|
|
@ -1,30 +1,28 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.get
|
||||
"""
|
||||
"""Tests for anta.cli.get."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
|
||||
def test_anta_get(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta get
|
||||
"""
|
||||
"""Test anta get."""
|
||||
result = click_runner.invoke(anta, ["get"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta get" in result.output
|
||||
|
||||
|
||||
def test_anta_get_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta get --help
|
||||
"""
|
||||
"""Test anta get --help."""
|
||||
result = click_runner.invoke(anta, ["get", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta get" in result.output
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.get.commands
|
||||
"""
|
||||
"""Tests for anta.cli.get.commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import filecmp
|
||||
|
@ -12,7 +11,6 @@ from typing import TYPE_CHECKING
|
|||
from unittest.mock import ANY, patch
|
||||
|
||||
import pytest
|
||||
from cvprac.cvp_client import CvpClient
|
||||
from cvprac.cvp_client_errors import CvpApiError
|
||||
|
||||
from anta.cli import anta
|
||||
|
@ -20,12 +18,13 @@ from anta.cli.utils import ExitCode
|
|||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
from cvprac.cvp_client import CvpClient
|
||||
|
||||
DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"cvp_container, cvp_connect_failure",
|
||||
("cvp_container", "cvp_connect_failure"),
|
||||
[
|
||||
pytest.param(None, False, id="all devices"),
|
||||
pytest.param("custom_container", False, id="custom container"),
|
||||
|
@ -38,28 +37,46 @@ def test_from_cvp(
|
|||
cvp_container: str | None,
|
||||
cvp_connect_failure: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Test `anta get from-cvp`
|
||||
"""Test `anta get from-cvp`.
|
||||
|
||||
This test verifies that username and password are NOT mandatory to run this command
|
||||
"""
|
||||
output: Path = tmp_path / "output.yml"
|
||||
cli_args = ["get", "from-cvp", "--output", str(output), "--host", "42.42.42.42", "--username", "anta", "--password", "anta"]
|
||||
cli_args = [
|
||||
"get",
|
||||
"from-cvp",
|
||||
"--output",
|
||||
str(output),
|
||||
"--host",
|
||||
"42.42.42.42",
|
||||
"--username",
|
||||
"anta",
|
||||
"--password",
|
||||
"anta",
|
||||
]
|
||||
|
||||
if cvp_container is not None:
|
||||
cli_args.extend(["--container", cvp_container])
|
||||
|
||||
def mock_cvp_connect(self: CvpClient, *args: str, **kwargs: str) -> None:
|
||||
# pylint: disable=unused-argument
|
||||
def mock_cvp_connect(_self: CvpClient, *_args: str, **_kwargs: str) -> None:
|
||||
if cvp_connect_failure:
|
||||
raise CvpApiError(msg="mocked CvpApiError")
|
||||
|
||||
# always get a token
|
||||
with patch("anta.cli.get.commands.get_cv_token", return_value="dummy_token"), patch(
|
||||
"cvprac.cvp_client.CvpClient.connect", autospec=True, side_effect=mock_cvp_connect
|
||||
) as mocked_cvp_connect, patch("cvprac.cvp_client.CvpApi.get_inventory", autospec=True, return_value=[]) as mocked_get_inventory, patch(
|
||||
"cvprac.cvp_client.CvpApi.get_devices_in_container", autospec=True, return_value=[]
|
||||
) as mocked_get_devices_in_container:
|
||||
with (
|
||||
patch("anta.cli.get.commands.get_cv_token", return_value="dummy_token"),
|
||||
patch(
|
||||
"cvprac.cvp_client.CvpClient.connect",
|
||||
autospec=True,
|
||||
side_effect=mock_cvp_connect,
|
||||
) as mocked_cvp_connect,
|
||||
patch("cvprac.cvp_client.CvpApi.get_inventory", autospec=True, return_value=[]) as mocked_get_inventory,
|
||||
patch(
|
||||
"cvprac.cvp_client.CvpApi.get_devices_in_container",
|
||||
autospec=True,
|
||||
return_value=[],
|
||||
) as mocked_get_devices_in_container,
|
||||
):
|
||||
result = click_runner.invoke(anta, cli_args)
|
||||
|
||||
if not cvp_connect_failure:
|
||||
|
@ -79,12 +96,24 @@ def test_from_cvp(
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"ansible_inventory, ansible_group, expected_exit, expected_log",
|
||||
("ansible_inventory", "ansible_group", "expected_exit", "expected_log"),
|
||||
[
|
||||
pytest.param("ansible_inventory.yml", None, ExitCode.OK, None, id="no group"),
|
||||
pytest.param("ansible_inventory.yml", "ATD_LEAFS", ExitCode.OK, None, id="group found"),
|
||||
pytest.param("ansible_inventory.yml", "DUMMY", ExitCode.USAGE_ERROR, "Group DUMMY not found in Ansible inventory", id="group not found"),
|
||||
pytest.param("empty_ansible_inventory.yml", None, ExitCode.USAGE_ERROR, "is empty", id="empty inventory"),
|
||||
pytest.param(
|
||||
"ansible_inventory.yml",
|
||||
"DUMMY",
|
||||
ExitCode.USAGE_ERROR,
|
||||
"Group DUMMY not found in Ansible inventory",
|
||||
id="group not found",
|
||||
),
|
||||
pytest.param(
|
||||
"empty_ansible_inventory.yml",
|
||||
None,
|
||||
ExitCode.USAGE_ERROR,
|
||||
"is empty",
|
||||
id="empty inventory",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_from_ansible(
|
||||
|
@ -95,8 +124,8 @@ def test_from_ansible(
|
|||
expected_exit: int,
|
||||
expected_log: str | None,
|
||||
) -> None:
|
||||
"""
|
||||
Test `anta get from-ansible`
|
||||
# pylint: disable=too-many-arguments
|
||||
"""Test `anta get from-ansible`.
|
||||
|
||||
This test verifies:
|
||||
* the parsing of an ansible-inventory
|
||||
|
@ -107,7 +136,14 @@ def test_from_ansible(
|
|||
output: Path = tmp_path / "output.yml"
|
||||
ansible_inventory_path = DATA_DIR / ansible_inventory
|
||||
# Init cli_args
|
||||
cli_args = ["get", "from-ansible", "--output", str(output), "--ansible-inventory", str(ansible_inventory_path)]
|
||||
cli_args = [
|
||||
"get",
|
||||
"from-ansible",
|
||||
"--output",
|
||||
str(output),
|
||||
"--ansible-inventory",
|
||||
str(ansible_inventory_path),
|
||||
]
|
||||
|
||||
# Set --ansible-group
|
||||
if ansible_group is not None:
|
||||
|
@ -122,14 +158,30 @@ def test_from_ansible(
|
|||
assert expected_log in result.output
|
||||
else:
|
||||
assert output.exists()
|
||||
# TODO check size of generated inventory to validate the group functionality!
|
||||
# TODO: check size of generated inventory to validate the group functionality!
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"env_set, overwrite, is_tty, prompt, expected_exit, expected_log",
|
||||
("env_set", "overwrite", "is_tty", "prompt", "expected_exit", "expected_log"),
|
||||
[
|
||||
pytest.param(True, False, True, "y", ExitCode.OK, "", id="no-overwrite-tty-init-prompt-yes"),
|
||||
pytest.param(True, False, True, "N", ExitCode.INTERNAL_ERROR, "Aborted", id="no-overwrite-tty-init-prompt-no"),
|
||||
pytest.param(
|
||||
True,
|
||||
False,
|
||||
True,
|
||||
"y",
|
||||
ExitCode.OK,
|
||||
"",
|
||||
id="no-overwrite-tty-init-prompt-yes",
|
||||
),
|
||||
pytest.param(
|
||||
True,
|
||||
False,
|
||||
True,
|
||||
"N",
|
||||
ExitCode.INTERNAL_ERROR,
|
||||
"Aborted",
|
||||
id="no-overwrite-tty-init-prompt-no",
|
||||
),
|
||||
pytest.param(
|
||||
True,
|
||||
False,
|
||||
|
@ -159,8 +211,7 @@ def test_from_ansible_overwrite(
|
|||
expected_log: str | None,
|
||||
) -> None:
|
||||
# pylint: disable=too-many-arguments
|
||||
"""
|
||||
Test `anta get from-ansible` overwrite mechanism
|
||||
"""Test `anta get from-ansible` overwrite mechanism.
|
||||
|
||||
The test uses a static ansible-inventory and output as these are tested in other functions
|
||||
|
||||
|
@ -177,7 +228,12 @@ def test_from_ansible_overwrite(
|
|||
ansible_inventory_path = DATA_DIR / "ansible_inventory.yml"
|
||||
expected_anta_inventory_path = DATA_DIR / "expected_anta_inventory.yml"
|
||||
tmp_output = tmp_path / "output.yml"
|
||||
cli_args = ["get", "from-ansible", "--ansible-inventory", str(ansible_inventory_path)]
|
||||
cli_args = [
|
||||
"get",
|
||||
"from-ansible",
|
||||
"--ansible-inventory",
|
||||
str(ansible_inventory_path),
|
||||
]
|
||||
|
||||
if env_set:
|
||||
tmp_inv = Path(str(temp_env["ANTA_INVENTORY"]))
|
||||
|
|
|
@ -1,12 +1,11 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.get.utils
|
||||
"""
|
||||
"""Tests for anta.cli.get.utils."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import nullcontext
|
||||
from contextlib import AbstractContextManager, nullcontext
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
@ -21,10 +20,8 @@ DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data"
|
|||
|
||||
|
||||
def test_get_cv_token() -> None:
|
||||
"""
|
||||
Test anta.get.utils.get_cv_token
|
||||
"""
|
||||
ip = "42.42.42.42"
|
||||
"""Test anta.get.utils.get_cv_token."""
|
||||
ip_addr = "42.42.42.42"
|
||||
username = "ant"
|
||||
password = "formica"
|
||||
|
||||
|
@ -32,7 +29,7 @@ def test_get_cv_token() -> None:
|
|||
mocked_ret = MagicMock(autospec=requests.Response)
|
||||
mocked_ret.json.return_value = {"sessionId": "simple"}
|
||||
patched_request.return_value = mocked_ret
|
||||
res = get_cv_token(ip, username, password)
|
||||
res = get_cv_token(ip_addr, username, password)
|
||||
patched_request.assert_called_once_with(
|
||||
"POST",
|
||||
"https://42.42.42.42/cvpservice/login/authenticate.do",
|
||||
|
@ -72,9 +69,7 @@ CVP_INVENTORY = [
|
|||
],
|
||||
)
|
||||
def test_create_inventory_from_cvp(tmp_path: Path, inventory: list[dict[str, Any]]) -> None:
|
||||
"""
|
||||
Test anta.get.utils.create_inventory_from_cvp
|
||||
"""
|
||||
"""Test anta.get.utils.create_inventory_from_cvp."""
|
||||
output = tmp_path / "output.yml"
|
||||
|
||||
create_inventory_from_cvp(inventory, output)
|
||||
|
@ -86,19 +81,41 @@ def test_create_inventory_from_cvp(tmp_path: Path, inventory: list[dict[str, Any
|
|||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"inventory_filename, ansible_group, expected_raise, expected_inv_length",
|
||||
("inventory_filename", "ansible_group", "expected_raise", "expected_inv_length"),
|
||||
[
|
||||
pytest.param("ansible_inventory.yml", None, nullcontext(), 7, id="no group"),
|
||||
pytest.param("ansible_inventory.yml", "ATD_LEAFS", nullcontext(), 4, id="group found"),
|
||||
pytest.param("ansible_inventory.yml", "DUMMY", pytest.raises(ValueError, match="Group DUMMY not found in Ansible inventory"), 0, id="group not found"),
|
||||
pytest.param("empty_ansible_inventory.yml", None, pytest.raises(ValueError, match="Ansible inventory .* is empty"), 0, id="empty inventory"),
|
||||
pytest.param("wrong_ansible_inventory.yml", None, pytest.raises(ValueError, match="Could not parse"), 0, id="os error inventory"),
|
||||
pytest.param(
|
||||
"ansible_inventory.yml",
|
||||
"DUMMY",
|
||||
pytest.raises(ValueError, match="Group DUMMY not found in Ansible inventory"),
|
||||
0,
|
||||
id="group not found",
|
||||
),
|
||||
pytest.param(
|
||||
"empty_ansible_inventory.yml",
|
||||
None,
|
||||
pytest.raises(ValueError, match="Ansible inventory .* is empty"),
|
||||
0,
|
||||
id="empty inventory",
|
||||
),
|
||||
pytest.param(
|
||||
"wrong_ansible_inventory.yml",
|
||||
None,
|
||||
pytest.raises(ValueError, match="Could not parse"),
|
||||
0,
|
||||
id="os error inventory",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_create_inventory_from_ansible(tmp_path: Path, inventory_filename: Path, ansible_group: str | None, expected_raise: Any, expected_inv_length: int) -> None:
|
||||
"""
|
||||
Test anta.get.utils.create_inventory_from_ansible
|
||||
"""
|
||||
def test_create_inventory_from_ansible(
|
||||
tmp_path: Path,
|
||||
inventory_filename: Path,
|
||||
ansible_group: str | None,
|
||||
expected_raise: AbstractContextManager[Exception],
|
||||
expected_inv_length: int,
|
||||
) -> None:
|
||||
"""Test anta.get.utils.create_inventory_from_ansible."""
|
||||
target_file = tmp_path / "inventory.yml"
|
||||
inventory_file_path = DATA_DIR / inventory_filename
|
||||
|
||||
|
|
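The get/utils hunk above types `expected_raise` as `AbstractContextManager[Exception]`, the pattern where each parametrized case carries the context manager the call is expected to run under: `nullcontext()` for the happy path, `pytest.raises(...)` when an exception is expected. A runnable sketch with a hypothetical `to_int` helper, assuming the same convention:

from __future__ import annotations

from contextlib import AbstractContextManager, nullcontext

import pytest


def to_int(value: str) -> int:
    """Tiny helper: parse an integer, raising ValueError on bad input."""
    return int(value)


@pytest.mark.parametrize(
    ("value", "expected_raise", "expected_result"),
    [
        pytest.param("42", nullcontext(), 42, id="valid int"),
        pytest.param("forty-two", pytest.raises(ValueError, match="invalid literal"), 0, id="not an int"),
    ],
)
def test_to_int(value: str, expected_raise: AbstractContextManager[Exception], expected_result: int) -> None:
    """Run the parser under the context manager the case expects."""
    with expected_raise:
        assert to_int(value) == expected_result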
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Test anta.cli.nrfu submodule."""
|
||||
|
|
|
@ -1,33 +1,31 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.nrfu
|
||||
"""
|
||||
"""Tests for anta.cli.nrfu."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
from tests.lib.utils import default_anta_env
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
# TODO: write unit tests for ignore-status and ignore-error
|
||||
|
||||
|
||||
def test_anta_nrfu_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu --help
|
||||
"""
|
||||
"""Test anta nrfu --help."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta nrfu" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu, catalog is given via env
|
||||
"""
|
||||
"""Test anta nrfu, catalog is given via env."""
|
||||
result = click_runner.invoke(anta, ["nrfu"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "ANTA Inventory contains 3 devices" in result.output
|
||||
|
@ -35,9 +33,7 @@ def test_anta_nrfu(click_runner: CliRunner) -> None:
|
|||
|
||||
|
||||
def test_anta_password_required(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test that password is provided
|
||||
"""
|
||||
"""Test that password is provided."""
|
||||
env = default_anta_env()
|
||||
env["ANTA_PASSWORD"] = None
|
||||
result = click_runner.invoke(anta, ["nrfu"], env=env)
|
||||
|
@ -47,9 +43,7 @@ def test_anta_password_required(click_runner: CliRunner) -> None:
|
|||
|
||||
|
||||
def test_anta_password(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test that password can be provided either via --password or --prompt
|
||||
"""
|
||||
"""Test that password can be provided either via --password or --prompt."""
|
||||
env = default_anta_env()
|
||||
env["ANTA_PASSWORD"] = None
|
||||
result = click_runner.invoke(anta, ["nrfu", "--password", "secret"], env=env)
|
||||
|
@ -59,9 +53,7 @@ def test_anta_password(click_runner: CliRunner) -> None:
|
|||
|
||||
|
||||
def test_anta_enable_password(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test that enable password can be provided either via --enable-password or --prompt
|
||||
"""
|
||||
"""Test that enable password can be provided either via --enable-password or --prompt."""
|
||||
# Both enable and enable-password
|
||||
result = click_runner.invoke(anta, ["nrfu", "--enable", "--enable-password", "secret"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
|
@ -78,7 +70,6 @@ def test_anta_enable_password(click_runner: CliRunner) -> None:
|
|||
assert "Please enter a password to enter EOS privileged EXEC mode" not in result.output
|
||||
assert result.exit_code == ExitCode.OK
|
||||
|
||||
# enable and enable-password and prompt (redundant)
|
||||
result = click_runner.invoke(anta, ["nrfu", "--enable", "--enable-password", "blah", "--prompt"], input="y\npassword\npassword\n")
|
||||
assert "Is a password required to enter EOS privileged EXEC mode? [y/N]:" not in result.output
|
||||
assert "Please enter a password to enter EOS privileged EXEC mode" not in result.output
|
||||
|
@ -91,17 +82,13 @@ def test_anta_enable_password(click_runner: CliRunner) -> None:
|
|||
|
||||
|
||||
def test_anta_enable_alone(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test that enable can be provided either without enable-password
|
||||
"""
|
||||
"""Test that enable can be provided either without enable-password."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "--enable"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
|
||||
|
||||
def test_disable_cache(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test that disable_cache is working on inventory
|
||||
"""
|
||||
"""Test that disable_cache is working on inventory."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "--disable-cache"])
|
||||
stdout_lines = result.stdout.split("\n")
|
||||
# All caches should be disabled from the inventory
|
||||
|
|
|
@ -1,97 +1,82 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.nrfu.commands
|
||||
"""
|
||||
"""Tests for anta.cli.nrfu.commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from anta.cli import anta
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
DATA_DIR: Path = Path(__file__).parent.parent.parent.parent.resolve() / "data"
|
||||
|
||||
|
||||
def test_anta_nrfu_table_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu table --help
|
||||
"""
|
||||
"""Test anta nrfu table --help."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "table", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta nrfu table" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_text_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu text --help
|
||||
"""
|
||||
"""Test anta nrfu text --help."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "text", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta nrfu text" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_json_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu json --help
|
||||
"""
|
||||
"""Test anta nrfu json --help."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "json", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta nrfu json" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_template_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu tpl-report --help
|
||||
"""
|
||||
"""Test anta nrfu tpl-report --help."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "tpl-report", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta nrfu tpl-report" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_table(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu, catalog is given via env
|
||||
"""
|
||||
"""Test anta nrfu, catalog is given via env."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "table"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "dummy │ VerifyEOSVersion │ success" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_text(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu, catalog is given via env
|
||||
"""
|
||||
"""Test anta nrfu, catalog is given via env."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "text"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "dummy :: VerifyEOSVersion :: SUCCESS" in result.output
|
||||
|
||||
|
||||
def test_anta_nrfu_json(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu, catalog is given via env
|
||||
"""
|
||||
"""Test anta nrfu, catalog is given via env."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "json"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "JSON results of all tests" in result.output
|
||||
m = re.search(r"\[\n {[\s\S]+ }\n\]", result.output)
|
||||
assert m is not None
|
||||
result_list = json.loads(m.group())
|
||||
for r in result_list:
|
||||
if r["name"] == "dummy":
|
||||
assert r["test"] == "VerifyEOSVersion"
|
||||
assert r["result"] == "success"
|
||||
assert "JSON results" in result.output
|
||||
match = re.search(r"\[\n {[\s\S]+ }\n\]", result.output)
|
||||
assert match is not None
|
||||
result_list = json.loads(match.group())
|
||||
for res in result_list:
|
||||
if res["name"] == "dummy":
|
||||
assert res["test"] == "VerifyEOSVersion"
|
||||
assert res["result"] == "success"
|
||||
|
||||
|
||||
def test_anta_nrfu_template(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta nrfu, catalog is given via env
|
||||
"""
|
||||
"""Test anta nrfu, catalog is given via env."""
|
||||
result = click_runner.invoke(anta, ["nrfu", "tpl-report", "--template", str(DATA_DIR / "template.j2")])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "* VerifyEOSVersion is SUCCESS for dummy" in result.output
|
||||
|
|
|
@ -1,58 +1,64 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.cli.__init__
|
||||
"""
|
||||
"""Tests for anta.cli.__init__."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from click.testing import CliRunner
|
||||
from typing import TYPE_CHECKING
|
||||
from unittest.mock import patch
|
||||
|
||||
from anta.cli import anta
|
||||
import pytest
|
||||
|
||||
from anta.cli import anta, cli
|
||||
from anta.cli.utils import ExitCode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from click.testing import CliRunner
|
||||
|
||||
|
||||
def test_anta(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta main entrypoint
|
||||
"""
|
||||
"""Test anta main entrypoint."""
|
||||
result = click_runner.invoke(anta)
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage" in result.output
|
||||
|
||||
|
||||
def test_anta_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta --help
|
||||
"""
|
||||
"""Test anta --help."""
|
||||
result = click_runner.invoke(anta, ["--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage" in result.output
|
||||
|
||||
|
||||
def test_anta_exec_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta exec --help
|
||||
"""
|
||||
"""Test anta exec --help."""
|
||||
result = click_runner.invoke(anta, ["exec", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta exec" in result.output
|
||||
|
||||
|
||||
def test_anta_debug_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta debug --help
|
||||
"""
|
||||
"""Test anta debug --help."""
|
||||
result = click_runner.invoke(anta, ["debug", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta debug" in result.output
|
||||
|
||||
|
||||
def test_anta_get_help(click_runner: CliRunner) -> None:
|
||||
"""
|
||||
Test anta get --help
|
||||
"""
|
||||
"""Test anta get --help."""
|
||||
result = click_runner.invoke(anta, ["get", "--help"])
|
||||
assert result.exit_code == ExitCode.OK
|
||||
assert "Usage: anta get" in result.output
|
||||
|
||||
|
||||
def test_uncaught_failure_anta(caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""Test uncaught failure when running ANTA cli."""
|
||||
with (
|
||||
pytest.raises(SystemExit) as e_info,
|
||||
patch("anta.cli.anta", side_effect=ZeroDivisionError()),
|
||||
):
|
||||
cli()
|
||||
assert "CRITICAL" in caplog.text
|
||||
assert "Uncaught Exception when running ANTA CLI" in caplog.text
|
||||
assert e_info.value.code == 1
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Tests for inventory submodule."""
|
||||
|
|
|
@ -2,23 +2,25 @@
|
|||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""ANTA Inventory unit tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from pydantic import ValidationError
|
||||
|
||||
from anta.inventory import AntaInventory
|
||||
from anta.inventory.exceptions import InventoryIncorrectSchema, InventoryRootKeyError
|
||||
from anta.inventory.exceptions import InventoryIncorrectSchemaError, InventoryRootKeyError
|
||||
from tests.data.json_data import ANTA_INVENTORY_TESTS_INVALID, ANTA_INVENTORY_TESTS_VALID
|
||||
from tests.lib.utils import generate_test_ids_dict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pathlib import Path
|
||||
|
||||
class Test_AntaInventory:
|
||||
|
||||
class TestAntaInventory:
|
||||
"""Test AntaInventory class."""
|
||||
|
||||
def create_inventory(self, content: str, tmp_path: Path) -> str:
|
||||
|
@ -31,7 +33,7 @@ class Test_AntaInventory:
|
|||
|
||||
def check_parameter(self, parameter: str, test_definition: dict[Any, Any]) -> bool:
|
||||
"""Check if parameter is configured in testbed."""
|
||||
return "parameters" in test_definition and parameter in test_definition["parameters"].keys()
|
||||
return "parameters" in test_definition and parameter in test_definition["parameters"]
|
||||
|
||||
@pytest.mark.parametrize("test_definition", ANTA_INVENTORY_TESTS_VALID, ids=generate_test_ids_dict)
|
||||
def test_init_valid(self, test_definition: dict[str, Any], tmp_path: Path) -> None:
|
||||
|
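The `check_parameter` tweak above relies on `key in mapping` being equivalent to `key in mapping.keys()` for dictionaries, so the `.keys()` call is redundant (ruff reports this as SIM118). A one-line illustration with throwaway data:

parameters = {"vrf": "default", "minimum": 42}

# Both expressions are True; the first avoids spelling out the keys view.
assert "vrf" in parameters
assert "vrf" in parameters.keys()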
@ -55,8 +57,7 @@ class Test_AntaInventory:
|
|||
try:
|
||||
AntaInventory.parse(filename=inventory_file, username="arista", password="arista123")
|
||||
except ValidationError as exc:
|
||||
logging.error("Exceptions is: %s", str(exc))
|
||||
assert False
|
||||
raise AssertionError from exc
|
||||
|
||||
@pytest.mark.parametrize("test_definition", ANTA_INVENTORY_TESTS_INVALID, ids=generate_test_ids_dict)
|
||||
def test_init_invalid(self, test_definition: dict[str, Any], tmp_path: Path) -> None:
|
||||
|
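The exception-handling change above replaces `assert False` inside an `except ValidationError` block with `raise AssertionError from exc`, which is not stripped under `python -O` and keeps the original exception as the cause (the rewrite flake8-bugbear's B011 suggests). A sketch with a stand-in parser function; the names are illustrative only:

def parse_percentage(value: str) -> float:
    """Stand-in for a real parser; raises ValueError on bad input."""
    return float(value.rstrip("%"))


def test_parse_percentage() -> None:
    """Fail explicitly if parsing raises, chaining the original error as the cause."""
    try:
        result = parse_percentage("95.56%")
    except ValueError as exc:
        # `assert False` would vanish under `python -O`; raising keeps the failure explicit.
        raise AssertionError(f"unexpected parsing error: {exc}") from exc
    assert result == 95.56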
@ -77,5 +78,5 @@ class Test_AntaInventory:
|
|||
|
||||
"""
|
||||
inventory_file = self.create_inventory(content=test_definition["input"], tmp_path=tmp_path)
|
||||
with pytest.raises((InventoryIncorrectSchema, InventoryRootKeyError, ValidationError)):
|
||||
with pytest.raises((InventoryIncorrectSchemaError, InventoryRootKeyError, ValidationError)):
|
||||
AntaInventory.parse(filename=inventory_file, username="arista", password="arista123")
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""ANTA Inventory models unit tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
@ -30,7 +31,7 @@ from tests.data.json_data import (
|
|||
from tests.lib.utils import generate_test_ids_dict
|
||||
|
||||
|
||||
class Test_InventoryUnitModels:
|
||||
class TestInventoryUnitModels:
|
||||
"""Test components of AntaInventoryInput model."""
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_HOST_VALID, ids=generate_test_ids_dict)
|
||||
|
@ -51,9 +52,8 @@ class Test_InventoryUnitModels:
|
|||
host_inventory = AntaInventoryHost(host=test_definition["input"])
|
||||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
assert False
|
||||
else:
|
||||
assert test_definition["input"] == str(host_inventory.host)
|
||||
raise AssertionError from exc
|
||||
assert test_definition["input"] == str(host_inventory.host)
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_HOST_INVALID, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_host_invalid(self, test_definition: dict[str, Any]) -> None:
|
||||
|
@ -110,9 +110,8 @@ class Test_InventoryUnitModels:
|
|||
network_inventory = AntaInventoryNetwork(network=test_definition["input"])
|
||||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
assert False
|
||||
else:
|
||||
assert test_definition["input"] == str(network_inventory.network)
|
||||
raise AssertionError from exc
|
||||
assert test_definition["input"] == str(network_inventory.network)
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_NETWORK_INVALID, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_network_invalid(self, test_definition: dict[str, Any]) -> None:
|
||||
|
@ -133,11 +132,11 @@ class Test_InventoryUnitModels:
|
|||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
else:
|
||||
assert False
|
||||
raise AssertionError
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_NETWORK_CACHE, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_network_cache(self, test_definition: dict[str, Any]) -> None:
|
||||
"""Test network disable_cache
|
||||
"""Test network disable_cache.
|
||||
|
||||
Test structure:
|
||||
---------------
|
||||
|
@ -176,10 +175,9 @@ class Test_InventoryUnitModels:
|
|||
)
|
||||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
assert False
|
||||
else:
|
||||
assert test_definition["input"]["start"] == str(range_inventory.start)
|
||||
assert test_definition["input"]["end"] == str(range_inventory.end)
|
||||
raise AssertionError from exc
|
||||
assert test_definition["input"]["start"] == str(range_inventory.start)
|
||||
assert test_definition["input"]["end"] == str(range_inventory.end)
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_RANGE_INVALID, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_range_invalid(self, test_definition: dict[str, Any]) -> None:
|
||||
|
@ -203,11 +201,11 @@ class Test_InventoryUnitModels:
|
|||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
else:
|
||||
assert False
|
||||
raise AssertionError
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_MODEL_RANGE_CACHE, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_range_cache(self, test_definition: dict[str, Any]) -> None:
|
||||
"""Test range disable_cache
|
||||
"""Test range disable_cache.
|
||||
|
||||
Test structure:
|
||||
---------------
|
||||
|
@ -221,22 +219,23 @@ class Test_InventoryUnitModels:
|
|||
"""
|
||||
if "disable_cache" in test_definition["input"]:
|
||||
range_inventory = AntaInventoryRange(
|
||||
start=test_definition["input"]["start"], end=test_definition["input"]["end"], disable_cache=test_definition["input"]["disable_cache"]
|
||||
start=test_definition["input"]["start"],
|
||||
end=test_definition["input"]["end"],
|
||||
disable_cache=test_definition["input"]["disable_cache"],
|
||||
)
|
||||
else:
|
||||
range_inventory = AntaInventoryRange(start=test_definition["input"]["start"], end=test_definition["input"]["end"])
|
||||
assert test_definition["expected_result"] == range_inventory.disable_cache
|
||||
|
||||
|
||||
class Test_AntaInventoryInputModel:
|
||||
class TestAntaInventoryInputModel:
|
||||
"""Unit test of AntaInventoryInput model."""
|
||||
|
||||
def test_inventory_input_structure(self) -> None:
|
||||
"""Test inventory keys are those expected."""
|
||||
|
||||
inventory = AntaInventoryInput()
|
||||
logging.info("Inventory keys are: %s", str(inventory.model_dump().keys()))
|
||||
assert all(elem in inventory.model_dump().keys() for elem in ["hosts", "networks", "ranges"])
|
||||
assert all(elem in inventory.model_dump() for elem in ["hosts", "networks", "ranges"])
|
||||
|
||||
@pytest.mark.parametrize("inventory_def", INVENTORY_MODEL_VALID, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_intput_valid(self, inventory_def: dict[str, Any]) -> None:
|
||||
|
@ -265,10 +264,9 @@ class Test_AntaInventoryInputModel:
|
|||
inventory = AntaInventoryInput(**inventory_def["input"])
|
||||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
assert False
|
||||
else:
|
||||
logging.info("Checking if all root keys are correctly lodaded")
|
||||
assert all(elem in inventory.model_dump().keys() for elem in inventory_def["input"].keys())
|
||||
raise AssertionError from exc
|
||||
logging.info("Checking if all root keys are correctly lodaded")
|
||||
assert all(elem in inventory.model_dump() for elem in inventory_def["input"])
|
||||
|
||||
@pytest.mark.parametrize("inventory_def", INVENTORY_MODEL_INVALID, ids=generate_test_ids_dict)
|
||||
def test_anta_inventory_intput_invalid(self, inventory_def: dict[str, Any]) -> None:
|
||||
|
@ -294,19 +292,19 @@ class Test_AntaInventoryInputModel:
|
|||
|
||||
"""
|
||||
try:
|
||||
if "hosts" in inventory_def["input"].keys():
|
||||
if "hosts" in inventory_def["input"]:
|
||||
logging.info(
|
||||
"Loading %s into AntaInventoryInput hosts section",
|
||||
str(inventory_def["input"]["hosts"]),
|
||||
)
|
||||
AntaInventoryInput(hosts=inventory_def["input"]["hosts"])
|
||||
if "networks" in inventory_def["input"].keys():
|
||||
if "networks" in inventory_def["input"]:
|
||||
logging.info(
|
||||
"Loading %s into AntaInventoryInput networks section",
|
||||
str(inventory_def["input"]["networks"]),
|
||||
)
|
||||
AntaInventoryInput(networks=inventory_def["input"]["networks"])
|
||||
if "ranges" in inventory_def["input"].keys():
|
||||
if "ranges" in inventory_def["input"]:
|
||||
logging.info(
|
||||
"Loading %s into AntaInventoryInput ranges section",
|
||||
str(inventory_def["input"]["ranges"]),
|
||||
|
@ -315,10 +313,10 @@ class Test_AntaInventoryInputModel:
|
|||
except ValidationError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
else:
|
||||
assert False
|
||||
raise AssertionError
|
||||
|
||||
|
||||
class Test_InventoryDeviceModel:
|
||||
class TestInventoryDeviceModel:
|
||||
"""Unit test of InventoryDevice model."""
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_DEVICE_MODEL_VALID, ids=generate_test_ids_dict)
|
||||
|
@ -349,12 +347,12 @@ class Test_InventoryDeviceModel:
|
|||
if test_definition["expected_result"] == "invalid":
|
||||
pytest.skip("Not concerned by the test")
|
||||
|
||||
for entity in test_definition["input"]:
|
||||
try:
|
||||
try:
|
||||
for entity in test_definition["input"]:
|
||||
AsyncEOSDevice(**entity)
|
||||
except TypeError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
assert False
|
||||
except TypeError as exc:
|
||||
logging.warning("Error: %s", str(exc))
|
||||
raise AssertionError from exc
|
||||
|
||||
@pytest.mark.parametrize("test_definition", INVENTORY_DEVICE_MODEL_INVALID, ids=generate_test_ids_dict)
|
||||
def test_inventory_device_invalid(self, test_definition: dict[str, Any]) -> None:
|
||||
|
@ -384,10 +382,10 @@ class Test_InventoryDeviceModel:
|
|||
if test_definition["expected_result"] == "valid":
|
||||
pytest.skip("Not concerned by the test")
|
||||
|
||||
for entity in test_definition["input"]:
|
||||
try:
|
||||
try:
|
||||
for entity in test_definition["input"]:
|
||||
AsyncEOSDevice(**entity)
|
||||
except TypeError as exc:
|
||||
logging.info("Error: %s", str(exc))
|
||||
else:
|
||||
assert False
|
||||
except TypeError as exc:
|
||||
logging.info("Error: %s", str(exc))
|
||||
else:
|
||||
raise AssertionError
@ -1,3 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""Tests for anta.reporter submodule."""
@ -1,44 +1,51 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Test anta.report.__init__.py
|
||||
"""
|
||||
"""Test anta.report.__init__.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Callable
|
||||
from typing import TYPE_CHECKING, Callable
|
||||
|
||||
import pytest
|
||||
from rich.table import Table
|
||||
|
||||
from anta import RICH_COLOR_PALETTE
|
||||
from anta.custom_types import TestStatus
|
||||
from anta.reporter import ReportTable
|
||||
from anta.result_manager import ResultManager
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from anta.custom_types import TestStatus
|
||||
from anta.result_manager import ResultManager
|
||||
|
||||
|
||||
class Test_ReportTable:
|
||||
"""
|
||||
Test ReportTable class
|
||||
"""
|
||||
class TestReportTable:
|
||||
"""Test ReportTable class."""
|
||||
|
||||
# not testing __init__ as nothing is going on there
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"usr_list, delimiter, expected_output",
|
||||
("usr_list", "delimiter", "expected_output"),
|
||||
[
|
||||
pytest.param([], None, "", id="empty list no delimiter"),
|
||||
pytest.param([], "*", "", id="empty list with delimiter"),
|
||||
pytest.param(["elem1"], None, "elem1", id="one elem list no delimiter"),
|
||||
pytest.param(["elem1"], "*", "* elem1", id="one elem list with delimiter"),
|
||||
pytest.param(["elem1", "elem2"], None, "elem1\nelem2", id="two elems list no delimiter"),
|
||||
pytest.param(["elem1", "elem2"], "&", "& elem1\n& elem2", id="two elems list with delimiter"),
|
||||
pytest.param(
|
||||
["elem1", "elem2"],
|
||||
None,
|
||||
"elem1\nelem2",
|
||||
id="two elems list no delimiter",
|
||||
),
|
||||
pytest.param(
|
||||
["elem1", "elem2"],
|
||||
"&",
|
||||
"& elem1\n& elem2",
|
||||
id="two elems list with delimiter",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test__split_list_to_txt_list(self, usr_list: list[str], delimiter: str | None, expected_output: str) -> None:
|
||||
"""
|
||||
test _split_list_to_txt_list
|
||||
"""
|
||||
"""Test _split_list_to_txt_list."""
|
||||
# pylint: disable=protected-access
|
||||
report = ReportTable()
|
||||
assert report._split_list_to_txt_list(usr_list, delimiter) == expected_output
|
||||
|
@ -52,9 +59,7 @@ class Test_ReportTable:
|
|||
],
|
||||
)
|
||||
def test__build_headers(self, headers: list[str]) -> None:
|
||||
"""
|
||||
test _build_headers
|
||||
"""
|
||||
"""Test _build_headers."""
|
||||
# pylint: disable=protected-access
|
||||
report = ReportTable()
|
||||
table = Table()
|
||||
|
@ -65,7 +70,7 @@ class Test_ReportTable:
|
|||
assert table.columns[table_column_before].style == RICH_COLOR_PALETTE.HEADER
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"status, expected_status",
|
||||
("status", "expected_status"),
|
||||
[
|
||||
pytest.param("unknown", "unknown", id="unknown status"),
|
||||
pytest.param("unset", "[grey74]unset", id="unset status"),
|
||||
|
@ -76,48 +81,42 @@ class Test_ReportTable:
|
|||
],
|
||||
)
|
||||
def test__color_result(self, status: TestStatus, expected_status: str) -> None:
|
||||
"""
|
||||
test _build_headers
|
||||
"""
|
||||
"""Test _build_headers."""
|
||||
# pylint: disable=protected-access
|
||||
report = ReportTable()
|
||||
assert report._color_result(status) == expected_status
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"host, testcase, title, number_of_tests, expected_length",
|
||||
("title", "number_of_tests", "expected_length"),
|
||||
[
|
||||
pytest.param(None, None, None, 5, 5, id="all results"),
|
||||
pytest.param("host1", None, None, 5, 0, id="result for host1 when no host1 test"),
|
||||
pytest.param(None, "VerifyTest3", None, 5, 1, id="result for test VerifyTest3"),
|
||||
pytest.param(None, None, "Custom title", 5, 5, id="Change table title"),
|
||||
pytest.param(None, 5, 5, id="all results"),
|
||||
pytest.param(None, 0, 0, id="result for host1 when no host1 test"),
|
||||
pytest.param(None, 5, 5, id="result for test VerifyTest3"),
|
||||
pytest.param("Custom title", 5, 5, id="Change table title"),
|
||||
],
|
||||
)
|
||||
def test_report_all(
|
||||
self,
|
||||
result_manager_factory: Callable[[int], ResultManager],
|
||||
host: str | None,
|
||||
testcase: str | None,
|
||||
title: str | None,
|
||||
number_of_tests: int,
|
||||
expected_length: int,
|
||||
) -> None:
|
||||
"""
|
||||
test report_all
|
||||
"""
|
||||
"""Test report_all."""
|
||||
# pylint: disable=too-many-arguments
|
||||
rm = result_manager_factory(number_of_tests)
|
||||
manager = result_manager_factory(number_of_tests)
|
||||
|
||||
report = ReportTable()
|
||||
kwargs = {"host": host, "testcase": testcase, "title": title}
|
||||
kwargs = {"title": title}
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
res = report.report_all(rm, **kwargs) # type: ignore[arg-type]
|
||||
res = report.report_all(manager, **kwargs) # type: ignore[arg-type]
|
||||
|
||||
assert isinstance(res, Table)
|
||||
assert res.title == (title or "All tests results")
|
||||
assert res.row_count == expected_length
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"testcase, title, number_of_tests, expected_length",
|
||||
("test", "title", "number_of_tests", "expected_length"),
|
||||
[
|
||||
pytest.param(None, None, 5, 5, id="all results"),
|
||||
pytest.param("VerifyTest3", None, 5, 1, id="result for test VerifyTest3"),
|
||||
|
@ -127,67 +126,62 @@ class Test_ReportTable:
|
|||
def test_report_summary_tests(
|
||||
self,
|
||||
result_manager_factory: Callable[[int], ResultManager],
|
||||
testcase: str | None,
|
||||
test: str | None,
|
||||
title: str | None,
|
||||
number_of_tests: int,
|
||||
expected_length: int,
|
||||
) -> None:
|
||||
"""
|
||||
test report_summary_tests
|
||||
"""
|
||||
"""Test report_summary_tests."""
|
||||
# pylint: disable=too-many-arguments
|
||||
# TODO refactor this later... this is injecting double test results by modyfing the device name
|
||||
# TODO: refactor this later... this is injecting double test results by modyfing the device name
|
||||
# should be a fixture
|
||||
rm = result_manager_factory(number_of_tests)
|
||||
new_results = [result.model_copy() for result in rm.get_results()]
|
||||
manager = result_manager_factory(number_of_tests)
|
||||
new_results = [result.model_copy() for result in manager.results]
|
||||
for result in new_results:
|
||||
result.name = "test_device"
|
||||
result.result = "failure"
|
||||
rm.add_test_results(new_results)
|
||||
|
||||
report = ReportTable()
|
||||
kwargs = {"testcase": testcase, "title": title}
|
||||
kwargs = {"tests": [test] if test is not None else None, "title": title}
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
res = report.report_summary_tests(rm, **kwargs) # type: ignore[arg-type]
|
||||
res = report.report_summary_tests(manager, **kwargs) # type: ignore[arg-type]
|
||||
|
||||
assert isinstance(res, Table)
|
||||
assert res.title == (title or "Summary per test case")
|
||||
assert res.title == (title or "Summary per test")
|
||||
assert res.row_count == expected_length
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"host, title, number_of_tests, expected_length",
|
||||
("dev", "title", "number_of_tests", "expected_length"),
|
||||
[
|
||||
pytest.param(None, None, 5, 2, id="all results"),
|
||||
pytest.param("host1", None, 5, 1, id="result for host host1"),
|
||||
pytest.param(None, "Custom title", 5, 2, id="Change table title"),
|
||||
pytest.param(None, None, 5, 1, id="all results"),
|
||||
pytest.param("device1", None, 5, 1, id="result for host host1"),
|
||||
pytest.param(None, "Custom title", 5, 1, id="Change table title"),
|
||||
],
|
||||
)
|
||||
def test_report_summary_hosts(
|
||||
def test_report_summary_devices(
|
||||
self,
|
||||
result_manager_factory: Callable[[int], ResultManager],
|
||||
host: str | None,
|
||||
dev: str | None,
|
||||
title: str | None,
|
||||
number_of_tests: int,
|
||||
expected_length: int,
|
||||
) -> None:
|
||||
"""
|
||||
test report_summary_hosts
|
||||
"""
|
||||
"""Test report_summary_devices."""
|
||||
# pylint: disable=too-many-arguments
|
||||
# TODO refactor this later... this is injecting double test results by modyfing the device name
|
||||
# TODO: refactor this later... this is injecting double test results by modyfing the device name
|
||||
# should be a fixture
|
||||
rm = result_manager_factory(number_of_tests)
|
||||
new_results = [result.model_copy() for result in rm.get_results()]
|
||||
manager = result_manager_factory(number_of_tests)
|
||||
new_results = [result.model_copy() for result in manager.results]
|
||||
for result in new_results:
|
||||
result.name = host or "test_device"
|
||||
result.name = dev or "test_device"
|
||||
result.result = "failure"
|
||||
rm.add_test_results(new_results)
|
||||
manager.results = new_results
|
||||
|
||||
report = ReportTable()
|
||||
kwargs = {"host": host, "title": title}
|
||||
kwargs = {"devices": [dev] if dev is not None else None, "title": title}
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
res = report.report_summary_hosts(rm, **kwargs) # type: ignore[arg-type]
|
||||
res = report.report_summary_devices(manager, **kwargs) # type: ignore[arg-type]
|
||||
|
||||
assert isinstance(res, Table)
|
||||
assert res.title == (title or "Summary per host")
|
||||
assert res.title == (title or "Summary per device")
|
||||
assert res.row_count == expected_length
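Taken together, the three reporting tests above show the renamed 0.14.0 entry points. A hedged sketch in the same fixture-based style follows; the test name is hypothetical and it assumes the result_manager_factory fixture and the imports already present in this file.

def test_reporting_usage(result_manager_factory: Callable[[int], ResultManager]) -> None:
    """Hypothetical example: exercise the three renamed reporting methods."""
    manager = result_manager_factory(5)
    report = ReportTable()
    # report_all/report_summary_tests/report_summary_devices all take the
    # ResultManager first, then optional filters and a title.
    assert isinstance(report.report_all(manager, title="All tests results"), Table)
    assert isinstance(report.report_summary_tests(manager, tests=["VerifyTest3"]), Table)
    assert isinstance(report.report_summary_devices(manager, devices=["device1"]), Table)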
@ -1,3 +1,4 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Tests for anta.result_manager submodule."""
@ -1,190 +1,64 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Test anta.result_manager.__init__.py
|
||||
"""
|
||||
"""Test anta.result_manager.__init__.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from contextlib import nullcontext
|
||||
from typing import TYPE_CHECKING, Any, Callable
|
||||
from contextlib import AbstractContextManager, nullcontext
|
||||
from typing import TYPE_CHECKING, Callable
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.custom_types import TestStatus
|
||||
from anta.result_manager import ResultManager
|
||||
from anta.result_manager import ResultManager, models
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from anta.custom_types import TestStatus
|
||||
from anta.result_manager.models import TestResult
|
||||
|
||||
|
||||
class Test_ResultManager:
|
||||
"""
|
||||
Test ResultManager class
|
||||
"""
|
||||
class TestResultManager:
|
||||
"""Test ResultManager class."""
|
||||
|
||||
# not testing __init__ as nothing is going on there
|
||||
|
||||
def test__len__(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""
|
||||
test __len__
|
||||
"""
|
||||
"""Test __len__."""
|
||||
list_result = list_result_factory(3)
|
||||
result_manager = ResultManager()
|
||||
assert len(result_manager) == 0
|
||||
for i in range(3):
|
||||
result_manager.add_test_result(list_result[i])
|
||||
result_manager.add(list_result[i])
|
||||
assert len(result_manager) == i + 1
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"starting_status, test_status, expected_status, expected_raise",
|
||||
[
|
||||
pytest.param("unset", "unset", "unset", nullcontext(), id="unset->unset"),
|
||||
pytest.param("unset", "success", "success", nullcontext(), id="unset->success"),
|
||||
pytest.param("unset", "error", "unset", nullcontext(), id="set error"),
|
||||
pytest.param("skipped", "skipped", "skipped", nullcontext(), id="skipped->skipped"),
|
||||
pytest.param("skipped", "unset", "skipped", nullcontext(), id="skipped, add unset"),
|
||||
pytest.param("skipped", "success", "success", nullcontext(), id="skipped, add success"),
|
||||
pytest.param("skipped", "failure", "failure", nullcontext(), id="skipped, add failure"),
|
||||
pytest.param("success", "unset", "success", nullcontext(), id="success, add unset"),
|
||||
pytest.param("success", "skipped", "success", nullcontext(), id="success, add skipped"),
|
||||
pytest.param("success", "success", "success", nullcontext(), id="success->success"),
|
||||
pytest.param("success", "failure", "failure", nullcontext(), id="success->failure"),
|
||||
pytest.param("failure", "unset", "failure", nullcontext(), id="failure->failure"),
|
||||
pytest.param("failure", "skipped", "failure", nullcontext(), id="failure, add unset"),
|
||||
pytest.param("failure", "success", "failure", nullcontext(), id="failure, add skipped"),
|
||||
pytest.param("failure", "failure", "failure", nullcontext(), id="failure, add success"),
|
||||
pytest.param("unset", "unknown", None, pytest.raises(ValueError), id="wrong status"),
|
||||
],
|
||||
)
|
||||
def test__update_status(self, starting_status: TestStatus, test_status: TestStatus, expected_status: str, expected_raise: Any) -> None:
|
||||
"""
|
||||
Test ResultManager._update_status
|
||||
"""
|
||||
result_manager = ResultManager()
|
||||
result_manager.status = starting_status
|
||||
assert result_manager.error_status is False
|
||||
def test_results_getter(self, result_manager_factory: Callable[[int], ResultManager]) -> None:
|
||||
"""Test ResultManager.results property getter."""
|
||||
result_manager = result_manager_factory(3)
|
||||
res = result_manager.results
|
||||
assert len(res) == 3
|
||||
assert isinstance(res, list)
|
||||
for e in res:
|
||||
assert isinstance(e, models.TestResult)
|
||||
|
||||
with expected_raise:
|
||||
result_manager._update_status(test_status) # pylint: disable=protected-access
|
||||
if test_status == "error":
|
||||
assert result_manager.error_status is True
|
||||
else:
|
||||
assert result_manager.status == expected_status
|
||||
|
||||
def test_add_test_result(self, test_result_factory: Callable[[int], TestResult]) -> None:
|
||||
"""
|
||||
Test ResultManager.add_test_result
|
||||
"""
|
||||
result_manager = ResultManager()
|
||||
assert result_manager.status == "unset"
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 0
|
||||
|
||||
# Add one unset test
|
||||
unset_test = test_result_factory(0)
|
||||
unset_test.result = "unset"
|
||||
result_manager.add_test_result(unset_test)
|
||||
assert result_manager.status == "unset"
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 1
|
||||
|
||||
# Add one success test
|
||||
success_test = test_result_factory(1)
|
||||
success_test.result = "success"
|
||||
result_manager.add_test_result(success_test)
|
||||
assert result_manager.status == "success"
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 2
|
||||
|
||||
# Add one error test
|
||||
error_test = test_result_factory(1)
|
||||
error_test.result = "error"
|
||||
result_manager.add_test_result(error_test)
|
||||
assert result_manager.status == "success"
|
||||
assert result_manager.error_status is True
|
||||
def test_results_setter(self, list_result_factory: Callable[[int], list[TestResult]], result_manager_factory: Callable[[int], ResultManager]) -> None:
|
||||
"""Test ResultManager.results property setter."""
|
||||
result_manager = result_manager_factory(3)
|
||||
assert len(result_manager) == 3
|
||||
|
||||
# Add one failure test
|
||||
failure_test = test_result_factory(1)
|
||||
failure_test.result = "failure"
|
||||
result_manager.add_test_result(failure_test)
|
||||
assert result_manager.status == "failure"
|
||||
assert result_manager.error_status is True
|
||||
assert len(result_manager) == 4
|
||||
|
||||
def test_add_test_results(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""
|
||||
Test ResultManager.add_test_results
|
||||
"""
|
||||
result_manager = ResultManager()
|
||||
assert result_manager.status == "unset"
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 0
|
||||
|
||||
# Add three success tests
|
||||
success_list = list_result_factory(3)
|
||||
for test in success_list:
|
||||
test.result = "success"
|
||||
result_manager.add_test_results(success_list)
|
||||
assert result_manager.status == "success"
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 3
|
||||
|
||||
# Add one error test and one failure
|
||||
error_failure_list = list_result_factory(2)
|
||||
error_failure_list[0].result = "error"
|
||||
error_failure_list[1].result = "failure"
|
||||
result_manager.add_test_results(error_failure_list)
|
||||
assert result_manager.status == "failure"
|
||||
assert result_manager.error_status is True
|
||||
tests = list_result_factory(5)
|
||||
result_manager.results = tests
|
||||
assert len(result_manager) == 5
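A hedged sketch of the 0.14.0 entry points shown above, in the same fixture-based style: add() and the results property replace add_test_result(s)/get_results(). The test name is hypothetical and the sketch assumes the factory fixtures and imports already present in this file.

def test_results_roundtrip(list_result_factory: Callable[[int], list[TestResult]], test_result_factory: Callable[[], TestResult]) -> None:
    """Hypothetical example of the 0.14.0 ResultManager entry points."""
    manager = ResultManager()
    manager.results = list_result_factory(3)  # setter replaces the stored list
    manager.add(test_result_factory())        # add() appends a single TestResult
    assert len(manager) == 4
    assert isinstance(manager.json, str)      # JSON view of the stored results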
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"status, error_status, ignore_error, expected_status",
|
||||
[
|
||||
pytest.param("success", False, True, "success", id="no error"),
|
||||
pytest.param("success", True, True, "success", id="error, ignore error"),
|
||||
pytest.param("success", True, False, "error", id="error, do not ignore error"),
|
||||
],
|
||||
)
|
||||
def test_get_status(self, status: TestStatus, error_status: bool, ignore_error: bool, expected_status: str) -> None:
|
||||
"""
|
||||
test ResultManager.get_status
|
||||
"""
|
||||
result_manager = ResultManager()
|
||||
result_manager.status = status
|
||||
result_manager.error_status = error_status
|
||||
|
||||
assert result_manager.get_status(ignore_error=ignore_error) == expected_status
|
||||
|
||||
def test_get_results(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""
|
||||
test ResultManager.get_results
|
||||
"""
|
||||
def test_json(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""Test ResultManager.json property."""
|
||||
result_manager = ResultManager()
|
||||
|
||||
success_list = list_result_factory(3)
|
||||
for test in success_list:
|
||||
test.result = "success"
|
||||
result_manager.add_test_results(success_list)
|
||||
result_manager.results = success_list
|
||||
|
||||
res = result_manager.get_results()
|
||||
assert isinstance(res, list)
|
||||
|
||||
def test_get_json_results(self, list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""
|
||||
test ResultManager.get_json_results
|
||||
"""
|
||||
result_manager = ResultManager()
|
||||
|
||||
success_list = list_result_factory(3)
|
||||
for test in success_list:
|
||||
test.result = "success"
|
||||
result_manager.add_test_results(success_list)
|
||||
|
||||
json_res = result_manager.get_json_results()
|
||||
json_res = result_manager.json
|
||||
assert isinstance(json_res, str)
|
||||
|
||||
# Verifies it can be deserialized back to a list of dict with the correct values types
|
||||
|
@ -197,8 +71,207 @@ class Test_ResultManager:
|
|||
assert test.get("custom_field") is None
|
||||
assert test.get("result") == "success"
|
||||
|
||||
# TODO
|
||||
# get_result_by_test
|
||||
# get_result_by_host
|
||||
# get_testcases
|
||||
# get_hosts
|
||||
@pytest.mark.parametrize(
|
||||
("starting_status", "test_status", "expected_status", "expected_raise"),
|
||||
[
|
||||
pytest.param("unset", "unset", "unset", nullcontext(), id="unset->unset"),
|
||||
pytest.param("unset", "success", "success", nullcontext(), id="unset->success"),
|
||||
pytest.param("unset", "error", "unset", nullcontext(), id="set error"),
|
||||
pytest.param("skipped", "skipped", "skipped", nullcontext(), id="skipped->skipped"),
|
||||
pytest.param("skipped", "unset", "skipped", nullcontext(), id="skipped, add unset"),
|
||||
pytest.param(
|
||||
"skipped",
|
||||
"success",
|
||||
"success",
|
||||
nullcontext(),
|
||||
id="skipped, add success",
|
||||
),
|
||||
pytest.param(
|
||||
"skipped",
|
||||
"failure",
|
||||
"failure",
|
||||
nullcontext(),
|
||||
id="skipped, add failure",
|
||||
),
|
||||
pytest.param("success", "unset", "success", nullcontext(), id="success, add unset"),
|
||||
pytest.param(
|
||||
"success",
|
||||
"skipped",
|
||||
"success",
|
||||
nullcontext(),
|
||||
id="success, add skipped",
|
||||
),
|
||||
pytest.param("success", "success", "success", nullcontext(), id="success->success"),
|
||||
pytest.param("success", "failure", "failure", nullcontext(), id="success->failure"),
|
||||
pytest.param("failure", "unset", "failure", nullcontext(), id="failure->failure"),
|
||||
pytest.param("failure", "skipped", "failure", nullcontext(), id="failure, add unset"),
|
||||
pytest.param(
|
||||
"failure",
|
||||
"success",
|
||||
"failure",
|
||||
nullcontext(),
|
||||
id="failure, add skipped",
|
||||
),
|
||||
pytest.param(
|
||||
"failure",
|
||||
"failure",
|
||||
"failure",
|
||||
nullcontext(),
|
||||
id="failure, add success",
|
||||
),
|
||||
pytest.param(
|
||||
"unset", "unknown", None, pytest.raises(ValueError, match="Input should be 'unset', 'success', 'failure', 'error' or 'skipped'"), id="wrong status"
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_add(
|
||||
self,
|
||||
test_result_factory: Callable[[], TestResult],
|
||||
starting_status: TestStatus,
|
||||
test_status: TestStatus,
|
||||
expected_status: str,
|
||||
expected_raise: AbstractContextManager[Exception],
|
||||
) -> None:
|
||||
# pylint: disable=too-many-arguments
|
||||
"""Test ResultManager_update_status."""
|
||||
result_manager = ResultManager()
|
||||
result_manager.status = starting_status
|
||||
assert result_manager.error_status is False
|
||||
assert len(result_manager) == 0
|
||||
|
||||
test = test_result_factory()
|
||||
test.result = test_status
|
||||
with expected_raise:
|
||||
result_manager.add(test)
|
||||
if test_status == "error":
|
||||
assert result_manager.error_status is True
|
||||
else:
|
||||
assert result_manager.status == expected_status
|
||||
assert len(result_manager) == 1
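The parametrized cases above pin down how add() folds a new result into the overall status: error only flips error_status, and the aggregate status only moves forward along unset, skipped, success, failure. A standalone sketch of that precedence, illustrative only and not the actual ResultManager code:

# Illustrative precedence helper; names and structure are assumptions.
PRECEDENCE = {"unset": 0, "skipped": 1, "success": 2, "failure": 3}

def merge_status(current: str, new: str) -> tuple[str, bool]:
    """Return (overall_status, error_seen) after folding in one result."""
    if new == "error":
        return current, True  # error never changes the aggregate status
    if new not in PRECEDENCE:
        raise ValueError(f"Invalid status: {new}")
    merged = current if PRECEDENCE[current] >= PRECEDENCE[new] else new
    return merged, False

assert merge_status("skipped", "success") == ("success", False)
assert merge_status("failure", "success") == ("failure", False)
assert merge_status("success", "error") == ("success", True)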
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("status", "error_status", "ignore_error", "expected_status"),
|
||||
[
|
||||
pytest.param("success", False, True, "success", id="no error"),
|
||||
pytest.param("success", True, True, "success", id="error, ignore error"),
|
||||
pytest.param("success", True, False, "error", id="error, do not ignore error"),
|
||||
],
|
||||
)
|
||||
def test_get_status(
|
||||
self,
|
||||
status: TestStatus,
|
||||
error_status: bool,
|
||||
ignore_error: bool,
|
||||
expected_status: str,
|
||||
) -> None:
|
||||
"""Test ResultManager.get_status."""
|
||||
result_manager = ResultManager()
|
||||
result_manager.status = status
|
||||
result_manager.error_status = error_status
|
||||
|
||||
assert result_manager.get_status(ignore_error=ignore_error) == expected_status
|
||||
|
||||
def test_filter(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""Test ResultManager.filter."""
|
||||
result_manager = ResultManager()
|
||||
|
||||
success_list = list_result_factory(3)
|
||||
for test in success_list:
|
||||
test.result = "success"
|
||||
result_manager.results = success_list
|
||||
|
||||
test = test_result_factory()
|
||||
test.result = "failure"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.result = "error"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.result = "skipped"
|
||||
result_manager.add(test)
|
||||
|
||||
assert len(result_manager) == 6
|
||||
assert len(result_manager.filter({"failure"})) == 5
|
||||
assert len(result_manager.filter({"error"})) == 5
|
||||
assert len(result_manager.filter({"skipped"})) == 5
|
||||
assert len(result_manager.filter({"failure", "error"})) == 4
|
||||
assert len(result_manager.filter({"failure", "error", "skipped"})) == 3
|
||||
assert len(result_manager.filter({"success", "failure", "error", "skipped"})) == 0
|
||||
|
||||
def test_get_by_tests(self, test_result_factory: Callable[[], TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None:
|
||||
"""Test ResultManager.get_by_tests."""
|
||||
result_manager = result_manager_factory(3)
|
||||
|
||||
test = test_result_factory()
|
||||
test.test = "Test1"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.test = "Test2"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.test = "Test2"
|
||||
result_manager.add(test)
|
||||
|
||||
assert len(result_manager) == 6
|
||||
assert len(result_manager.filter_by_tests({"Test1"})) == 1
|
||||
rm = result_manager.filter_by_tests({"Test1", "Test2"})
|
||||
assert len(rm) == 3
|
||||
assert len(rm.filter_by_tests({"Test1"})) == 1
|
||||
|
||||
def test_get_by_devices(self, test_result_factory: Callable[[], TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None:
|
||||
"""Test ResultManager.get_by_devices."""
|
||||
result_manager = result_manager_factory(3)
|
||||
|
||||
test = test_result_factory()
|
||||
test.name = "Device1"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.name = "Device2"
|
||||
result_manager.add(test)
|
||||
|
||||
test = test_result_factory()
|
||||
test.name = "Device2"
|
||||
result_manager.add(test)
|
||||
|
||||
assert len(result_manager) == 6
|
||||
assert len(result_manager.filter_by_devices({"Device1"})) == 1
|
||||
rm = result_manager.filter_by_devices({"Device1", "Device2"})
|
||||
assert len(rm) == 3
|
||||
assert len(rm.filter_by_devices({"Device1"})) == 1
|
||||
|
||||
def test_get_tests(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""Test ResultManager.get_tests."""
|
||||
result_manager = ResultManager()
|
||||
|
||||
tests = list_result_factory(3)
|
||||
for test in tests:
|
||||
test.test = "Test1"
|
||||
result_manager.results = tests
|
||||
|
||||
test = test_result_factory()
|
||||
test.test = "Test2"
|
||||
result_manager.add(test)
|
||||
|
||||
assert len(result_manager.get_tests()) == 2
|
||||
assert all(t in result_manager.get_tests() for t in ["Test1", "Test2"])
|
||||
|
||||
def test_get_devices(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None:
|
||||
"""Test ResultManager.get_tests."""
|
||||
result_manager = ResultManager()
|
||||
|
||||
tests = list_result_factory(3)
|
||||
for test in tests:
|
||||
test.name = "Device1"
|
||||
result_manager.results = tests
|
||||
|
||||
test = test_result_factory()
|
||||
test.name = "Device2"
|
||||
result_manager.add(test)
|
||||
|
||||
assert len(result_manager.get_devices()) == 2
|
||||
assert all(t in result_manager.get_devices() for t in ["Device1", "Device2"])
@ -2,18 +2,21 @@
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""ANTA Result Manager models unit tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Callable
|
||||
from typing import TYPE_CHECKING, Any, Callable
|
||||
|
||||
import pytest
|
||||
|
||||
# Import as Result to avoid pytest collection
|
||||
from anta.result_manager.models import TestResult as Result
|
||||
from tests.data.json_data import TEST_RESULT_SET_STATUS
|
||||
from tests.lib.fixture import DEVICE_NAME
|
||||
from tests.lib.utils import generate_test_ids_dict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from anta.result_manager.models import TestResult as Result
|
||||
|
||||
|
||||
class TestTestResultModels:
|
||||
"""Test components of anta.result_manager.models."""
@ -1,9 +1,8 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
test anta.device.py
"""
"""test anta.device.py."""

from __future__ import annotations

from pathlib import Path
@ -51,14 +50,21 @@ INIT_CATALOG_DATA: list[dict[str, Any]] = [
|
|||
VerifyUptime,
|
||||
VerifyUptime.Input(
|
||||
minimum=10,
|
||||
filters=VerifyUptime.Input.Filters(tags=["fabric"]),
|
||||
filters=VerifyUptime.Input.Filters(tags={"fabric"}),
|
||||
),
|
||||
),
|
||||
(
|
||||
VerifyUptime,
|
||||
VerifyUptime.Input(
|
||||
minimum=9,
|
||||
filters=VerifyUptime.Input.Filters(tags={"leaf"}),
|
||||
),
|
||||
),
|
||||
(VerifyReloadCause, {"filters": {"tags": ["leaf", "spine"]}}),
|
||||
(VerifyCoredump, VerifyCoredump.Input()),
|
||||
(VerifyAgentLogs, AntaTest.Input()),
|
||||
(VerifyCPUUtilization, VerifyCPUUtilization.Input(filters=VerifyCPUUtilization.Input.Filters(tags=["leaf"]))),
|
||||
(VerifyMemoryUtilization, VerifyMemoryUtilization.Input(filters=VerifyMemoryUtilization.Input.Filters(tags=["testdevice"]))),
|
||||
(VerifyCPUUtilization, VerifyCPUUtilization.Input(filters=VerifyCPUUtilization.Input.Filters(tags={"leaf"}))),
|
||||
(VerifyMemoryUtilization, VerifyMemoryUtilization.Input(filters=VerifyMemoryUtilization.Input.Filters(tags={"testdevice"}))),
|
||||
(VerifyFileSystemUtilization, None),
|
||||
(VerifyNTP, {}),
|
||||
(VerifyMlagStatus, None),
|
||||
|
@ -146,12 +152,12 @@ CATALOG_FROM_LIST_FAIL_DATA: list[dict[str, Any]] = [
|
|||
{
|
||||
"name": "no_input_when_required",
|
||||
"tests": [(FakeTestWithInput, None)],
|
||||
"error": "Field required",
|
||||
"error": "FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Field required",
|
||||
},
|
||||
{
|
||||
"name": "wrong_input_type",
|
||||
"tests": [(FakeTestWithInput, True)],
|
||||
"error": "Value error, Coud not instantiate inputs as type bool is not valid",
|
||||
"tests": [(FakeTestWithInput, {"string": True})],
|
||||
"error": "FakeTestWithInput test inputs are not valid: 1 validation error for Input\n\tstring\n\t Input should be a valid string",
|
||||
},
|
||||
]
|
||||
|
||||
|
@ -169,64 +175,52 @@ TESTS_SETTER_FAIL_DATA: list[dict[str, Any]] = [
|
|||
]
|
||||
|
||||
|
||||
class Test_AntaCatalog:
|
||||
"""
|
||||
Test for anta.catalog.AntaCatalog
|
||||
"""
|
||||
class TestAntaCatalog:
|
||||
"""Test for anta.catalog.AntaCatalog."""
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
|
||||
def test_parse(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Instantiate AntaCatalog from a file
|
||||
"""
|
||||
"""Instantiate AntaCatalog from a file."""
|
||||
catalog: AntaCatalog = AntaCatalog.parse(str(DATA_DIR / catalog_data["filename"]))
|
||||
|
||||
assert len(catalog.tests) == len(catalog_data["tests"])
|
||||
for test_id, (test, inputs) in enumerate(catalog_data["tests"]):
|
||||
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
|
||||
assert catalog.tests[test_id].test == test
|
||||
if inputs is not None:
|
||||
if isinstance(inputs, dict):
|
||||
inputs = test.Input(**inputs)
|
||||
if inputs_data is not None:
|
||||
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
|
||||
assert inputs == catalog.tests[test_id].inputs
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
|
||||
def test_from_list(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Instantiate AntaCatalog from a list
|
||||
"""
|
||||
"""Instantiate AntaCatalog from a list."""
|
||||
catalog: AntaCatalog = AntaCatalog.from_list(catalog_data["tests"])
|
||||
|
||||
assert len(catalog.tests) == len(catalog_data["tests"])
|
||||
for test_id, (test, inputs) in enumerate(catalog_data["tests"]):
|
||||
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
|
||||
assert catalog.tests[test_id].test == test
|
||||
if inputs is not None:
|
||||
if isinstance(inputs, dict):
|
||||
inputs = test.Input(**inputs)
|
||||
if inputs_data is not None:
|
||||
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
|
||||
assert inputs == catalog.tests[test_id].inputs
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
|
||||
def test_from_dict(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Instantiate AntaCatalog from a dict
|
||||
"""
|
||||
with open(file=str(DATA_DIR / catalog_data["filename"]), mode="r", encoding="UTF-8") as file:
|
||||
"""Instantiate AntaCatalog from a dict."""
|
||||
file = DATA_DIR / catalog_data["filename"]
|
||||
with file.open(encoding="UTF-8") as file:
|
||||
data = safe_load(file)
|
||||
catalog: AntaCatalog = AntaCatalog.from_dict(data)
|
||||
|
||||
assert len(catalog.tests) == len(catalog_data["tests"])
|
||||
for test_id, (test, inputs) in enumerate(catalog_data["tests"]):
|
||||
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
|
||||
assert catalog.tests[test_id].test == test
|
||||
if inputs is not None:
|
||||
if isinstance(inputs, dict):
|
||||
inputs = test.Input(**inputs)
|
||||
if inputs_data is not None:
|
||||
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
|
||||
assert inputs == catalog.tests[test_id].inputs
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", CATALOG_PARSE_FAIL_DATA, ids=generate_test_ids_list(CATALOG_PARSE_FAIL_DATA))
|
||||
def test_parse_fail(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Errors when instantiating AntaCatalog from a file
|
||||
"""
|
||||
with pytest.raises((ValidationError, ValueError)) as exec_info:
|
||||
"""Errors when instantiating AntaCatalog from a file."""
|
||||
with pytest.raises((ValidationError, TypeError)) as exec_info:
|
||||
AntaCatalog.parse(str(DATA_DIR / catalog_data["filename"]))
|
||||
if isinstance(exec_info.value, ValidationError):
|
||||
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
|
||||
|
@ -234,34 +228,29 @@ class Test_AntaCatalog:
|
|||
assert catalog_data["error"] in str(exec_info)
|
||||
|
||||
def test_parse_fail_parsing(self, caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""
|
||||
Errors when instantiating AntaCatalog from a file
|
||||
"""
|
||||
with pytest.raises(Exception) as exec_info:
|
||||
"""Errors when instantiating AntaCatalog from a file."""
|
||||
with pytest.raises(FileNotFoundError) as exec_info:
|
||||
AntaCatalog.parse(str(DATA_DIR / "catalog_does_not_exist.yml"))
|
||||
assert "No such file or directory" in str(exec_info)
|
||||
assert len(caplog.record_tuples) >= 1
|
||||
_, _, message = caplog.record_tuples[0]
|
||||
assert "Unable to parse ANTA Test Catalog file" in message
|
||||
assert "FileNotFoundError ([Errno 2] No such file or directory" in message
|
||||
assert "FileNotFoundError: [Errno 2] No such file or directory" in message
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", CATALOG_FROM_LIST_FAIL_DATA, ids=generate_test_ids_list(CATALOG_FROM_LIST_FAIL_DATA))
|
||||
def test_from_list_fail(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Errors when instantiating AntaCatalog from a list of tuples
|
||||
"""
|
||||
"""Errors when instantiating AntaCatalog from a list of tuples."""
|
||||
with pytest.raises(ValidationError) as exec_info:
|
||||
AntaCatalog.from_list(catalog_data["tests"])
|
||||
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", CATALOG_FROM_DICT_FAIL_DATA, ids=generate_test_ids_list(CATALOG_FROM_DICT_FAIL_DATA))
|
||||
def test_from_dict_fail(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Errors when instantiating AntaCatalog from a list of tuples
|
||||
"""
|
||||
with open(file=str(DATA_DIR / catalog_data["filename"]), mode="r", encoding="UTF-8") as file:
|
||||
"""Errors when instantiating AntaCatalog from a list of tuples."""
|
||||
file = DATA_DIR / catalog_data["filename"]
|
||||
with file.open(encoding="UTF-8") as file:
|
||||
data = safe_load(file)
|
||||
with pytest.raises((ValidationError, ValueError)) as exec_info:
|
||||
with pytest.raises((ValidationError, TypeError)) as exec_info:
|
||||
AntaCatalog.from_dict(data)
|
||||
if isinstance(exec_info.value, ValidationError):
|
||||
assert catalog_data["error"] in exec_info.value.errors()[0]["msg"]
|
||||
|
@ -269,9 +258,7 @@ class Test_AntaCatalog:
|
|||
assert catalog_data["error"] in str(exec_info)
|
||||
|
||||
def test_filename(self) -> None:
|
||||
"""
|
||||
Test filename
|
||||
"""
|
||||
"""Test filename."""
|
||||
catalog = AntaCatalog(filename="test")
|
||||
assert catalog.filename == Path("test")
|
||||
catalog = AntaCatalog(filename=Path("test"))
|
||||
|
@ -279,33 +266,34 @@ class Test_AntaCatalog:
|
|||
|
||||
@pytest.mark.parametrize("catalog_data", INIT_CATALOG_DATA, ids=generate_test_ids_list(INIT_CATALOG_DATA))
|
||||
def test__tests_setter_success(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Success when setting AntaCatalog.tests from a list of tuples
|
||||
"""
|
||||
"""Success when setting AntaCatalog.tests from a list of tuples."""
|
||||
catalog = AntaCatalog()
|
||||
catalog.tests = [AntaTestDefinition(test=test, inputs=inputs) for test, inputs in catalog_data["tests"]]
|
||||
assert len(catalog.tests) == len(catalog_data["tests"])
|
||||
for test_id, (test, inputs) in enumerate(catalog_data["tests"]):
|
||||
for test_id, (test, inputs_data) in enumerate(catalog_data["tests"]):
|
||||
assert catalog.tests[test_id].test == test
|
||||
if inputs is not None:
|
||||
if isinstance(inputs, dict):
|
||||
inputs = test.Input(**inputs)
|
||||
if inputs_data is not None:
|
||||
inputs = test.Input(**inputs_data) if isinstance(inputs_data, dict) else inputs_data
|
||||
assert inputs == catalog.tests[test_id].inputs
|
||||
|
||||
@pytest.mark.parametrize("catalog_data", TESTS_SETTER_FAIL_DATA, ids=generate_test_ids_list(TESTS_SETTER_FAIL_DATA))
|
||||
def test__tests_setter_fail(self, catalog_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Errors when setting AntaCatalog.tests from a list of tuples
|
||||
"""
|
||||
"""Errors when setting AntaCatalog.tests from a list of tuples."""
|
||||
catalog = AntaCatalog()
|
||||
with pytest.raises(ValueError) as exec_info:
|
||||
with pytest.raises(TypeError) as exec_info:
|
||||
catalog.tests = catalog_data["tests"]
|
||||
assert catalog_data["error"] in str(exec_info)
|
||||
|
||||
def test_get_tests_by_tags(self) -> None:
|
||||
"""
|
||||
Test AntaCatalog.test_get_tests_by_tags()
|
||||
"""
|
||||
"""Test AntaCatalog.get_tests_by_tags()."""
|
||||
catalog: AntaCatalog = AntaCatalog.parse(str(DATA_DIR / "test_catalog_with_tags.yml"))
|
||||
tests: list[AntaTestDefinition] = catalog.get_tests_by_tags(tags=["leaf"])
|
||||
tests: list[AntaTestDefinition] = catalog.get_tests_by_tags(tags={"leaf"})
|
||||
assert len(tests) == 3
|
||||
tests = catalog.get_tests_by_tags(tags={"leaf"}, strict=True)
|
||||
assert len(tests) == 2
|
||||
|
||||
def test_get_tests_by_names(self) -> None:
|
||||
"""Test AntaCatalog.get_tests_by_tags()."""
|
||||
catalog: AntaCatalog = AntaCatalog.parse(str(DATA_DIR / "test_catalog_with_tags.yml"))
|
||||
tests: list[AntaTestDefinition] = catalog.get_tests_by_names(names={"VerifyUptime", "VerifyCoredump"})
|
||||
assert len(tests) == 3
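A hedged usage sketch of the catalog helpers exercised above. The YAML path is a placeholder, and the strict semantics are only what the 3-versus-2 counts suggest (a narrower tag match), not a documented guarantee.

# Placeholder catalog path; only the method names and keyword arguments
# are taken from the tests above.
from anta.catalog import AntaCatalog

catalog = AntaCatalog.parse("my_catalog.yml")
leaf_tests = catalog.get_tests_by_tags(tags={"leaf"})                # any test carrying the tag
strict_leaf = catalog.get_tests_by_tags(tags={"leaf"}, strict=True)  # narrower match (3 -> 2 above)
named = catalog.get_tests_by_names(names={"VerifyUptime", "VerifyCoredump"})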
@ -1,20 +1,17 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
test anta.device.py
|
||||
"""
|
||||
"""test anta.device.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from unittest.mock import patch
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
from _pytest.mark.structures import ParameterSet
|
||||
from asyncssh import SSHClientConnection, SSHClientConnectionOptions
|
||||
from rich import print as rprint
|
||||
|
||||
|
@ -24,6 +21,9 @@ from anta.models import AntaCommand
|
|||
from tests.lib.fixture import COMMAND_OUTPUT
|
||||
from tests.lib.utils import generate_test_ids_list
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _pytest.mark.structures import ParameterSet
|
||||
|
||||
INIT_DATA: list[dict[str, Any]] = [
|
||||
{
|
||||
"name": "no name, no port",
|
||||
|
@ -155,8 +155,8 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
]
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
"expected": {
|
||||
|
@ -211,7 +211,7 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
},
|
||||
]
|
||||
],
|
||||
},
|
||||
},
|
||||
"expected": {
|
||||
|
@ -266,7 +266,7 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
},
|
||||
]
|
||||
],
|
||||
},
|
||||
},
|
||||
"expected": {
|
||||
|
@ -322,7 +322,7 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
},
|
||||
]
|
||||
],
|
||||
},
|
||||
},
|
||||
"expected": {
|
||||
|
@ -356,8 +356,12 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"command": "show version",
|
||||
"patch_kwargs": {
|
||||
"side_effect": aioeapi.EapiCommandError(
|
||||
passed=[], failed="show version", errors=["Authorization denied for command 'show version'"], errmsg="Invalid command", not_exec=[]
|
||||
)
|
||||
passed=[],
|
||||
failed="show version",
|
||||
errors=["Authorization denied for command 'show version'"],
|
||||
errmsg="Invalid command",
|
||||
not_exec=[],
|
||||
),
|
||||
},
|
||||
},
|
||||
"expected": {"output": None, "errors": ["Authorization denied for command 'show version'"]},
|
||||
|
@ -369,7 +373,7 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"command": "show version",
|
||||
"patch_kwargs": {"side_effect": httpx.HTTPError(message="404")},
|
||||
},
|
||||
"expected": {"output": None, "errors": ["404"]},
|
||||
"expected": {"output": None, "errors": ["HTTPError: 404"]},
|
||||
},
|
||||
{
|
||||
"name": "httpx.ConnectError",
|
||||
|
@ -378,7 +382,7 @@ AIOEAPI_COLLECT_DATA: list[dict[str, Any]] = [
|
|||
"command": "show version",
|
||||
"patch_kwargs": {"side_effect": httpx.ConnectError(message="Cannot open port")},
|
||||
},
|
||||
"expected": {"output": None, "errors": ["Cannot open port"]},
|
||||
"expected": {"output": None, "errors": ["ConnectError: Cannot open port"]},
|
||||
},
|
||||
]
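The expected error strings in this data set now carry the exception class name as a prefix ("HTTPError: 404", "ConnectError: Cannot open port"). A plausible way to produce that rendering is shown below as an assumption about the formatting, not as the actual anta.device or anta.logger code (the exc_to_str helper imported in the logger tests further down may be the real implementation).

# Assumed formatting: "<ExceptionClass>: <message>". Illustrative only.
import httpx

def exc_to_error_string(exc: Exception) -> str:
    return f"{type(exc).__name__}: {exc}"

assert exc_to_error_string(httpx.HTTPError("404")) == "HTTPError: 404"
assert exc_to_error_string(httpx.ConnectError(message="Cannot open port")) == "ConnectError: Cannot open port"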
|
||||
AIOEAPI_COPY_DATA: list[dict[str, Any]] = [
|
||||
|
@ -387,7 +391,7 @@ AIOEAPI_COPY_DATA: list[dict[str, Any]] = [
|
|||
"device": {},
|
||||
"copy": {
|
||||
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
|
||||
"destination": Path("."),
|
||||
"destination": Path(),
|
||||
"direction": "from",
|
||||
},
|
||||
},
|
||||
|
@ -396,7 +400,7 @@ AIOEAPI_COPY_DATA: list[dict[str, Any]] = [
|
|||
"device": {},
|
||||
"copy": {
|
||||
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
|
||||
"destination": Path("."),
|
||||
"destination": Path(),
|
||||
"direction": "to",
|
||||
},
|
||||
},
|
||||
|
@ -405,7 +409,7 @@ AIOEAPI_COPY_DATA: list[dict[str, Any]] = [
|
|||
"device": {},
|
||||
"copy": {
|
||||
"sources": [Path("/mnt/flash"), Path("/var/log/agents")],
|
||||
"destination": Path("."),
|
||||
"destination": Path(),
|
||||
"direction": "wrong",
|
||||
},
|
||||
},
|
||||
|
@ -417,26 +421,28 @@ REFRESH_DATA: list[dict[str, Any]] = [
|
|||
"patch_kwargs": (
|
||||
{"return_value": True},
|
||||
{
|
||||
"return_value": {
|
||||
"mfgName": "Arista",
|
||||
"modelName": "DCS-7280CR3-32P4-F",
|
||||
"hardwareRevision": "11.00",
|
||||
"serialNumber": "JPE19500066",
|
||||
"systemMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"hwMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"configMacAddress": "00:00:00:00:00:00",
|
||||
"version": "4.31.1F-34361447.fraserrel (engineering build)",
|
||||
"architecture": "x86_64",
|
||||
"internalVersion": "4.31.1F-34361447.fraserrel",
|
||||
"internalBuildId": "4940d112-a2fc-4970-8b5a-a16cd03fd08c",
|
||||
"imageFormatVersion": "3.0",
|
||||
"imageOptimization": "Default",
|
||||
"bootupTimestamp": 1700729434.5892005,
|
||||
"uptime": 20666.78,
|
||||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
"return_value": [
|
||||
{
|
||||
"mfgName": "Arista",
|
||||
"modelName": "DCS-7280CR3-32P4-F",
|
||||
"hardwareRevision": "11.00",
|
||||
"serialNumber": "JPE19500066",
|
||||
"systemMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"hwMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"configMacAddress": "00:00:00:00:00:00",
|
||||
"version": "4.31.1F-34361447.fraserrel (engineering build)",
|
||||
"architecture": "x86_64",
|
||||
"internalVersion": "4.31.1F-34361447.fraserrel",
|
||||
"internalBuildId": "4940d112-a2fc-4970-8b5a-a16cd03fd08c",
|
||||
"imageFormatVersion": "3.0",
|
||||
"imageOptimization": "Default",
|
||||
"bootupTimestamp": 1700729434.5892005,
|
||||
"uptime": 20666.78,
|
||||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
],
|
||||
},
|
||||
),
|
||||
"expected": {"is_online": True, "established": True, "hw_model": "DCS-7280CR3-32P4-F"},
|
||||
|
@ -466,7 +472,7 @@ REFRESH_DATA: list[dict[str, Any]] = [
|
|||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
},
|
||||
},
|
||||
),
|
||||
"expected": {"is_online": False, "established": False, "hw_model": None},
|
||||
|
@ -477,25 +483,27 @@ REFRESH_DATA: list[dict[str, Any]] = [
|
|||
"patch_kwargs": (
|
||||
{"return_value": True},
|
||||
{
|
||||
"return_value": {
|
||||
"mfgName": "Arista",
|
||||
"hardwareRevision": "11.00",
|
||||
"serialNumber": "JPE19500066",
|
||||
"systemMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"hwMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"configMacAddress": "00:00:00:00:00:00",
|
||||
"version": "4.31.1F-34361447.fraserrel (engineering build)",
|
||||
"architecture": "x86_64",
|
||||
"internalVersion": "4.31.1F-34361447.fraserrel",
|
||||
"internalBuildId": "4940d112-a2fc-4970-8b5a-a16cd03fd08c",
|
||||
"imageFormatVersion": "3.0",
|
||||
"imageOptimization": "Default",
|
||||
"bootupTimestamp": 1700729434.5892005,
|
||||
"uptime": 20666.78,
|
||||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
"return_value": [
|
||||
{
|
||||
"mfgName": "Arista",
|
||||
"hardwareRevision": "11.00",
|
||||
"serialNumber": "JPE19500066",
|
||||
"systemMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"hwMacAddress": "fc:bd:67:3d:13:c5",
|
||||
"configMacAddress": "00:00:00:00:00:00",
|
||||
"version": "4.31.1F-34361447.fraserrel (engineering build)",
|
||||
"architecture": "x86_64",
|
||||
"internalVersion": "4.31.1F-34361447.fraserrel",
|
||||
"internalBuildId": "4940d112-a2fc-4970-8b5a-a16cd03fd08c",
|
||||
"imageFormatVersion": "3.0",
|
||||
"imageOptimization": "Default",
|
||||
"bootupTimestamp": 1700729434.5892005,
|
||||
"uptime": 20666.78,
|
||||
"memTotal": 8099732,
|
||||
"memFree": 4989568,
|
||||
"isIntlVersion": False,
|
||||
}
|
||||
],
|
||||
},
|
||||
),
|
||||
"expected": {"is_online": True, "established": False, "hw_model": None},
|
||||
|
@ -507,8 +515,12 @@ REFRESH_DATA: list[dict[str, Any]] = [
|
|||
{"return_value": True},
|
||||
{
|
||||
"side_effect": aioeapi.EapiCommandError(
|
||||
passed=[], failed="show version", errors=["Authorization denied for command 'show version'"], errmsg="Invalid command", not_exec=[]
|
||||
)
|
||||
passed=[],
|
||||
failed="show version",
|
||||
errors=["Authorization denied for command 'show version'"],
|
||||
errmsg="Invalid command",
|
||||
not_exec=[],
|
||||
),
|
||||
},
|
||||
),
|
||||
"expected": {"is_online": True, "established": False, "hw_model": None},
|
||||
|
@ -599,21 +611,17 @@ CACHE_STATS_DATA: list[ParameterSet] = [
|
|||
|
||||
|
||||
class TestAntaDevice:
|
||||
"""
|
||||
Test for anta.device.AntaDevice Abstract class
|
||||
"""
|
||||
"""Test for anta.device.AntaDevice Abstract class."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.asyncio()
|
||||
@pytest.mark.parametrize(
|
||||
"device, command_data, expected_data",
|
||||
map(lambda d: (d["device"], d["command"], d["expected"]), COLLECT_DATA),
|
||||
("device", "command_data", "expected_data"),
|
||||
((d["device"], d["command"], d["expected"]) for d in COLLECT_DATA),
|
||||
indirect=["device"],
|
||||
ids=generate_test_ids_list(COLLECT_DATA),
|
||||
)
|
||||
async def test_collect(self, device: AntaDevice, command_data: dict[str, Any], expected_data: dict[str, Any]) -> None:
|
||||
"""
|
||||
Test AntaDevice.collect behavior
|
||||
"""
|
||||
"""Test AntaDevice.collect behavior."""
|
||||
command = AntaCommand(command=command_data["command"], use_cache=command_data["use_cache"])
|
||||
|
||||
# Dummy output for cache hit
|
||||
|
@ -646,32 +654,21 @@ class TestAntaDevice:
|
|||
assert device.cache is None
|
||||
device._collect.assert_called_once_with(command=command) # type: ignore[attr-defined] # pylint: disable=protected-access
|
||||
|
||||
@pytest.mark.parametrize("device, expected", CACHE_STATS_DATA, indirect=["device"])
|
||||
@pytest.mark.parametrize(("device", "expected"), CACHE_STATS_DATA, indirect=["device"])
|
||||
def test_cache_statistics(self, device: AntaDevice, expected: dict[str, Any] | None) -> None:
|
||||
"""
|
||||
Verify that when cache statistics attribute does not exist
|
||||
TODO add a test where cache has some value
|
||||
"""Verify that when cache statistics attribute does not exist.
|
||||
|
||||
TODO add a test where cache has some value.
|
||||
"""
|
||||
assert device.cache_statistics == expected
|
||||
|
||||
def test_supports(self, device: AntaDevice) -> None:
|
||||
"""
|
||||
Test if the supports() method
|
||||
"""
|
||||
command = AntaCommand(command="show hardware counter drop", errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"])
|
||||
assert device.supports(command) is False
|
||||
command = AntaCommand(command="show hardware counter drop")
|
||||
assert device.supports(command) is True
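The two assertions above suggest supports() keys off the "not supported on this hardware platform" error text. A standalone illustration of that check; the helper name is hypothetical and this is not the AntaDevice implementation.

# Hypothetical helper mirroring the behaviour asserted above.
def looks_unsupported(errors: list[str]) -> bool:
    return any("not supported on this hardware platform" in error for error in errors)

assert looks_unsupported(["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"])
assert not looks_unsupported([])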
|
||||
|
||||
|
||||
class TestAsyncEOSDevice:
|
||||
"""
|
||||
Test for anta.device.AsyncEOSDevice
|
||||
"""
|
||||
"""Test for anta.device.AsyncEOSDevice."""
|
||||
|
||||
@pytest.mark.parametrize("data", INIT_DATA, ids=generate_test_ids_list(INIT_DATA))
|
||||
def test__init__(self, data: dict[str, Any]) -> None:
|
||||
"""Test the AsyncEOSDevice constructor"""
|
||||
"""Test the AsyncEOSDevice constructor."""
|
||||
device = AsyncEOSDevice(**data["device"])
|
||||
|
||||
assert device.name == data["expected"]["name"]
|
||||
|
@ -683,12 +680,12 @@ class TestAsyncEOSDevice:
|
|||
assert device.cache_locks is not None
|
||||
hash(device)
|
||||
|
||||
with patch("anta.device.__DEBUG__", True):
|
||||
with patch("anta.device.__DEBUG__", new=True):
|
||||
rprint(device)
|
||||
|
||||
@pytest.mark.parametrize("data", EQUALITY_DATA, ids=generate_test_ids_list(EQUALITY_DATA))
|
||||
def test__eq(self, data: dict[str, Any]) -> None:
|
||||
"""Test the AsyncEOSDevice equality"""
|
||||
"""Test the AsyncEOSDevice equality."""
|
||||
device1 = AsyncEOSDevice(**data["device1"])
|
||||
device2 = AsyncEOSDevice(**data["device2"])
|
||||
if data["expected"]:
|
||||
|
@ -696,49 +693,45 @@ class TestAsyncEOSDevice:
|
|||
else:
|
||||
assert device1 != device2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.asyncio()
|
||||
@pytest.mark.parametrize(
|
||||
"async_device, patch_kwargs, expected",
|
||||
map(lambda d: (d["device"], d["patch_kwargs"], d["expected"]), REFRESH_DATA),
|
||||
("async_device", "patch_kwargs", "expected"),
|
||||
((d["device"], d["patch_kwargs"], d["expected"]) for d in REFRESH_DATA),
|
||||
ids=generate_test_ids_list(REFRESH_DATA),
|
||||
indirect=["async_device"],
|
||||
)
|
||||
async def test_refresh(self, async_device: AsyncEOSDevice, patch_kwargs: list[dict[str, Any]], expected: dict[str, Any]) -> None:
|
||||
# pylint: disable=protected-access
|
||||
"""Test AsyncEOSDevice.refresh()"""
|
||||
with patch.object(async_device._session, "check_connection", **patch_kwargs[0]):
|
||||
with patch.object(async_device._session, "cli", **patch_kwargs[1]):
|
||||
await async_device.refresh()
|
||||
async_device._session.check_connection.assert_called_once()
|
||||
if expected["is_online"]:
|
||||
async_device._session.cli.assert_called_once()
|
||||
assert async_device.is_online == expected["is_online"]
|
||||
assert async_device.established == expected["established"]
|
||||
assert async_device.hw_model == expected["hw_model"]
|
||||
"""Test AsyncEOSDevice.refresh()."""
|
||||
with patch.object(async_device._session, "check_connection", **patch_kwargs[0]), patch.object(async_device._session, "cli", **patch_kwargs[1]):
|
||||
await async_device.refresh()
|
||||
async_device._session.check_connection.assert_called_once()
|
||||
if expected["is_online"]:
|
||||
async_device._session.cli.assert_called_once()
|
||||
assert async_device.is_online == expected["is_online"]
|
||||
assert async_device.established == expected["established"]
|
||||
assert async_device.hw_model == expected["hw_model"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.asyncio()
|
||||
@pytest.mark.parametrize(
|
||||
"async_device, command, expected",
|
||||
map(lambda d: (d["device"], d["command"], d["expected"]), AIOEAPI_COLLECT_DATA),
|
||||
("async_device", "command", "expected"),
|
||||
((d["device"], d["command"], d["expected"]) for d in AIOEAPI_COLLECT_DATA),
|
||||
ids=generate_test_ids_list(AIOEAPI_COLLECT_DATA),
|
||||
indirect=["async_device"],
|
||||
)
|
||||
async def test__collect(self, async_device: AsyncEOSDevice, command: dict[str, Any], expected: dict[str, Any]) -> None:
|
||||
# pylint: disable=protected-access
|
||||
"""Test AsyncEOSDevice._collect()"""
|
||||
if "revision" in command:
|
||||
cmd = AntaCommand(command=command["command"], revision=command["revision"])
|
||||
else:
|
||||
cmd = AntaCommand(command=command["command"])
|
||||
"""Test AsyncEOSDevice._collect()."""
|
||||
cmd = AntaCommand(command=command["command"], revision=command["revision"]) if "revision" in command else AntaCommand(command=command["command"])
|
||||
with patch.object(async_device._session, "cli", **command["patch_kwargs"]):
|
||||
await async_device.collect(cmd)
|
||||
commands = []
|
||||
commands: list[dict[str, Any]] = []
|
||||
if async_device.enable and async_device._enable_password is not None:
|
||||
commands.append(
|
||||
{
|
||||
"cmd": "enable",
|
||||
"input": str(async_device._enable_password),
|
||||
}
|
||||
},
|
||||
)
|
||||
elif async_device.enable:
|
||||
# No password
|
||||
|
@ -751,15 +744,15 @@ class TestAsyncEOSDevice:
|
|||
assert cmd.output == expected["output"]
|
||||
assert cmd.errors == expected["errors"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.asyncio()
|
||||
@pytest.mark.parametrize(
|
||||
"async_device, copy",
|
||||
map(lambda d: (d["device"], d["copy"]), AIOEAPI_COPY_DATA),
|
||||
("async_device", "copy"),
|
||||
((d["device"], d["copy"]) for d in AIOEAPI_COPY_DATA),
|
||||
ids=generate_test_ids_list(AIOEAPI_COPY_DATA),
|
||||
indirect=["async_device"],
|
||||
)
|
||||
async def test_copy(self, async_device: AsyncEOSDevice, copy: dict[str, Any]) -> None:
|
||||
"""Test AsyncEOSDevice.copy()"""
|
||||
"""Test AsyncEOSDevice.copy()."""
|
||||
conn = SSHClientConnection(asyncio.get_event_loop(), SSHClientConnectionOptions())
|
||||
with patch("asyncssh.connect") as connect_mock:
|
||||
connect_mock.return_value.__aenter__.return_value = conn
|
||||
|
|
|
@ -1,53 +1,65 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
Tests for anta.logger
|
||||
"""
|
||||
"""Tests for anta.logger."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.logger import anta_log_exception
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pytest import LogCaptureFixture
|
||||
from anta.logger import anta_log_exception, exc_to_str, tb_to_str
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"exception, message, calling_logger, __DEBUG__value, expected_message",
|
||||
("exception", "message", "calling_logger", "debug_value", "expected_message"),
|
||||
[
|
||||
pytest.param(ValueError("exception message"), None, None, False, "ValueError (exception message)", id="exception only"),
|
||||
pytest.param(ValueError("exception message"), "custom message", None, False, "custom message\nValueError (exception message)", id="custom message"),
|
||||
pytest.param(
|
||||
ValueError("exception message"),
|
||||
None,
|
||||
None,
|
||||
False,
|
||||
"ValueError: exception message",
|
||||
id="exception only",
|
||||
),
|
||||
pytest.param(
|
||||
ValueError("exception message"),
|
||||
"custom message",
|
||||
None,
|
||||
False,
|
||||
"custom message\nValueError: exception message",
|
||||
id="custom message",
|
||||
),
|
||||
pytest.param(
|
||||
ValueError("exception message"),
|
||||
"custom logger",
|
||||
logging.getLogger("custom"),
|
||||
False,
|
||||
"custom logger\nValueError (exception message)",
|
||||
"custom logger\nValueError: exception message",
|
||||
id="custom logger",
|
||||
),
|
||||
pytest.param(
|
||||
ValueError("exception message"), "Use with custom message", None, True, "Use with custom message\nValueError (exception message)", id="__DEBUG__ on"
|
||||
ValueError("exception message"),
|
||||
"Use with custom message",
|
||||
None,
|
||||
True,
|
||||
"Use with custom message\nValueError: exception message",
|
||||
id="__DEBUG__ on",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_anta_log_exception(
|
||||
caplog: LogCaptureFixture,
|
||||
caplog: pytest.LogCaptureFixture,
|
||||
exception: Exception,
|
||||
message: str | None,
|
||||
calling_logger: logging.Logger | None,
|
||||
__DEBUG__value: bool,
|
||||
debug_value: bool,
|
||||
expected_message: str,
|
||||
) -> None:
|
||||
"""
|
||||
Test anta_log_exception
|
||||
"""
|
||||
|
||||
# pylint: disable=too-many-arguments
|
||||
"""Test anta_log_exception."""
|
||||
if calling_logger is not None:
|
||||
# https://github.com/pytest-dev/pytest/issues/3697
|
||||
calling_logger.propagate = True
|
||||
|
@ -57,12 +69,12 @@ def test_anta_log_exception(
|
|||
# Need to raise to trigger nice stacktrace for __DEBUG__ == True
|
||||
try:
|
||||
raise exception
|
||||
except ValueError as e:
|
||||
with patch("anta.logger.__DEBUG__", __DEBUG__value):
|
||||
anta_log_exception(e, message=message, calling_logger=calling_logger)
|
||||
except ValueError as exc:
|
||||
with patch("anta.logger.__DEBUG__", new=debug_value):
|
||||
anta_log_exception(exc, message=message, calling_logger=calling_logger)
|
||||
|
||||
# Two log captured
|
||||
if __DEBUG__value:
|
||||
if debug_value:
|
||||
assert len(caplog.record_tuples) == 2
|
||||
else:
|
||||
assert len(caplog.record_tuples) == 1
|
||||
|
@ -76,5 +88,29 @@ def test_anta_log_exception(
|
|||
assert level == logging.CRITICAL
|
||||
assert message == expected_message
|
||||
# the only place where we can see the stacktrace is in caplog.text
|
||||
if __DEBUG__value is True:
|
||||
if debug_value:
|
||||
assert "Traceback" in caplog.text
|
||||
|
||||
|
||||
def my_raising_function(exception: Exception) -> None:
|
||||
"""Raise Exception."""
|
||||
raise exception
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("exception", "expected_output"),
|
||||
[(ValueError("test"), "ValueError: test"), (ValueError(), "ValueError")],
|
||||
)
|
||||
def test_exc_to_str(exception: Exception, expected_output: str) -> None:
|
||||
"""Test exc_to_str."""
|
||||
assert exc_to_str(exception) == expected_output
|
||||
|
||||
|
||||
def test_tb_to_str() -> None:
|
||||
"""Test tb_to_str."""
|
||||
try:
|
||||
my_raising_function(ValueError("test"))
|
||||
except ValueError as exc:
|
||||
output = tb_to_str(exc)
|
||||
assert "Traceback" in output
|
||||
assert 'my_raising_function(ValueError("test"))' in output
|
||||
|
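
As a usage note, a hedged sketch of the helpers covered above; the logger name is arbitrary and only used for illustration:

import logging

from anta.logger import anta_log_exception, exc_to_str

logger = logging.getLogger("my_app")  # arbitrary name

try:
    int("not a number")
except ValueError as exc:
    # Logged at CRITICAL level as "conversion failed\nValueError: invalid literal ..."
    anta_log_exception(exc, message="conversion failed", calling_logger=logger)
    assert exc_to_str(exc).startswith("ValueError: ")
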
|
|
@ -1,209 +1,242 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
test anta.models.py
|
||||
"""
|
||||
"""test anta.models.py."""
|
||||
|
||||
# Mypy does not understand AntaTest.Input typing
|
||||
# mypy: disable-error-code=attr-defined
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any, ClassVar
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.decorators import deprecated_test, skip_on_platforms
|
||||
from anta.device import AntaDevice
|
||||
from anta.models import AntaCommand, AntaTemplate, AntaTest
|
||||
from tests.lib.fixture import DEVICE_HW_MODEL
|
||||
from tests.lib.utils import generate_test_ids
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from anta.device import AntaDevice
|
||||
|
||||
|
||||
class FakeTest(AntaTest):
|
||||
"""ANTA test that always succeed"""
|
||||
"""ANTA test that always succeed."""
|
||||
|
||||
name = "FakeTest"
|
||||
description = "ANTA test that always succeed"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
class FakeTestWithFailedCommand(AntaTest):
|
||||
"""ANTA test with a command that failed"""
|
||||
"""ANTA test with a command that failed."""
|
||||
|
||||
name = "FakeTestWithFailedCommand"
|
||||
description = "ANTA test with a command that failed"
|
||||
categories = []
|
||||
commands = [AntaCommand(command="show version", errors=["failed command"])]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version", errors=["failed command"])]
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
class FakeTestWithUnsupportedCommand(AntaTest):
|
||||
"""ANTA test with an unsupported command"""
|
||||
"""ANTA test with an unsupported command."""
|
||||
|
||||
name = "FakeTestWithUnsupportedCommand"
|
||||
description = "ANTA test with an unsupported command"
|
||||
categories = []
|
||||
commands = [AntaCommand(command="show hardware counter drop", errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"])]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [
|
||||
AntaCommand(
|
||||
command="show hardware counter drop",
|
||||
errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"],
|
||||
)
|
||||
]
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
class FakeTestWithInput(AntaTest):
|
||||
"""ANTA test with inputs that always succeed"""
|
||||
"""ANTA test with inputs that always succeed."""
|
||||
|
||||
name = "FakeTestWithInput"
|
||||
description = "ANTA test with inputs that always succeed"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for FakeTestWithInput test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
string: str
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.inputs.string)
|
||||
|
||||
|
||||
class FakeTestWithTemplate(AntaTest):
|
||||
"""ANTA test with template that always succeed"""
|
||||
"""ANTA test with template that always succeed."""
|
||||
|
||||
name = "FakeTestWithTemplate"
|
||||
description = "ANTA test with template that always succeed"
|
||||
categories = []
|
||||
commands = [AntaTemplate(template="show interface {interface}")]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")]
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for FakeTestWithTemplate test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
interface: str
|
||||
|
||||
def render(self, template: AntaTemplate) -> list[AntaCommand]:
|
||||
"""Render function."""
|
||||
return [template.render(interface=self.inputs.interface)]
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.instance_commands[0].command)
|
||||
|
||||
|
||||
class FakeTestWithTemplateNoRender(AntaTest):
|
||||
"""ANTA test with template that miss the render() method"""
|
||||
"""ANTA test with template that miss the render() method."""
|
||||
|
||||
name = "FakeTestWithTemplateNoRender"
|
||||
description = "ANTA test with template that miss the render() method"
|
||||
categories = []
|
||||
commands = [AntaTemplate(template="show interface {interface}")]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")]
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for FakeTestWithTemplateNoRender test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
interface: str
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.instance_commands[0].command)
|
||||
|
||||
|
||||
class FakeTestWithTemplateBadRender1(AntaTest):
|
||||
"""ANTA test with template that raises a AntaTemplateRenderError exception"""
|
||||
"""ANTA test with template that raises a AntaTemplateRenderError exception."""
|
||||
|
||||
name = "FakeTestWithTemplateBadRender"
|
||||
description = "ANTA test with template that raises a AntaTemplateRenderError exception"
|
||||
categories = []
|
||||
commands = [AntaTemplate(template="show interface {interface}")]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")]
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for FakeTestWithTemplateBadRender1 test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
interface: str
|
||||
|
||||
def render(self, template: AntaTemplate) -> list[AntaCommand]:
|
||||
"""Render function."""
|
||||
return [template.render(wrong_template_param=self.inputs.interface)]
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.instance_commands[0].command)
|
||||
|
||||
|
||||
class FakeTestWithTemplateBadRender2(AntaTest):
|
||||
"""ANTA test with template that raises an arbitrary exception"""
|
||||
"""ANTA test with template that raises an arbitrary exception."""
|
||||
|
||||
name = "FakeTestWithTemplateBadRender2"
|
||||
description = "ANTA test with template that raises an arbitrary exception"
|
||||
categories = []
|
||||
commands = [AntaTemplate(template="show interface {interface}")]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")]
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for FakeTestWithTemplateBadRender2 test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
interface: str
|
||||
|
||||
def render(self, template: AntaTemplate) -> list[AntaCommand]:
|
||||
raise Exception() # pylint: disable=broad-exception-raised
|
||||
"""Render function."""
|
||||
raise RuntimeError(template)
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.instance_commands[0].command)
|
||||
|
||||
|
||||
class SkipOnPlatformTest(AntaTest):
|
||||
"""ANTA test that is skipped"""
|
||||
"""ANTA test that is skipped."""
|
||||
|
||||
name = "SkipOnPlatformTest"
|
||||
description = "ANTA test that is skipped on a specific platform"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@skip_on_platforms([DEVICE_HW_MODEL])
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
class UnSkipOnPlatformTest(AntaTest):
|
||||
"""ANTA test that is skipped"""
|
||||
"""ANTA test that is skipped."""
|
||||
|
||||
name = "UnSkipOnPlatformTest"
|
||||
description = "ANTA test that is skipped on a specific platform"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@skip_on_platforms(["dummy"])
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
class SkipOnPlatformTestWithInput(AntaTest):
|
||||
"""ANTA test skipped on platforms but with Input"""
|
||||
"""ANTA test skipped on platforms but with Input."""
|
||||
|
||||
name = "SkipOnPlatformTestWithInput"
|
||||
description = "ANTA test skipped on platforms but with Input"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
class Input(AntaTest.Input):
|
||||
"""Inputs for SkipOnPlatformTestWithInput test."""
|
||||
|
||||
class Input(AntaTest.Input): # pylint: disable=missing-class-docstring
|
||||
string: str
|
||||
|
||||
@skip_on_platforms([DEVICE_HW_MODEL])
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success(self.inputs.string)
|
||||
|
||||
|
||||
class DeprecatedTestWithoutNewTest(AntaTest):
|
||||
"""ANTA test that is deprecated without new test"""
|
||||
"""ANTA test that is deprecated without new test."""
|
||||
|
||||
name = "DeprecatedTestWitouthNewTest"
|
||||
description = "ANTA test that is deprecated without new test"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@deprecated_test()
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
|
@ -212,52 +245,88 @@ class DeprecatedTestWithNewTest(AntaTest):
|
|||
|
||||
name = "DeprecatedTestWithNewTest"
|
||||
description = "ANTA deprecated test with New Test"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@deprecated_test(new_tests=["NewTest"])
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
"""Test function."""
|
||||
self.result.is_success()
|
||||
|
||||
|
||||
ANTATEST_DATA: list[dict[str, Any]] = [
|
||||
{"name": "no input", "test": FakeTest, "inputs": None, "expected": {"__init__": {"result": "unset"}, "test": {"result": "success"}}},
|
||||
{
|
||||
"name": "no input",
|
||||
"test": FakeTest,
|
||||
"inputs": None,
|
||||
"expected": {"__init__": {"result": "unset"}, "test": {"result": "success"}},
|
||||
},
|
||||
{
|
||||
"name": "extra input",
|
||||
"test": FakeTest,
|
||||
"inputs": {"string": "culpa! veniam quas quas veniam molestias, esse"},
|
||||
"expected": {"__init__": {"result": "error", "messages": ["Extra inputs are not permitted"]}, "test": {"result": "error"}},
|
||||
"expected": {
|
||||
"__init__": {
|
||||
"result": "error",
|
||||
"messages": ["Extra inputs are not permitted"],
|
||||
},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "no input",
|
||||
"test": FakeTestWithInput,
|
||||
"inputs": None,
|
||||
"expected": {"__init__": {"result": "error", "messages": ["Field required"]}, "test": {"result": "error"}},
|
||||
"expected": {
|
||||
"__init__": {"result": "error", "messages": ["Field required"]},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "wrong input type",
|
||||
"test": FakeTestWithInput,
|
||||
"inputs": {"string": 1},
|
||||
"expected": {"__init__": {"result": "error", "messages": ["Input should be a valid string"]}, "test": {"result": "error"}},
|
||||
"expected": {
|
||||
"__init__": {
|
||||
"result": "error",
|
||||
"messages": ["Input should be a valid string"],
|
||||
},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "good input",
|
||||
"test": FakeTestWithInput,
|
||||
"inputs": {"string": "culpa! veniam quas quas veniam molestias, esse"},
|
||||
"expected": {"__init__": {"result": "unset"}, "test": {"result": "success", "messages": ["culpa! veniam quas quas veniam molestias, esse"]}},
|
||||
"expected": {
|
||||
"__init__": {"result": "unset"},
|
||||
"test": {
|
||||
"result": "success",
|
||||
"messages": ["culpa! veniam quas quas veniam molestias, esse"],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "good input",
|
||||
"test": FakeTestWithTemplate,
|
||||
"inputs": {"interface": "Ethernet1"},
|
||||
"expected": {"__init__": {"result": "unset"}, "test": {"result": "success", "messages": ["show interface Ethernet1"]}},
|
||||
"expected": {
|
||||
"__init__": {"result": "unset"},
|
||||
"test": {"result": "success", "messages": ["show interface Ethernet1"]},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "wrong input type",
|
||||
"test": FakeTestWithTemplate,
|
||||
"inputs": {"interface": 1},
|
||||
"expected": {"__init__": {"result": "error", "messages": ["Input should be a valid string"]}, "test": {"result": "error"}},
|
||||
"expected": {
|
||||
"__init__": {
|
||||
"result": "error",
|
||||
"messages": ["Input should be a valid string"],
|
||||
},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "wrong render definition",
|
||||
|
@ -284,13 +353,13 @@ ANTATEST_DATA: list[dict[str, Any]] = [
|
|||
},
|
||||
},
|
||||
{
|
||||
"name": "Exception in render()",
|
||||
"name": "RuntimeError in render()",
|
||||
"test": FakeTestWithTemplateBadRender2,
|
||||
"inputs": {"interface": "Ethernet1"},
|
||||
"expected": {
|
||||
"__init__": {
|
||||
"result": "error",
|
||||
"messages": ["Exception in tests.units.test_models.FakeTestWithTemplateBadRender2.render(): Exception"],
|
||||
"messages": ["Exception in tests.units.test_models.FakeTestWithTemplateBadRender2.render(): RuntimeError"],
|
||||
},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
|
@ -317,7 +386,10 @@ ANTATEST_DATA: list[dict[str, Any]] = [
|
|||
"name": "skip on platforms, not unset",
|
||||
"test": SkipOnPlatformTestWithInput,
|
||||
"inputs": None,
|
||||
"expected": {"__init__": {"result": "error", "messages": ["Field required"]}, "test": {"result": "error"}},
|
||||
"expected": {
|
||||
"__init__": {"result": "error", "messages": ["Field required"]},
|
||||
"test": {"result": "error"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "deprecate test without new test",
|
||||
|
@ -341,7 +413,13 @@ ANTATEST_DATA: list[dict[str, Any]] = [
|
|||
"name": "failed command",
|
||||
"test": FakeTestWithFailedCommand,
|
||||
"inputs": None,
|
||||
"expected": {"__init__": {"result": "unset"}, "test": {"result": "error", "messages": ["show version has failed: failed command"]}},
|
||||
"expected": {
|
||||
"__init__": {"result": "unset"},
|
||||
"test": {
|
||||
"result": "error",
|
||||
"messages": ["show version has failed: failed command"],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "unsupported command",
|
||||
|
@ -349,29 +427,30 @@ ANTATEST_DATA: list[dict[str, Any]] = [
|
|||
"inputs": None,
|
||||
"expected": {
|
||||
"__init__": {"result": "unset"},
|
||||
"test": {"result": "skipped", "messages": ["Skipped because show hardware counter drop is not supported on pytest"]},
|
||||
"test": {
|
||||
"result": "skipped",
|
||||
"messages": ["'show hardware counter drop' is not supported on pytest"],
|
||||
},
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
class Test_AntaTest:
|
||||
"""
|
||||
Test for anta.models.AntaTest
|
||||
"""
|
||||
class TestAntaTest:
|
||||
"""Test for anta.models.AntaTest."""
|
||||
|
||||
def test__init_subclass__name(self) -> None:
|
||||
"""Test __init_subclass__"""
|
||||
"""Test __init_subclass__."""
|
||||
# Pylint detects all the classes in here as unused which is on purpose
|
||||
# pylint: disable=unused-variable
|
||||
with pytest.raises(NotImplementedError) as exec_info:
|
||||
|
||||
class WrongTestNoName(AntaTest):
|
||||
"""ANTA test that is missing a name"""
|
||||
"""ANTA test that is missing a name."""
|
||||
|
||||
description = "ANTA test that is missing a name"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
|
@ -382,11 +461,11 @@ class Test_AntaTest:
|
|||
with pytest.raises(NotImplementedError) as exec_info:
|
||||
|
||||
class WrongTestNoDescription(AntaTest):
|
||||
"""ANTA test that is missing a description"""
|
||||
"""ANTA test that is missing a description."""
|
||||
|
||||
name = "WrongTestNoDescription"
|
||||
categories = []
|
||||
commands = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
|
@ -397,11 +476,11 @@ class Test_AntaTest:
|
|||
with pytest.raises(NotImplementedError) as exec_info:
|
||||
|
||||
class WrongTestNoCategories(AntaTest):
|
||||
"""ANTA test that is missing categories"""
|
||||
"""ANTA test that is missing categories."""
|
||||
|
||||
name = "WrongTestNoCategories"
|
||||
description = "ANTA test that is missing categories"
|
||||
commands = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = []
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
|
@ -412,11 +491,11 @@ class Test_AntaTest:
|
|||
with pytest.raises(NotImplementedError) as exec_info:
|
||||
|
||||
class WrongTestNoCommands(AntaTest):
|
||||
"""ANTA test that is missing commands"""
|
||||
"""ANTA test that is missing commands."""
|
||||
|
||||
name = "WrongTestNoCommands"
|
||||
description = "ANTA test that is missing commands"
|
||||
categories = []
|
||||
categories: ClassVar[list[str]] = []
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
|
@ -432,14 +511,14 @@ class Test_AntaTest:
|
|||
|
||||
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=generate_test_ids(ANTATEST_DATA))
|
||||
def test__init__(self, device: AntaDevice, data: dict[str, Any]) -> None:
|
||||
"""Test the AntaTest constructor"""
|
||||
"""Test the AntaTest constructor."""
|
||||
expected = data["expected"]["__init__"]
|
||||
test = data["test"](device, inputs=data["inputs"])
|
||||
self._assert_test(test, expected)
|
||||
|
||||
@pytest.mark.parametrize("data", ANTATEST_DATA, ids=generate_test_ids(ANTATEST_DATA))
|
||||
def test_test(self, device: AntaDevice, data: dict[str, Any]) -> None:
|
||||
"""Test the AntaTest.test method"""
|
||||
"""Test the AntaTest.test method."""
|
||||
expected = data["expected"]["test"]
|
||||
test = data["test"](device, inputs=data["inputs"])
|
||||
asyncio.run(test.test())
|
||||
|
@ -454,12 +533,12 @@ def test_blacklist(device: AntaDevice, data: str) -> None:
|
|||
"""Test for blacklisting function."""
|
||||
|
||||
class FakeTestWithBlacklist(AntaTest):
|
||||
"""Fake Test for blacklist"""
|
||||
"""Fake Test for blacklist."""
|
||||
|
||||
name = "FakeTestWithBlacklist"
|
||||
description = "ANTA test that has blacklisted command"
|
||||
categories = []
|
||||
commands = [AntaCommand(command=data)]
|
||||
categories: ClassVar[list[str]] = []
|
||||
commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command=data)]
|
||||
|
||||
@AntaTest.anta_test
|
||||
def test(self) -> None:
|
||||
|
@ -470,3 +549,61 @@ def test_blacklist(device: AntaDevice, data: str) -> None:
|
|||
# Run the test() method
|
||||
asyncio.run(test_instance.test())
|
||||
assert test_instance.result.result == "error"
|
||||
|
||||
|
||||
class TestAntaCommand:
|
||||
"""Test for anta.models.AntaCommand."""
|
||||
|
||||
# ruff: noqa: B018
|
||||
# pylint: disable=pointless-statement
|
||||
|
||||
def test_empty_output_access(self) -> None:
|
||||
"""Test for both json and text ofmt."""
|
||||
json_cmd = AntaCommand(command="show dummy")
|
||||
text_cmd = AntaCommand(command="show dummy", ofmt="text")
|
||||
msg = "There is no output for command 'show dummy'"
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
json_cmd.json_output
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
text_cmd.text_output
|
||||
|
||||
def test_wrong_format_output_access(self) -> None:
|
||||
"""Test for both json and text ofmt."""
|
||||
json_cmd = AntaCommand(command="show dummy", output={})
|
||||
json_cmd_2 = AntaCommand(command="show dummy", output="not_json")
|
||||
text_cmd = AntaCommand(command="show dummy", ofmt="text", output="blah")
|
||||
text_cmd_2 = AntaCommand(command="show dummy", ofmt="text", output={"not_a": "string"})
|
||||
msg = "Output of command 'show dummy' is invalid"
|
||||
msg = "Output of command 'show dummy' is invalid"
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
json_cmd.text_output
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
text_cmd.json_output
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
json_cmd_2.text_output
|
||||
with pytest.raises(RuntimeError, match=msg):
|
||||
text_cmd_2.json_output
|
||||
|
||||
def test_supported(self) -> None:
|
||||
"""Test if the supported property."""
|
||||
command = AntaCommand(command="show hardware counter drop", errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"])
|
||||
assert command.supported is False
|
||||
command = AntaCommand(
|
||||
command="show hardware counter drop", output={"totalAdverseDrops": 0, "totalCongestionDrops": 0, "totalPacketProcessorDrops": 0, "dropEvents": {}}
|
||||
)
|
||||
assert command.supported is True
|
||||
|
||||
def test_requires_privileges(self) -> None:
|
||||
"""Test if the requires_privileges property."""
|
||||
command = AntaCommand(command="show aaa methods accounting", errors=["Invalid input (privileged mode required)"])
|
||||
assert command.requires_privileges is True
|
||||
command = AntaCommand(
|
||||
command="show aaa methods accounting",
|
||||
output={
|
||||
"commandsAcctMethods": {"privilege0-15": {"defaultMethods": [], "consoleMethods": []}},
|
||||
"execAcctMethods": {"exec": {"defaultMethods": [], "consoleMethods": []}},
|
||||
"systemAcctMethods": {"system": {"defaultMethods": [], "consoleMethods": []}},
|
||||
"dot1xAcctMethods": {"dot1x": {"defaultMethods": [], "consoleMethods": []}},
|
||||
},
|
||||
)
|
||||
assert command.requires_privileges is False
|
||||
|
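
A short, hedged sketch of how these two properties could be consumed by calling code, reusing the error strings from the tests above:

from anta.models import AntaCommand

cmd = AntaCommand(command="show aaa methods accounting", errors=["Invalid input (privileged mode required)"])
if cmd.requires_privileges:
    # A caller could retry the collection with enable mode turned on.
    print(f"'{cmd.command}' needs privileged (enable) mode")

cmd = AntaCommand(
    command="show hardware counter drop",
    errors=["Unavailable command (not supported on this hardware platform) (at token 2: 'counter')"],
)
if not cmd.supported:
    print(f"'{cmd.command}' is not supported on this hardware platform")
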
|
|
@ -1,13 +1,11 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""
|
||||
test anta.runner.py
|
||||
"""
|
||||
"""test anta.runner.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
|
||||
|
@ -19,16 +17,12 @@ from anta.runner import main
|
|||
|
||||
from .test_models import FakeTest
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pytest import LogCaptureFixture
|
||||
|
||||
FAKE_CATALOG: AntaCatalog = AntaCatalog.from_list([(FakeTest, None)])
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_runner_empty_tests(caplog: LogCaptureFixture, test_inventory: AntaInventory) -> None:
|
||||
"""
|
||||
Test that when the list of tests is empty, a log is raised
|
||||
@pytest.mark.asyncio()
|
||||
async def test_runner_empty_tests(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
|
||||
"""Test that when the list of tests is empty, a log is raised.
|
||||
|
||||
caplog is the pytest fixture to capture logs
|
||||
test_inventory is a fixture that gives a default inventory for tests
|
||||
|
@ -42,10 +36,9 @@ async def test_runner_empty_tests(caplog: LogCaptureFixture, test_inventory: Ant
|
|||
assert "The list of tests is empty, exiting" in caplog.records[0].message
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_runner_empty_inventory(caplog: LogCaptureFixture) -> None:
|
||||
"""
|
||||
Test that when the Inventory is empty, a log is raised
|
||||
@pytest.mark.asyncio()
|
||||
async def test_runner_empty_inventory(caplog: pytest.LogCaptureFixture) -> None:
|
||||
"""Test that when the Inventory is empty, a log is raised.
|
||||
|
||||
caplog is the pytest fixture to capture logs
|
||||
"""
|
||||
|
@ -58,10 +51,9 @@ async def test_runner_empty_inventory(caplog: LogCaptureFixture) -> None:
|
|||
assert "The inventory is empty, exiting" in caplog.records[0].message
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_runner_no_selected_device(caplog: LogCaptureFixture, test_inventory: AntaInventory) -> None:
|
||||
"""
|
||||
Test that when the list of established device
|
||||
@pytest.mark.asyncio()
|
||||
async def test_runner_no_selected_device(caplog: pytest.LogCaptureFixture, test_inventory: AntaInventory) -> None:
|
||||
"""Test that when the list of established device.
|
||||
|
||||
caplog is the pytest fixture to capture logs
|
||||
test_inventory is a fixture that gives a default inventory for tests
|
||||
|
@ -71,12 +63,10 @@ async def test_runner_no_selected_device(caplog: LogCaptureFixture, test_invento
|
|||
manager = ResultManager()
|
||||
await main(manager, test_inventory, FAKE_CATALOG)
|
||||
|
||||
assert "No device in the established state 'True' was found. There is no device to run tests against, exiting" in [record.message for record in caplog.records]
|
||||
assert "No reachable device was found." in [record.message for record in caplog.records]
|
||||
|
||||
# Reset logs and run with tags
|
||||
caplog.clear()
|
||||
await main(manager, test_inventory, FAKE_CATALOG, tags=["toto"])
|
||||
await main(manager, test_inventory, FAKE_CATALOG, tags={"toto"})
|
||||
|
||||
assert "No device in the established state 'True' matching the tags ['toto'] was found. There is no device to run tests against, exiting" in [
|
||||
record.message for record in caplog.records
|
||||
]
|
||||
assert "No reachable device matching the tags {'toto'} was found." in [record.message for record in caplog.records]
|
||||
|
|
490
tests/units/test_tools.py
Normal file
|
@ -0,0 +1,490 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
"""Tests for `anta.tools`."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import AbstractContextManager
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.tools import get_dict_superset, get_failed_logs, get_item, get_value
|
||||
|
||||
TEST_GET_FAILED_LOGS_DATA = [
|
||||
{"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com"},
|
||||
{"id": 2, "name": "Bob", "age": 35, "email": "bob@example.com"},
|
||||
{"id": 3, "name": "Charlie", "age": 40, "email": "charlie@example.com"},
|
||||
{"id": 4, "name": "Jon", "age": 25, "email": "Jon@example.com"},
|
||||
{"id": 4, "name": "Rob", "age": 25, "email": "Jon@example.com"},
|
||||
]
|
||||
TEST_GET_DICT_SUPERSET_DATA = [
|
||||
("id", 0),
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"age": 30,
|
||||
"email": "alice@example.com",
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"age": 35,
|
||||
"email": "bob@example.com",
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Charlie",
|
||||
"age": 40,
|
||||
"email": "charlie@example.com",
|
||||
},
|
||||
]
|
||||
TEST_GET_VALUE_DATA = {"test_value": 42, "nested_test": {"nested_value": 43}}
|
||||
TEST_GET_ITEM_DATA = [
|
||||
("id", 0),
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"age": 30,
|
||||
"email": "alice@example.com",
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"age": 35,
|
||||
"email": "bob@example.com",
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Charlie",
|
||||
"age": 40,
|
||||
"email": "charlie@example.com",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("expected_output", "actual_output", "expected_result"),
|
||||
[
|
||||
pytest.param(
|
||||
TEST_GET_FAILED_LOGS_DATA[0],
|
||||
TEST_GET_FAILED_LOGS_DATA[0],
|
||||
"",
|
||||
id="no difference",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_FAILED_LOGS_DATA[0],
|
||||
TEST_GET_FAILED_LOGS_DATA[1],
|
||||
"\nExpected `1` as the id, but found `2` instead.\nExpected `Alice` as the name, but found `Bob` instead.\n"
|
||||
"Expected `30` as the age, but found `35` instead.\nExpected `alice@example.com` as the email, but found `bob@example.com` instead.",
|
||||
id="different data",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_FAILED_LOGS_DATA[0],
|
||||
{},
|
||||
"\nExpected `1` as the id, but it was not found in the actual output.\nExpected `Alice` as the name, but it was not found in the actual output.\n"
|
||||
"Expected `30` as the age, but it was not found in the actual output.\nExpected `alice@example.com` as the email, but it was not found in "
|
||||
"the actual output.",
|
||||
id="empty actual output",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_FAILED_LOGS_DATA[3],
|
||||
TEST_GET_FAILED_LOGS_DATA[4],
|
||||
"\nExpected `Jon` as the name, but found `Rob` instead.",
|
||||
id="different name",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_failed_logs(
|
||||
expected_output: dict[Any, Any],
|
||||
actual_output: dict[Any, Any],
|
||||
expected_result: str,
|
||||
) -> None:
|
||||
"""Test get_failed_logs."""
|
||||
assert get_failed_logs(expected_output, actual_output) == expected_result
|
||||
|
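
For reference, a minimal sketch of get_failed_logs with made-up data; the message format matches the expectations above:

from anta.tools import get_failed_logs

expected = {"state": "Established", "prefixes": 10}
actual = {"state": "Idle", "prefixes": 10}
# Only the differing key is reported:
# "\nExpected `Established` as the state, but found `Idle` instead."
print(get_failed_logs(expected, actual))
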
||||
|
||||
@pytest.mark.parametrize(
|
||||
(
|
||||
"list_of_dicts",
|
||||
"input_dict",
|
||||
"default",
|
||||
"required",
|
||||
"var_name",
|
||||
"custom_error_msg",
|
||||
"expected_result",
|
||||
"expected_raise",
|
||||
),
|
||||
[
|
||||
pytest.param(
|
||||
[],
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
does_not_raise(),
|
||||
id="empty list",
|
||||
),
|
||||
pytest.param(
|
||||
[],
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="empty list and required",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
does_not_raise(),
|
||||
id="missing item",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
TEST_GET_DICT_SUPERSET_DATA[1],
|
||||
does_not_raise(),
|
||||
id="found item",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
"default_value",
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
"default_value",
|
||||
does_not_raise(),
|
||||
id="default value",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="required",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
True,
|
||||
"custom_var_name",
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="custom_var_name not found in the provided list."),
|
||||
id="custom var_name",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
True,
|
||||
"custom_var_name",
|
||||
"Custom error message",
|
||||
TEST_GET_DICT_SUPERSET_DATA[1],
|
||||
does_not_raise(),
|
||||
id="custom error message",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
True,
|
||||
"custom_var_name",
|
||||
"Custom error message",
|
||||
None,
|
||||
pytest.raises(ValueError, match="Custom error message"),
|
||||
id="custom error message and required",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 1, "name": "Jack"},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
does_not_raise(),
|
||||
id="id ok but name not ok",
|
||||
),
|
||||
pytest.param(
|
||||
"not a list",
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="non-list input for list_of_dicts",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
"not a dict",
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="non-dictionary input",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
does_not_raise(),
|
||||
id="empty dictionary input",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 1, "name": "Alice", "extra_key": "extra_value"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="input dictionary with extra keys",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{"id": 1},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
TEST_GET_DICT_SUPERSET_DATA[1],
|
||||
does_not_raise(),
|
||||
id="input dictionary is a subset of more than one dictionary in list_of_dicts",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_DICT_SUPERSET_DATA,
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"age": 30,
|
||||
"email": "alice@example.com",
|
||||
"extra_key": "extra_value",
|
||||
},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="input dictionary is a superset of a dictionary in list_of_dicts",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_dict_superset(
|
||||
list_of_dicts: list[dict[Any, Any]],
|
||||
input_dict: dict[Any, Any],
|
||||
default: str | None,
|
||||
required: bool,
|
||||
var_name: str | None,
|
||||
custom_error_msg: str | None,
|
||||
expected_result: str,
|
||||
expected_raise: AbstractContextManager[Exception],
|
||||
) -> None:
|
||||
"""Test get_dict_superset."""
|
||||
# pylint: disable=too-many-arguments
|
||||
with expected_raise:
|
||||
assert get_dict_superset(list_of_dicts, input_dict, default, var_name, custom_error_msg, required=required) == expected_result
|
||||
|
||||
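
In plain terms, get_dict_superset returns the first dictionary whose items form a superset of the input dictionary; a minimal sketch with made-up data, using the positional and keyword arguments from the test call above:

from anta.tools import get_dict_superset

users = [
    {"id": 1, "name": "Alice", "age": 30},
    {"id": 2, "name": "Bob", "age": 35},
]
assert get_dict_superset(users, {"name": "Bob"}) == {"id": 2, "name": "Bob", "age": 35}
# Third positional argument is the default returned when nothing matches.
assert get_dict_superset(users, {"name": "Jack"}, "fallback", required=False) == "fallback"
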
|
||||
@pytest.mark.parametrize(
|
||||
(
|
||||
"input_dict",
|
||||
"key",
|
||||
"default",
|
||||
"required",
|
||||
"org_key",
|
||||
"separator",
|
||||
"expected_result",
|
||||
"expected_raise",
|
||||
),
|
||||
[
|
||||
pytest.param({}, "test", None, False, None, None, None, does_not_raise(), id="empty dict"),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"test_value",
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
42,
|
||||
does_not_raise(),
|
||||
id="simple key",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"nested_test.nested_value",
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
43,
|
||||
does_not_raise(),
|
||||
id="nested_key",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"missing_value",
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
does_not_raise(),
|
||||
id="missing_value",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"missing_value_with_default",
|
||||
"default_value",
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
"default_value",
|
||||
does_not_raise(),
|
||||
id="default",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"missing_required",
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="missing_required"),
|
||||
id="required",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"missing_required",
|
||||
None,
|
||||
True,
|
||||
"custom_org_key",
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="custom_org_key"),
|
||||
id="custom org_key",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_VALUE_DATA,
|
||||
"nested_test||nested_value",
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
"||",
|
||||
43,
|
||||
does_not_raise(),
|
||||
id="custom separator",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_value(
|
||||
input_dict: dict[Any, Any],
|
||||
key: str,
|
||||
default: str | None,
|
||||
required: bool,
|
||||
org_key: str | None,
|
||||
separator: str | None,
|
||||
expected_result: int | str | None,
|
||||
expected_raise: AbstractContextManager[Exception],
|
||||
) -> None:
|
||||
"""Test get_value."""
|
||||
# pylint: disable=too-many-arguments
|
||||
kwargs = {
|
||||
"default": default,
|
||||
"required": required,
|
||||
"org_key": org_key,
|
||||
"separator": separator,
|
||||
}
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
with expected_raise:
|
||||
assert get_value(input_dict, key, **kwargs) == expected_result # type: ignore[arg-type]
|
||||
|
||||
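
The behaviour pinned down here is a dotted-path lookup into nested dictionaries; a hedged sketch with made-up data, keyword names taken from the test above:

from anta.tools import get_value

data = {"vrfs": {"default": {"totalRoutes": 42}}}
assert get_value(data, "vrfs.default.totalRoutes") == 42
assert get_value(data, "vrfs.default.missing", default=0) == 0
assert get_value(data, "vrfs||default||totalRoutes", separator="||") == 42
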
|
||||
@pytest.mark.parametrize(
|
||||
("list_of_dicts", "key", "value", "default", "required", "case_sensitive", "var_name", "custom_error_msg", "expected_result", "expected_raise"),
|
||||
[
|
||||
pytest.param([], "name", "Bob", None, False, False, None, None, None, does_not_raise(), id="empty list"),
|
||||
pytest.param([], "name", "Bob", None, True, False, None, None, None, pytest.raises(ValueError, match="name"), id="empty list and required"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "Jack", None, False, False, None, None, None, does_not_raise(), id="missing item"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "Alice", None, False, False, None, None, TEST_GET_ITEM_DATA[1], does_not_raise(), id="found item"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "Jack", "default_value", False, False, None, None, "default_value", does_not_raise(), id="default value"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "Jack", None, True, False, None, None, None, pytest.raises(ValueError, match="name"), id="required"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "Bob", None, False, True, None, None, TEST_GET_ITEM_DATA[2], does_not_raise(), id="case sensitive"),
|
||||
pytest.param(TEST_GET_ITEM_DATA, "name", "charlie", None, False, False, None, None, TEST_GET_ITEM_DATA[3], does_not_raise(), id="case insensitive"),
|
||||
pytest.param(
|
||||
TEST_GET_ITEM_DATA,
|
||||
"name",
|
||||
"Jack",
|
||||
None,
|
||||
True,
|
||||
False,
|
||||
"custom_var_name",
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="custom_var_name"),
|
||||
id="custom var_name",
|
||||
),
|
||||
pytest.param(
|
||||
TEST_GET_ITEM_DATA,
|
||||
"name",
|
||||
"Jack",
|
||||
None,
|
||||
True,
|
||||
False,
|
||||
None,
|
||||
"custom_error_msg",
|
||||
None,
|
||||
pytest.raises(ValueError, match="custom_error_msg"),
|
||||
id="custom error msg",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_item(
|
||||
list_of_dicts: list[dict[Any, Any]],
|
||||
key: str,
|
||||
value: str | None,
|
||||
default: str | None,
|
||||
required: bool,
|
||||
case_sensitive: bool,
|
||||
var_name: str | None,
|
||||
custom_error_msg: str | None,
|
||||
expected_result: str,
|
||||
expected_raise: AbstractContextManager[Exception],
|
||||
) -> None:
|
||||
"""Test get_item."""
|
||||
# pylint: disable=too-many-arguments
|
||||
with expected_raise:
|
||||
assert get_item(list_of_dicts, key, value, default, var_name, custom_error_msg, required=required, case_sensitive=case_sensitive) == expected_result
|
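
Finally, a hedged sketch of get_item, which returns the first dictionary whose key matches the given value; keyword names mirror the test call above:

from anta.tools import get_item

peers = [{"name": "Alice", "asn": 65001}, {"name": "Bob", "asn": 65002}]
assert get_item(peers, "name", "bob", case_sensitive=False) == {"name": "Bob", "asn": 65002}
assert get_item(peers, "name", "bob", case_sensitive=True, required=False) is None  # exact-case lookup misses, falls back to the None default
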
|
@ -1,3 +0,0 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
|
@ -1,149 +0,0 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
|
||||
"""Tests for `anta.tools.get_dict_superset`."""
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.tools.get_dict_superset import get_dict_superset
|
||||
|
||||
# pylint: disable=duplicate-code
|
||||
DUMMY_DATA = [
|
||||
("id", 0),
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"age": 30,
|
||||
"email": "alice@example.com",
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"age": 35,
|
||||
"email": "bob@example.com",
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Charlie",
|
||||
"age": 40,
|
||||
"email": "charlie@example.com",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"list_of_dicts, input_dict, default, required, var_name, custom_error_msg, expected_result, expected_raise",
|
||||
[
|
||||
pytest.param([], {"id": 1, "name": "Alice"}, None, False, None, None, None, does_not_raise(), id="empty list"),
|
||||
pytest.param(
|
||||
[],
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="empty list and required",
|
||||
),
|
||||
pytest.param(DUMMY_DATA, {"id": 10, "name": "Jack"}, None, False, None, None, None, does_not_raise(), id="missing item"),
|
||||
pytest.param(DUMMY_DATA, {"id": 1, "name": "Alice"}, None, False, None, None, DUMMY_DATA[1], does_not_raise(), id="found item"),
|
||||
pytest.param(DUMMY_DATA, {"id": 10, "name": "Jack"}, "default_value", False, None, None, "default_value", does_not_raise(), id="default value"),
|
||||
pytest.param(
|
||||
DUMMY_DATA, {"id": 10, "name": "Jack"}, None, True, None, None, None, pytest.raises(ValueError, match="not found in the provided list."), id="required"
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
True,
|
||||
"custom_var_name",
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="custom_var_name not found in the provided list."),
|
||||
id="custom var_name",
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA, {"id": 1, "name": "Alice"}, None, True, "custom_var_name", "Custom error message", DUMMY_DATA[1], does_not_raise(), id="custom error message"
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA,
|
||||
{"id": 10, "name": "Jack"},
|
||||
None,
|
||||
True,
|
||||
"custom_var_name",
|
||||
"Custom error message",
|
||||
None,
|
||||
pytest.raises(ValueError, match="Custom error message"),
|
||||
id="custom error message and required",
|
||||
),
|
||||
pytest.param(DUMMY_DATA, {"id": 1, "name": "Jack"}, None, False, None, None, None, does_not_raise(), id="id ok but name not ok"),
|
||||
pytest.param(
|
||||
"not a list",
|
||||
{"id": 1, "name": "Alice"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="non-list input for list_of_dicts",
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA, "not a dict", None, True, None, None, None, pytest.raises(ValueError, match="not found in the provided list."), id="non-dictionary input"
|
||||
),
|
||||
pytest.param(DUMMY_DATA, {}, None, False, None, None, None, does_not_raise(), id="empty dictionary input"),
|
||||
pytest.param(
|
||||
DUMMY_DATA,
|
||||
{"id": 1, "name": "Alice", "extra_key": "extra_value"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="input dictionary with extra keys",
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA,
|
||||
{"id": 1},
|
||||
None,
|
||||
False,
|
||||
None,
|
||||
None,
|
||||
DUMMY_DATA[1],
|
||||
does_not_raise(),
|
||||
id="input dictionary is a subset of more than one dictionary in list_of_dicts",
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA,
|
||||
{"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com", "extra_key": "extra_value"},
|
||||
None,
|
||||
True,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
pytest.raises(ValueError, match="not found in the provided list."),
|
||||
id="input dictionary is a superset of a dictionary in list_of_dicts",
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_dict_superset(
|
||||
list_of_dicts: list[dict[Any, Any]],
|
||||
input_dict: Any,
|
||||
default: Any | None,
|
||||
required: bool,
|
||||
var_name: str | None,
|
||||
custom_error_msg: str | None,
|
||||
expected_result: str,
|
||||
expected_raise: Any,
|
||||
) -> None:
|
||||
"""Test get_dict_superset."""
|
||||
# pylint: disable=too-many-arguments
|
||||
with expected_raise:
|
||||
assert get_dict_superset(list_of_dicts, input_dict, default, required, var_name, custom_error_msg) == expected_result
|
|
@ -1,72 +0,0 @@
|
|||
# Copyright (c) 2023-2024 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the LICENSE file.
|
||||
|
||||
"""Tests for `anta.tools.get_item`."""
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from anta.tools.get_item import get_item
|
||||
|
||||
DUMMY_DATA = [
|
||||
("id", 0),
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Alice",
|
||||
"age": 30,
|
||||
"email": "alice@example.com",
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Bob",
|
||||
"age": 35,
|
||||
"email": "bob@example.com",
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Charlie",
|
||||
"age": 40,
|
||||
"email": "charlie@example.com",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"list_of_dicts, key, value, default, required, case_sensitive, var_name, custom_error_msg, expected_result, expected_raise",
|
||||
[
|
||||
pytest.param([], "name", "Bob", None, False, False, None, None, None, does_not_raise(), id="empty list"),
|
||||
pytest.param([], "name", "Bob", None, True, False, None, None, None, pytest.raises(ValueError, match="name"), id="empty list and required"),
|
||||
pytest.param(DUMMY_DATA, "name", "Jack", None, False, False, None, None, None, does_not_raise(), id="missing item"),
|
||||
pytest.param(DUMMY_DATA, "name", "Alice", None, False, False, None, None, DUMMY_DATA[1], does_not_raise(), id="found item"),
|
||||
pytest.param(DUMMY_DATA, "name", "Jack", "default_value", False, False, None, None, "default_value", does_not_raise(), id="default value"),
|
||||
pytest.param(DUMMY_DATA, "name", "Jack", None, True, False, None, None, None, pytest.raises(ValueError, match="name"), id="required"),
|
||||
pytest.param(DUMMY_DATA, "name", "Bob", None, False, True, None, None, DUMMY_DATA[2], does_not_raise(), id="case sensitive"),
|
||||
pytest.param(DUMMY_DATA, "name", "charlie", None, False, False, None, None, DUMMY_DATA[3], does_not_raise(), id="case insensitive"),
|
||||
pytest.param(
|
||||
DUMMY_DATA, "name", "Jack", None, True, False, "custom_var_name", None, None, pytest.raises(ValueError, match="custom_var_name"), id="custom var_name"
|
||||
),
|
||||
pytest.param(
|
||||
DUMMY_DATA, "name", "Jack", None, True, False, None, "custom_error_msg", None, pytest.raises(ValueError, match="custom_error_msg"), id="custom error msg"
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_get_item(
|
||||
list_of_dicts: list[dict[Any, Any]],
|
||||
key: Any,
|
||||
value: Any,
|
||||
default: Any | None,
|
||||
required: bool,
|
||||
case_sensitive: bool,
|
||||
var_name: str | None,
|
||||
custom_error_msg: str | None,
|
||||
expected_result: str,
|
||||
expected_raise: Any,
|
||||
) -> None:
|
||||
"""Test get_item."""
|
||||
# pylint: disable=too-many-arguments
|
||||
with expected_raise:
|
||||
assert get_item(list_of_dicts, key, value, default, required, case_sensitive, var_name, custom_error_msg) == expected_result
|
|
@ -1,50 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tools.get_value
"""

from __future__ import annotations

from contextlib import nullcontext as does_not_raise
from typing import Any

import pytest

from anta.tools.get_value import get_value

INPUT_DICT = {"test_value": 42, "nested_test": {"nested_value": 43}}


@pytest.mark.parametrize(
    "input_dict, key, default, required, org_key, separator, expected_result, expected_raise",
    [
        pytest.param({}, "test", None, False, None, None, None, does_not_raise(), id="empty dict"),
        pytest.param(INPUT_DICT, "test_value", None, False, None, None, 42, does_not_raise(), id="simple key"),
        pytest.param(INPUT_DICT, "nested_test.nested_value", None, False, None, None, 43, does_not_raise(), id="nested_key"),
        pytest.param(INPUT_DICT, "missing_value", None, False, None, None, None, does_not_raise(), id="missing_value"),
        pytest.param(INPUT_DICT, "missing_value_with_default", "default_value", False, None, None, "default_value", does_not_raise(), id="default"),
        pytest.param(INPUT_DICT, "missing_required", None, True, None, None, None, pytest.raises(ValueError), id="required"),
        pytest.param(INPUT_DICT, "missing_required", None, True, "custom_org_key", None, None, pytest.raises(ValueError), id="custom org_key"),
        pytest.param(INPUT_DICT, "nested_test||nested_value", None, None, None, "||", 43, does_not_raise(), id="custom separator"),
    ],
)
def test_get_value(
    input_dict: dict[Any, Any],
    key: str,
    default: str | None,
    required: bool,
    org_key: str | None,
    separator: str | None,
    expected_result: str,
    expected_raise: Any,
) -> None:
    """
    Test get_value
    """
    # pylint: disable=too-many-arguments
    kwargs = {"default": default, "required": required, "org_key": org_key, "separator": separator}
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    with expected_raise:
        assert get_value(input_dict, key, **kwargs) == expected_result  # type: ignore
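Similarly, the get_value cases above boil down to a dotted-key lookup helper. A minimal sketch, assuming only the import path and the keyword names (default, required, separator) that the removed test itself passes:

# Minimal sketch of the behaviour exercised by the removed test_get_value cases.
from anta.tools.get_value import get_value

data = {"test_value": 42, "nested_test": {"nested_value": 43}}

# Plain and dotted keys, the latter walking into nested dictionaries.
assert get_value(data, "test_value") == 42
assert get_value(data, "nested_test.nested_value") == 43

# Missing keys return None, or the supplied default.
assert get_value(data, "missing") is None
assert get_value(data, "missing", default="fallback") == "fallback"

# A custom separator replaces the dot, and required=True turns a miss into a ValueError.
assert get_value(data, "nested_test||nested_value", separator="||") == 43
try:
    get_value(data, "missing", required=True)
except ValueError:
    pass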
@ -1,38 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
"""
Tests for anta.tools.misc
"""
from __future__ import annotations

import pytest

from anta.tools.misc import exc_to_str, tb_to_str


def my_raising_function(exception: Exception) -> None:
    """
    dummy function to raise Exception
    """
    raise exception


@pytest.mark.parametrize("exception, expected_output", [(ValueError("test"), "ValueError (test)"), (ValueError(), "ValueError")])
def test_exc_to_str(exception: Exception, expected_output: str) -> None:
    """
    Test exc_to_str
    """
    assert exc_to_str(exception) == expected_output


def test_tb_to_str() -> None:
    """
    Test tb_to_str
    """
    try:
        my_raising_function(ValueError("test"))
    except ValueError as e:
        output = tb_to_str(e)
        assert "Traceback" in output
        assert 'my_raising_function(ValueError("test"))' in output
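The two helpers covered by the removed misc tests are plain string formatters for exceptions. A minimal sketch following the assertions above (import path as in the test; anything not asserted there is assumed):

# Minimal sketch of exc_to_str / tb_to_str as exercised by the removed tests.
from anta.tools.misc import exc_to_str, tb_to_str

# "<ExceptionType> (<message>)", with the parenthesised part dropped when there is no message.
assert exc_to_str(ValueError("test")) == "ValueError (test)"
assert exc_to_str(ValueError()) == "ValueError"

# tb_to_str renders the traceback of a caught exception; the removed test only
# checks that the resulting string contains "Traceback" and the offending call.
try:
    raise ValueError("test")
except ValueError as exc:
    assert "Traceback" in tb_to_str(exc)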
@ -1,57 +0,0 @@
# Copyright (c) 2023-2024 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.

"""Tests for `anta.tools.utils`."""
from __future__ import annotations

from contextlib import nullcontext as does_not_raise
from typing import Any

import pytest

from anta.tools.utils import get_failed_logs

EXPECTED_OUTPUTS = [
    {"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com"},
    {"id": 2, "name": "Bob", "age": 35, "email": "bob@example.com"},
    {"id": 3, "name": "Charlie", "age": 40, "email": "charlie@example.com"},
    {"id": 4, "name": "Jon", "age": 25, "email": "Jon@example.com"},
]

ACTUAL_OUTPUTS = [
    {"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com"},
    {"id": 2, "name": "Bob", "age": 35, "email": "bob@example.com"},
    {"id": 3, "name": "Charlie", "age": 40, "email": "charlie@example.com"},
    {"id": 4, "name": "Rob", "age": 25, "email": "Jon@example.com"},
]


@pytest.mark.parametrize(
    "expected_output, actual_output, expected_result, expected_raise",
    [
        pytest.param(EXPECTED_OUTPUTS[0], ACTUAL_OUTPUTS[0], "", does_not_raise(), id="no difference"),
        pytest.param(
            EXPECTED_OUTPUTS[0],
            ACTUAL_OUTPUTS[1],
            "\nExpected `1` as the id, but found `2` instead.\nExpected `Alice` as the name, but found `Bob` instead.\n"
            "Expected `30` as the age, but found `35` instead.\nExpected `alice@example.com` as the email, but found `bob@example.com` instead.",
            does_not_raise(),
            id="different data",
        ),
        pytest.param(
            EXPECTED_OUTPUTS[0],
            {},
            "\nExpected `1` as the id, but it was not found in the actual output.\nExpected `Alice` as the name, but it was not found in the actual output.\n"
            "Expected `30` as the age, but it was not found in the actual output.\nExpected `alice@example.com` as the email, but it was not found in "
            "the actual output.",
            does_not_raise(),
            id="empty actual output",
        ),
        pytest.param(EXPECTED_OUTPUTS[3], ACTUAL_OUTPUTS[3], "\nExpected `Jon` as the name, but found `Rob` instead.", does_not_raise(), id="different name"),
    ],
)
def test_get_failed_logs(expected_output: dict[Any, Any], actual_output: dict[Any, Any], expected_result: str, expected_raise: Any) -> None:
    """Test get_failed_logs."""
    with expected_raise:
        assert get_failed_logs(expected_output, actual_output) == expected_result
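Finally, get_failed_logs builds the human-readable mismatch report asserted above. A minimal sketch reusing the expected strings from the removed test (import path as in the test):

# Minimal sketch of get_failed_logs as exercised by the removed test.
from anta.tools.utils import get_failed_logs

expected = {"id": 4, "name": "Jon", "age": 25, "email": "Jon@example.com"}
actual = {"id": 4, "name": "Rob", "age": 25, "email": "Jon@example.com"}

# Matching keys are skipped; every mismatch becomes one
# "Expected `X` as the <key>, but found `Y` instead." line.
assert get_failed_logs(expected, actual) == "\nExpected `Jon` as the name, but found `Rob` instead."

# Keys absent from the actual output are reported as "not found in the actual output".
assert "not found in the actual output" in get_failed_logs({"id": 1}, {})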