Merging upstream version 2.3~rc1.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
972d2d9aa2
commit
ca2ec6771a
37 changed files with 1946 additions and 407 deletions
6
.github/workflows/docker-publish.yml
vendored
6
.github/workflows/docker-publish.yml
vendored
|
@ -38,7 +38,7 @@ jobs:
|
|||
# https://github.com/docker/login-action
|
||||
- name: Log into registry ${{ env.REGISTRY }}
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
|
||||
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
|
@ -48,14 +48,14 @@ jobs:
|
|||
# https://github.com/docker/metadata-action
|
||||
- name: Extract Docker metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96
|
||||
uses: docker/metadata-action@2c0bd771b40637d97bf205cbccdd294a32112176
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
# Build and push Docker image with Buildx (don't push on PR)
|
||||
# https://github.com/docker/build-push-action
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
|
||||
uses: docker/build-push-action@44ea916f6c540f9302d50c2b1e5a8dc071f15cdf
|
||||
with:
|
||||
context: .
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
|
|
2
.github/workflows/meson-test.yml
vendored
2
.github/workflows/meson-test.yml
vendored
|
@ -42,7 +42,7 @@ jobs:
|
|||
run: |
|
||||
sudo apt-get install --yes --quiet swig libjson-c-dev
|
||||
meson subprojects download
|
||||
meson setup .build subprojects/libnvme -Dlibdbus=disabled -Dopenssl=disabled -Dbuildtype=release -Dprefix=/usr -Dpython=true
|
||||
meson setup .build subprojects/libnvme -Dlibdbus=disabled -Dopenssl=disabled -Dbuildtype=release -Dprefix=/usr -Dpython=enabled
|
||||
ninja -C .build
|
||||
sudo meson install -C .build
|
||||
|
||||
|
|
2
.github/workflows/pylint.yml
vendored
2
.github/workflows/pylint.yml
vendored
|
@ -64,7 +64,7 @@ jobs:
|
|||
run: |
|
||||
sudo apt-get install --yes --quiet swig libjson-c-dev || true
|
||||
meson subprojects download
|
||||
meson setup builddir subprojects/libnvme -Dlibdbus=disabled -Dopenssl=disabled -Dbuildtype=release -Dprefix=/usr -Dpython=true
|
||||
meson setup builddir subprojects/libnvme -Dlibdbus=disabled -Dopenssl=disabled -Dbuildtype=release -Dprefix=/usr -Dpython=enabled
|
||||
ninja -C builddir
|
||||
sudo meson install -C builddir
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM fedora:37
|
||||
FROM fedora:38
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
|
|
7
Makefile
7
Makefile
|
@ -26,12 +26,12 @@ ${BUILD-DIR}:
|
|||
|
||||
.PHONY: stas
|
||||
stas: ${BUILD-DIR}
|
||||
ninja -C ${BUILD-DIR}
|
||||
meson compile -C ${BUILD-DIR}
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
ifneq ("$(wildcard ${BUILD-DIR})","")
|
||||
ninja -C ${BUILD-DIR} -t clean
|
||||
meson compile --clean -C ${BUILD-DIR}
|
||||
endif
|
||||
|
||||
.PHONY: purge
|
||||
|
@ -46,7 +46,7 @@ install: stas
|
|||
|
||||
.PHONY: uninstall
|
||||
uninstall: ${BUILD-DIR}
|
||||
sudo ninja -C ${BUILD-DIR} uninstall
|
||||
sudo ninja $@ -C ${BUILD-DIR}
|
||||
|
||||
.PHONY: dist
|
||||
dist: stas
|
||||
|
@ -56,6 +56,7 @@ dist: stas
|
|||
test: stas
|
||||
meson $@ -C ${BUILD-DIR} --suite nvme-stas
|
||||
|
||||
################################################################################
|
||||
.PHONY: loc
|
||||
loc:
|
||||
@cloc --by-file --exclude-dir=${BUILD-DIR},doc,subprojects,test,utils,debian,obj-x86_64-linux-gnu,.github --exclude-lang=Markdown,"NAnt script",XML,"Bourne Again Shell",make,"Bourne Shell",Meson,YAML,XSLT .
|
||||
|
|
18
NEWS.md
18
NEWS.md
|
@ -1,5 +1,23 @@
|
|||
# STorage Appliance Services (STAS)
|
||||
|
||||
## Changes with release 2.3
|
||||
|
||||
New features:
|
||||
|
||||
- Support for nBFT (NVMe-oF Boot Table).
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* For TCP transport: use `sysfs` controller `src_addr` attribute when matching to a configured "candidate" controller. This is to determine when an existing controller (located under the `sysfs`) can be reused instead of creating a new one. This avoids creating unnecessary duplicate connections.
|
||||
* Udev event handling: use `systemctl restart` instead of `systemctl start`. There is a small chance that a `start` operation has not completed when a new `start` is required. Issuing a `start` while a `start` is being performed has no effect. However, a `restart` will be handled properly.
|
||||
|
||||
## Changes with release 2.2.3
|
||||
|
||||
Bug fixes:
|
||||
|
||||
* When processing kernel nvme events, only react to `rediscover` and not to `connected` events. The `connected` event happens too early (before the nvme device has been fully identified).
|
||||
*
|
||||
|
||||
## Changes with release 2.2.2
|
||||
|
||||
Bug fixes:
|
||||
|
|
35
README.md
35
README.md
|
@ -171,31 +171,28 @@ STAS uses the `meson` build system. Since STAS is a Python project, there is no
|
|||
Invoke `meson` to configure the project:
|
||||
|
||||
```bash
|
||||
meson .build
|
||||
meson setup .build
|
||||
```
|
||||
|
||||
The command `meson .build` need only be called once. This analyzes the project and the host computer to determine if all the necessary tools and dependencies are available. The result is saved to the directory named `.build`.
|
||||
The command `meson setup .build` need only be called once. This analyzes the project and the host computer to determine if all the necessary tools and dependencies are available. The result is saved to the directory named `.build`.
|
||||
|
||||
To compile the code:
|
||||
|
||||
```bash
|
||||
cd .build
|
||||
ninja
|
||||
meson compile -C .build
|
||||
```
|
||||
|
||||
To install / uninstall the code:
|
||||
|
||||
```bash
|
||||
cd .build
|
||||
meson install
|
||||
ninja uninstall
|
||||
meson install -C .build # Wrapper for ninja install -C .build
|
||||
ninja uninstall -C .build # Unfortunately there's no meson wrapper
|
||||
```
|
||||
|
||||
To run the unit tests:
|
||||
|
||||
```bash
|
||||
cd .build
|
||||
meson test
|
||||
meson test -C .build
|
||||
```
|
||||
|
||||
For more information about testing, please refer to: [TESTING.md](./TESTING.md)
|
||||
|
@ -209,18 +206,20 @@ Recognizing that many people are not familiar with `meson`, we're providing a se
|
|||
make
|
||||
```
|
||||
|
||||
This performs the same operations as the meson approach described above. The `configure` script is automatically invoked when running `make` by itself.
|
||||
This performs the same operations as the meson approach described above. The `configure` script is automatically invoked (using default configuration parameters) when running `make` by itself.
|
||||
|
||||
| make command | Description |
|
||||
| -------------------- | :----------------------------------------------------------- |
|
||||
| **`make`** | Invoke the `.configure` script and build the code. |
|
||||
| ----------------------------- | :----------------------------------------------------------- |
|
||||
| **`make`** | Build the code. This command will automatically invoke the `.configure` scripts (using default configuration parameters) if the project is not already configured. |
|
||||
| **`make install`** | Install the code. Requires root privileges (you will be asked to enter your password). |
|
||||
| **`make uninstall`** | Uninstall the code. Requires root privileges (you will be asked to enter your password). |
|
||||
| **`make test`** | Run the unit tests |
|
||||
| **`make clean`** | Clean build artifacts, but does not remove the meson's configuration. That is, the configuration in `.build` is preserved. |
|
||||
| **`make purge`** | Remove all build artifacts including the `.build` directory. |
|
||||
| **`make update-subprojects`** | Bring subprojects like `libnvme` up to date |
|
||||
| **`make black`** | Verify that source code complies to black coding style |
|
||||
|
||||
## Compiling and running nvme-stas in a docker container
|
||||
# Containerization
|
||||
|
||||
Use published image (optional)
|
||||
```bash
|
||||
|
@ -246,9 +245,9 @@ docker-compose exec stacd stacctl ls
|
|||
docker-compose exec stacd stacctl status
|
||||
```
|
||||
|
||||
dependencies: dbus, avahi.
|
||||
dependencies: `dbus`, `avahi`.
|
||||
|
||||
## Generating man and html pages
|
||||
# Generating the documentation
|
||||
|
||||
nvme-stas uses the following programs to generate the documentation. These can be installed as shown in the "dependencies" section below.
|
||||
|
||||
|
@ -259,13 +258,13 @@ nvme-stas uses the following programs to generate the documentation. These can b
|
|||
|
||||
The following packages must be installed to generate the documentation
|
||||
|
||||
**Debian packages (tested on Ubuntu 20.04):**
|
||||
**Debian packages (tested on Ubuntu 20.04, 22.04):**
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y docbook-xml docbook-xsl xsltproc libglib2.0-dev
|
||||
```
|
||||
|
||||
**RPM packages (tested on Fedora 34..35 and SLES15):**
|
||||
**RPM packages (tested on Fedora 34..37 and SLES15):**
|
||||
|
||||
```bash
|
||||
sudo dnf install -y docbook-style-xsl libxslt glib2-devel
|
||||
|
@ -281,7 +280,7 @@ make purge
|
|||
make
|
||||
```
|
||||
|
||||
## Generating RPM and/or DEB packages
|
||||
# Generating RPM and/or DEB packages
|
||||
```bash
|
||||
make rpm
|
||||
make deb
|
||||
|
|
2
configure
vendored
2
configure
vendored
|
@ -10,7 +10,7 @@
|
|||
BUILD_DIR="${BUILD_DIR:-.build}"
|
||||
|
||||
if [ ! -d ${BUILD_DIR} ]; then
|
||||
exec meson ${BUILD_DIR} "$@"
|
||||
exec meson setup ${BUILD_DIR} "$@"
|
||||
else
|
||||
exec meson configure ${BUILD_DIR} "$@"
|
||||
fi
|
||||
|
|
117
coverage.sh.in
117
coverage.sh.in
|
@ -35,10 +35,14 @@ log_file_contents() {
|
|||
fi
|
||||
|
||||
while IFS= read -r line; do
|
||||
msg=" ${line}"
|
||||
printf "%b%s%s%b[0m\n" "\0033" ${color} "${msg}" "\0033"
|
||||
sudo logger -t COVERAGE -i "@@@@@ " -p ${level} -- "${msg}"
|
||||
printf "%b%s%s%b[0m\n" "\0033" ${color} " ${line}" "\0033"
|
||||
done < ${file}
|
||||
|
||||
sudo file=${file} level=${level} bash <<'EOF'
|
||||
while IFS= read -r line; do
|
||||
logger -t COVERAGE -i "@@@@@ " -p ${level} -- " ${line}"
|
||||
done < ${file}
|
||||
EOF
|
||||
}
|
||||
|
||||
systemctl-exists() {
|
||||
|
@ -83,7 +87,7 @@ sd_start() {
|
|||
sudo systemctl reset-failed "${unit}" >/dev/null 2>&1
|
||||
|
||||
log "Start ${app}"
|
||||
sudo systemd-run --unit="${unit}" --working-directory=. --property=Type=dbus --property=BusName="${dbus}" --property="SyslogIdentifier=${app}" --property="ExecReload=/bin/kill -HUP \$MAINPID" --setenv=PYTHONPATH=${PYTHON_PATH} --setenv=RUNTIME_DIRECTORY=${RUNTIME_DIRECTORY} coverage run --rcfile=.coveragerc ${cmd} >/tmp/output.txt 2>&1
|
||||
sudo systemd-run --unit="${unit}" --working-directory=. --property=Type=dbus --property=BusName="${dbus}" --property="SyslogIdentifier=${app}" --setenv=PYTHONPATH=${PYTHON_PATH} --setenv=RUNTIME_DIRECTORY=${RUNTIME_DIRECTORY} coverage run --rcfile=.coveragerc ${cmd} >/tmp/output.txt 2>&1
|
||||
log_file_contents $? /tmp/output.txt
|
||||
printf "\n"
|
||||
sleep 1
|
||||
|
@ -108,10 +112,10 @@ sd_restart() {
|
|||
reload_cfg() {
|
||||
app="$1"
|
||||
unit="${app}"-cov.service
|
||||
log "Reload config ${app}"
|
||||
sudo systemctl reload "${unit}" && printf "systemctl reload %s\n" "${unit}" >/tmp/output.txt 2>&1
|
||||
#pid=$( systemctl show --property MainPID --value "${unit}" )
|
||||
#sudo kill -HUP "${pid}" >/tmp/output.txt 2>&1
|
||||
pid=$( systemctl show --property MainPID --value "${unit}" )
|
||||
log "Reload config ${app} - SIGHUP ${pid}"
|
||||
#sudo systemctl reload "${unit}" && printf "systemctl reload %s\n" "${unit}" >/tmp/output.txt 2>&1
|
||||
sudo kill -HUP "${pid}" >/tmp/output.txt 2>&1
|
||||
log_file_contents $? /tmp/output.txt
|
||||
printf "\n"
|
||||
sleep 1
|
||||
|
@ -125,9 +129,9 @@ run_unit_test() {
|
|||
else
|
||||
COVERAGE="coverage"
|
||||
fi
|
||||
test=$@
|
||||
args=$@
|
||||
log "Run unit test: ${input}"
|
||||
PYTHONPATH=${PYTHON_PATH} ${COVERAGE} run --rcfile=.coveragerc ../test/${test} >/dev/null 2>&1
|
||||
PYTHONPATH=${PYTHON_PATH} ${COVERAGE} run --rcfile=.coveragerc "${args}" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
run_cmd_coverage() {
|
||||
|
@ -464,15 +468,39 @@ run_cmd_coverage stacctl ls
|
|||
log ">>>>>>>>>>>>>>>>>>>>> Marker [6] <<<<<<<<<<<<<<<<<<<<<"
|
||||
printf "\n"
|
||||
|
||||
|
||||
#*******************************************************************************
|
||||
# Stop Avahi Publisher
|
||||
log "Stop Avahi publisher"
|
||||
log "Restart Avahi publisher with invalid protocol"
|
||||
run_cmd sudo systemctl stop ${AVAHI_PUBLISHER}
|
||||
printf "\n"
|
||||
sleep 1
|
||||
run_cmd sudo systemd-run --unit=${AVAHI_PUBLISHER} --working-directory=. avahi-publish -s SFSS _nvme-disc._tcp 8009 "p=walmart"
|
||||
printf "\n"
|
||||
sleep 2
|
||||
|
||||
#*******************************************************************************
|
||||
log "Restart Avahi publisher"
|
||||
log "Restart Avahi publisher with protocol set to RoCE"
|
||||
run_cmd sudo systemctl stop ${AVAHI_PUBLISHER}
|
||||
printf "\n"
|
||||
sleep 1
|
||||
run_cmd sudo systemd-run --unit=${AVAHI_PUBLISHER} --working-directory=. avahi-publish -s SFSS _nvme-disc._tcp 8009 "p=roce"
|
||||
printf "\n"
|
||||
sleep 2
|
||||
|
||||
#*******************************************************************************
|
||||
log "Restart Avahi publisher without specifying protocol"
|
||||
run_cmd sudo systemctl stop ${AVAHI_PUBLISHER}
|
||||
printf "\n"
|
||||
sleep 1
|
||||
run_cmd sudo systemd-run --unit=${AVAHI_PUBLISHER} --working-directory=. avahi-publish -s SFSS _nvme-disc._tcp 8009
|
||||
printf "\n"
|
||||
sleep 2
|
||||
|
||||
#*******************************************************************************
|
||||
log "Restart Avahi publisher with protocol set to TCP"
|
||||
run_cmd sudo systemctl stop ${AVAHI_PUBLISHER}
|
||||
printf "\n"
|
||||
sleep 1
|
||||
run_cmd sudo systemd-run --unit=${AVAHI_PUBLISHER} --working-directory=. avahi-publish -s SFSS _nvme-disc._tcp 8009 "p=tcp"
|
||||
printf "\n"
|
||||
sleep 2
|
||||
|
@ -515,6 +543,7 @@ reconnect-delay=1
|
|||
ctrl-loss-tmo=0
|
||||
disable-sqflow=true
|
||||
ip-family=ipv6
|
||||
pleo=disabled
|
||||
|
||||
[Discovery controller connection management]
|
||||
persistent-connections=false
|
||||
|
@ -613,6 +642,35 @@ mkdir -p "/tmp/stafd"
|
|||
sd_restart "stafd"
|
||||
sleep 2
|
||||
|
||||
|
||||
log ">>>>>>>>>>>>>>>>>>>>> Marker [11] <<<<<<<<<<<<<<<<<<<<<"
|
||||
printf "\n"
|
||||
|
||||
log "Change stafd config [5]:"
|
||||
cat > "${stafd_conf_fname}" <<'EOF'
|
||||
[Global]
|
||||
tron = true
|
||||
|
||||
[Controllers]
|
||||
controller=transport=tcp;traddr=localhost
|
||||
controller=transport=tcp;traddr=1.1.1.1
|
||||
controller=transport=tcp;traddr=2.2.2.2
|
||||
controller=transport=tcp;traddr=3.3.3.3
|
||||
controller=transport=tcp;traddr=4.4.4.4
|
||||
controller=transport=tcp;traddr=5.5.5.5
|
||||
controller=transport=tcp;traddr=6.6.6.6
|
||||
EOF
|
||||
log_file_contents 0 "${stafd_conf_fname}"
|
||||
printf "\n"
|
||||
|
||||
reload_cfg "stafd"
|
||||
sleep 2
|
||||
|
||||
sd_stop "stafd"
|
||||
sleep 5
|
||||
sd_start "stafd"
|
||||
|
||||
|
||||
#*******************************************************************************
|
||||
# Change ownership of files that were created as root
|
||||
sudo chown -R "${PRIMARY_USR}":"${PRIMARY_GRP}" coverage >/dev/null 2>&1
|
||||
|
@ -621,20 +679,25 @@ sudo chown -R "${PRIMARY_USR}":"${PRIMARY_GRP}" subprojects/libnvme/libnvme/__py
|
|||
|
||||
#*******************************************************************************
|
||||
# Run unit tests
|
||||
run_unit_test test-avahi.py
|
||||
run_unit_test test-avahi.py
|
||||
run_unit_test test-config.py
|
||||
run_unit_test test-controller.py
|
||||
run_unit_test test-gtimer.py
|
||||
run_unit_test test-iputil.py
|
||||
run_unit_test test-log.py
|
||||
run_unit_test sudo test-nvme_options.py # Test both with super user...
|
||||
run_unit_test test-nvme_options.py # ... and with regular user
|
||||
run_unit_test test-service.py
|
||||
run_unit_test test-timeparse.py
|
||||
run_unit_test test-transport_id.py
|
||||
run_unit_test test-udev.py
|
||||
run_unit_test test-version.py
|
||||
TEST_DIR=$( realpath ../test )
|
||||
run_unit_test ${TEST_DIR}/test-avahi.py
|
||||
run_unit_test ${TEST_DIR}/test-avahi.py
|
||||
run_unit_test ${TEST_DIR}/test-config.py
|
||||
run_unit_test ${TEST_DIR}/test-controller.py
|
||||
run_unit_test ${TEST_DIR}/test-gtimer.py
|
||||
run_unit_test ${TEST_DIR}/test-iputil.py
|
||||
run_unit_test ${TEST_DIR}/test-log.py
|
||||
run_unit_test ${TEST_DIR}/test-nbft.py
|
||||
run_unit_test ${TEST_DIR}/test-nbft_conf.py
|
||||
run_unit_test sudo ${TEST_DIR}/test-nvme_options.py # Test both with super user...
|
||||
run_unit_test ${TEST_DIR}/test-nvme_options.py # ... and with regular user
|
||||
run_unit_test ${TEST_DIR}/test-service.py
|
||||
run_unit_test ${TEST_DIR}/test-timeparse.py
|
||||
run_unit_test ${TEST_DIR}/test-transport_id.py
|
||||
run_unit_test ${TEST_DIR}/test-defs.py
|
||||
run_unit_test ${TEST_DIR}/test-gutil.py
|
||||
run_unit_test ${TEST_DIR}/test-udev.py
|
||||
run_unit_test ${TEST_DIR}/test-version.py
|
||||
|
||||
#*******************************************************************************
|
||||
# Stop nvme target simulator
|
||||
|
|
|
@ -33,11 +33,11 @@
|
|||
|
||||
# kato: Keep Alive Timeout (KATO): This field specifies the timeout value
|
||||
# for the Keep Alive feature in seconds. The default value for this
|
||||
# field is 30 seconds (2 minutes).
|
||||
# field is 120 seconds (2 minutes).
|
||||
# Type: Unsigned integer
|
||||
# Range: 0..N
|
||||
# Unit: Seconds
|
||||
#kato=30
|
||||
#kato=120
|
||||
|
||||
# nr-io-queues: Overrides the default number of I/O queues create by the
|
||||
# driver.
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
project(
|
||||
'nvme-stas',
|
||||
meson_version: '>= 0.53.0',
|
||||
version: '2.2.2',
|
||||
version: '2.3-rc1',
|
||||
license: 'Apache-2.0',
|
||||
default_options: [
|
||||
'buildtype=release',
|
||||
|
|
|
@ -29,9 +29,11 @@ def _txt2dict(txt: list):
|
|||
for list_of_chars in txt:
|
||||
try:
|
||||
string = functools.reduce(lambda accumulator, c: accumulator + chr(c), list_of_chars, '')
|
||||
key, val = string.split("=")
|
||||
if string.isprintable():
|
||||
key, val = string.split('=')
|
||||
if key: # Make sure the key is not an empty string
|
||||
the_dict[key.lower()] = val
|
||||
except Exception: # pylint: disable=broad-except
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return the_dict
|
||||
|
|
106
staslib/conf.py
106
staslib/conf.py
|
@ -14,7 +14,8 @@ import sys
|
|||
import logging
|
||||
import functools
|
||||
import configparser
|
||||
from staslib import defs, singleton, timeparse
|
||||
from urllib.parse import urlparse
|
||||
from staslib import defs, iputil, nbft, singleton, timeparse
|
||||
|
||||
__TOKEN_RE = re.compile(r'\s*;\s*')
|
||||
__OPTION_RE = re.compile(r'\s*=\s*')
|
||||
|
@ -83,7 +84,8 @@ def _to_ip_family(text):
|
|||
class OrderedMultisetDict(dict):
|
||||
'''This class is used to change the behavior of configparser.ConfigParser
|
||||
and allow multiple configuration parameters with the same key. The
|
||||
result is a list of values.
|
||||
result is a list of values, where values are sorted by the order they
|
||||
appear in the file.
|
||||
'''
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
|
@ -317,7 +319,7 @@ class SvcConf(metaclass=singleton.Singleton): # pylint: disable=too-many-public
|
|||
option = 'persistent-connections'
|
||||
|
||||
value = self.get_option(section, option, ignore_default=True)
|
||||
legacy = self.get_option('Global', 'persistent-connections', ignore_default=True)
|
||||
legacy = self.get_option('Global', option, ignore_default=True)
|
||||
|
||||
if value is None and legacy is None:
|
||||
return self._defaults.get((section, option), True)
|
||||
|
@ -381,7 +383,7 @@ class SvcConf(metaclass=singleton.Singleton): # pylint: disable=too-many-public
|
|||
controller_list = self.get_option('Controllers', 'exclude')
|
||||
|
||||
# 2022-09-20: Look for "blacklist". This is for backwards compatibility
|
||||
# with releases 1.0 to 1.1.6. This is to be phased out (i.e. remove by 2024)
|
||||
# with releases 1.0 to 1.1.x. This is to be phased out (i.e. remove by 2024)
|
||||
controller_list += self.get_option('Controllers', 'blacklist')
|
||||
|
||||
excluded = [_parse_controller(controller) for controller in controller_list]
|
||||
|
@ -572,7 +574,7 @@ class SysConf(metaclass=singleton.Singleton):
|
|||
try:
|
||||
value = self.__get_value('Host', 'key', defs.NVME_HOSTKEY)
|
||||
except FileNotFoundError as ex:
|
||||
logging.info('Host key undefined: %s', ex)
|
||||
logging.debug('Host key undefined: %s', ex)
|
||||
value = None
|
||||
|
||||
return value
|
||||
|
@ -701,3 +703,97 @@ class NvmeOptions(metaclass=singleton.Singleton):
|
|||
def dhchap_ctrlkey_supp(self):
|
||||
'''This option allows specifying the controller DHCHAP key used for authentication.'''
|
||||
return self._supported_options['dhchap_ctrl_secret']
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
class NbftConf(metaclass=singleton.Singleton):
|
||||
'''Read and cache configuration file.'''
|
||||
|
||||
def __init__(self, root_dir=defs.NBFT_SYSFS_PATH):
|
||||
self._disc_ctrls = []
|
||||
self._subs_ctrls = []
|
||||
|
||||
nbft_files = nbft.get_nbft_files(root_dir)
|
||||
if len(nbft_files):
|
||||
logging.info('NBFT location(s): %s', list(nbft_files.keys()))
|
||||
|
||||
for data in nbft_files.values():
|
||||
hfis = data.get('hfi', [])
|
||||
discovery = data.get('discovery', [])
|
||||
subsystem = data.get('subsystem', [])
|
||||
|
||||
self._disc_ctrls.extend(NbftConf.__nbft_disc_to_cids(discovery, hfis))
|
||||
self._subs_ctrls.extend(NbftConf.__nbft_subs_to_cids(subsystem, hfis))
|
||||
|
||||
dcs = property(lambda self: self._disc_ctrls)
|
||||
iocs = property(lambda self: self._subs_ctrls)
|
||||
|
||||
def get_controllers(self):
|
||||
'''Retrieve the list of controllers. Stafd only cares about
|
||||
discovery controllers. Stacd only cares about I/O controllers.'''
|
||||
|
||||
# For now, only return DCs. There are still unanswered questions
|
||||
# regarding I/O controllers, e.g. what if multipathing has been
|
||||
# configured.
|
||||
return self.dcs if defs.PROG_NAME == 'stafd' else []
|
||||
|
||||
@staticmethod
|
||||
def __nbft_disc_to_cids(discovery, hfis):
|
||||
cids = []
|
||||
|
||||
for ctrl in discovery:
|
||||
cid = NbftConf.__uri2cid(ctrl['uri'])
|
||||
cid['subsysnqn'] = ctrl['nqn']
|
||||
|
||||
host_iface = NbftConf.__get_host_iface(ctrl.get('hfi_index'), hfis)
|
||||
if host_iface:
|
||||
cid['host-iface'] = host_iface
|
||||
|
||||
cids.append(cid)
|
||||
|
||||
return cids
|
||||
|
||||
@staticmethod
|
||||
def __nbft_subs_to_cids(subsystem, hfis):
|
||||
cids = []
|
||||
|
||||
for ctrl in subsystem:
|
||||
cid = {
|
||||
'transport': ctrl['trtype'],
|
||||
'traddr': ctrl['traddr'],
|
||||
'trsvcid': ctrl['trsvcid'],
|
||||
'subsysnqn': ctrl['subsys_nqn'],
|
||||
'hdr-digest': ctrl['pdu_header_digest_required'],
|
||||
'data-digest': ctrl['data_digest_required'],
|
||||
}
|
||||
|
||||
indexes = ctrl.get('hfi_indexes')
|
||||
if isinstance(indexes, list) and len(indexes) > 0:
|
||||
host_iface = NbftConf.__get_host_iface(indexes[0], hfis)
|
||||
if host_iface:
|
||||
cid['host-iface'] = host_iface
|
||||
|
||||
cids.append(cid)
|
||||
|
||||
return cids
|
||||
|
||||
@staticmethod
|
||||
def __get_host_iface(indx, hfis):
|
||||
if indx is None or indx >= len(hfis):
|
||||
return None
|
||||
|
||||
mac = hfis[indx].get('mac_addr')
|
||||
if mac is None:
|
||||
return None
|
||||
|
||||
return iputil.mac2iface(mac)
|
||||
|
||||
@staticmethod
|
||||
def __uri2cid(uri: str):
|
||||
'''Convert a URI of the form "nvme+tcp://100.71.103.50:8009/" to a Controller ID'''
|
||||
obj = urlparse(uri)
|
||||
return {
|
||||
'transport': obj.scheme.split('+')[1],
|
||||
'traddr': obj.hostname,
|
||||
'trsvcid': str(obj.port),
|
||||
}
|
||||
|
|
158
staslib/ctrl.py
158
staslib/ctrl.py
|
@ -135,7 +135,16 @@ class Controller(stas.ControllerABC): # pylint: disable=too-many-instance-attri
|
|||
self._root.log_level("debug" if tron else "err")
|
||||
|
||||
def _on_udev_notification(self, udev_obj):
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Controller._on_udev_notification() - %s | %s: Received event on dead object. udev_obj %s: %s',
|
||||
self.id,
|
||||
self.device,
|
||||
udev_obj.action,
|
||||
udev_obj.sys_name,
|
||||
)
|
||||
return
|
||||
|
||||
if udev_obj.action == 'change':
|
||||
nvme_aen = udev_obj.get('NVME_AEN')
|
||||
nvme_event = udev_obj.get('NVME_EVENT')
|
||||
|
@ -154,14 +163,6 @@ class Controller(stas.ControllerABC): # pylint: disable=too-many-instance-attri
|
|||
udev_obj.sys_name,
|
||||
udev_obj.action,
|
||||
)
|
||||
else:
|
||||
logging.debug(
|
||||
'Controller._on_udev_notification() - %s | %s: Received event on dead object. udev_obj %s: %s',
|
||||
self.id,
|
||||
self.device,
|
||||
udev_obj.action,
|
||||
udev_obj.sys_name,
|
||||
)
|
||||
|
||||
def _on_ctrl_removed(self, udev_obj): # pylint: disable=unused-argument
|
||||
if self._udev:
|
||||
|
@ -269,24 +270,34 @@ class Controller(stas.ControllerABC): # pylint: disable=too-many-instance-attri
|
|||
op_obj.kill()
|
||||
self._connect_op = None
|
||||
|
||||
if self._alive():
|
||||
self._device = self._ctrl.name
|
||||
logging.info('%s | %s - Connection established!', self.id, self.device)
|
||||
self._connect_attempts = 0
|
||||
self._udev.register_for_device_events(self._device, self._on_udev_notification)
|
||||
else:
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Controller._on_connect_success() - %s | %s: Received event on dead object. data=%s',
|
||||
self.id,
|
||||
self.device,
|
||||
data,
|
||||
)
|
||||
return
|
||||
|
||||
self._device = self._ctrl.name
|
||||
logging.info('%s | %s - Connection established!', self.id, self.device)
|
||||
self._connect_attempts = 0
|
||||
self._udev.register_for_device_events(self._device, self._on_udev_notification)
|
||||
|
||||
def _on_connect_fail(self, op_obj: gutil.AsyncTask, err, fail_cnt): # pylint: disable=unused-argument
|
||||
'''@brief Function called when we fail to connect to the Controller.'''
|
||||
op_obj.kill()
|
||||
self._connect_op = None
|
||||
if self._alive():
|
||||
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Controller._on_connect_fail() - %s Received event on dead object. %s %s',
|
||||
self.id,
|
||||
err.domain,
|
||||
err.message,
|
||||
)
|
||||
return
|
||||
|
||||
if self._connect_attempts == 1:
|
||||
# Do a fast re-try on the first failure.
|
||||
self._retry_connect_tmr.set_timeout(self.FAST_CONNECT_RETRY_PERIOD_SEC)
|
||||
|
@ -304,13 +315,6 @@ class Controller(stas.ControllerABC): # pylint: disable=too-many-instance-attri
|
|||
self._retry_connect_tmr.get_timeout(),
|
||||
)
|
||||
self._retry_connect_tmr.start()
|
||||
else:
|
||||
logging.debug(
|
||||
'Controller._on_connect_fail() - %s Received event on dead object. %s %s',
|
||||
self.id,
|
||||
err.domain,
|
||||
err.message,
|
||||
)
|
||||
|
||||
def disconnect(self, disconnected_cb, keep_connection):
|
||||
'''@brief Issue an asynchronous disconnect command to a Controller.
|
||||
|
@ -507,11 +511,7 @@ class Dc(Controller):
|
|||
|
||||
return
|
||||
|
||||
logging.info(
|
||||
'%s | %s - Controller not responding. Retrying...',
|
||||
self.id,
|
||||
self.device,
|
||||
)
|
||||
logging.info('%s | %s - Controller not responding. Retrying...', self.id, self.device)
|
||||
|
||||
self._ctrl_unresponsive_time = None
|
||||
self._ctrl_unresponsive_tmr.stop()
|
||||
|
@ -555,8 +555,8 @@ class Dc(Controller):
|
|||
|
||||
def _post_registration_actions(self):
|
||||
# Need to check that supported_log_pages() is available (introduced in libnvme 1.2)
|
||||
has_supported_log_pages = hasattr(self._ctrl, 'supported_log_pages')
|
||||
if not has_supported_log_pages:
|
||||
get_slp = getattr(self._ctrl, 'supported_log_pages', None)
|
||||
if get_slp is None:
|
||||
logging.warning(
|
||||
'%s | %s - libnvme-%s does not support "Get supported log pages". Please upgrade libnvme.',
|
||||
self.id,
|
||||
|
@ -564,9 +564,9 @@ class Dc(Controller):
|
|||
defs.LIBNVME_VERSION,
|
||||
)
|
||||
|
||||
if conf.SvcConf().pleo_enabled and self._is_ddc() and has_supported_log_pages:
|
||||
if conf.SvcConf().pleo_enabled and self._is_ddc() and get_slp is not None:
|
||||
self._get_supported_op = gutil.AsyncTask(
|
||||
self._on_get_supported_success, self._on_get_supported_fail, self._ctrl.supported_log_pages
|
||||
self._on_get_supported_success, self._on_get_supported_fail, get_slp
|
||||
)
|
||||
self._get_supported_op.run_async()
|
||||
else:
|
||||
|
@ -580,7 +580,9 @@ class Dc(Controller):
|
|||
'''
|
||||
super()._on_connect_success(op_obj, data)
|
||||
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
return
|
||||
|
||||
self._ctrl_unresponsive_time = None
|
||||
self._ctrl_unresponsive_tmr.stop()
|
||||
self._ctrl_unresponsive_tmr.set_timeout(0)
|
||||
|
@ -613,24 +615,34 @@ class Dc(Controller):
|
|||
refers to the fact that a successful exchange was made with the DC.
|
||||
It doesn't mean that the registration itself succeeded.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_registration_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
return
|
||||
|
||||
if data is not None:
|
||||
logging.warning('%s | %s - Registration error. %s.', self.id, self.device, data)
|
||||
else:
|
||||
logging.debug('Dc._on_registration_success() - %s | %s', self.id, self.device)
|
||||
|
||||
self._post_registration_actions()
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_registration_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
|
||||
def _on_registration_fail(self, op_obj: gutil.AsyncTask, err, fail_cnt):
|
||||
'''@brief Function called when we fail to register with the
|
||||
Discovery Controller. See self._register_op object
|
||||
for details.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_registration_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
return
|
||||
|
||||
logging.debug(
|
||||
'Dc._on_registration_fail() - %s | %s: %s. Retry in %s sec',
|
||||
self.id,
|
||||
|
@ -641,14 +653,6 @@ class Dc(Controller):
|
|||
if fail_cnt == 1: # Throttle the logs. Only print the first time the command fails
|
||||
logging.error('%s | %s - Failed to register with Discovery Controller. %s', self.id, self.device, err)
|
||||
op_obj.retry(Dc.REGISTRATION_RETRY_RERIOD_SEC)
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_registration_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
|
||||
# --------------------------------------------------------------------------
|
||||
def _on_get_supported_success(self, op_obj: gutil.AsyncTask, data): # pylint: disable=unused-argument
|
||||
|
@ -660,7 +664,12 @@ class Dc(Controller):
|
|||
refers to the fact that a successful exchange was made with the DC.
|
||||
It doesn't mean that the Get Supported Log Page itself succeeded.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_get_supported_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
return
|
||||
|
||||
try:
|
||||
dlp_supp_opts = data[nvme.NVME_LOG_LID_DISCOVER] >> 16
|
||||
except (TypeError, IndexError):
|
||||
|
@ -688,17 +697,22 @@ class Dc(Controller):
|
|||
)
|
||||
self._get_log_op = gutil.AsyncTask(self._on_get_log_success, self._on_get_log_fail, self._ctrl.discover)
|
||||
self._get_log_op.run_async()
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_get_supported_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
|
||||
def _on_get_supported_fail(self, op_obj: gutil.AsyncTask, err, fail_cnt):
|
||||
'''@brief Function called when we fail to retrieve the supported log
|
||||
page from the Discovery Controller. See self._get_supported_op object
|
||||
for details.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_get_supported_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
return
|
||||
|
||||
logging.debug(
|
||||
'Dc._on_get_supported_fail() - %s | %s: %s. Retry in %s sec',
|
||||
self.id,
|
||||
|
@ -714,14 +728,6 @@ class Dc(Controller):
|
|||
err,
|
||||
)
|
||||
op_obj.retry(Dc.GET_SUPPORTED_RETRY_RERIOD_SEC)
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_get_supported_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
|
||||
# --------------------------------------------------------------------------
|
||||
def _on_get_log_success(self, op_obj: gutil.AsyncTask, data): # pylint: disable=unused-argument
|
||||
|
@ -729,7 +735,12 @@ class Dc(Controller):
|
|||
from the Discovery Controller. See self._get_log_op object
|
||||
for details.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_get_log_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
return
|
||||
|
||||
# Note that for historical reasons too long to explain, the CDC may
|
||||
# return invalid addresses ('0.0.0.0', '::', or ''). Those need to
|
||||
# be filtered out.
|
||||
|
@ -762,17 +773,22 @@ class Dc(Controller):
|
|||
referrals_after,
|
||||
)
|
||||
self._serv.referrals_changed()
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_get_log_success() - %s | %s: Received event on dead object.', self.id, self.device
|
||||
)
|
||||
|
||||
def _on_get_log_fail(self, op_obj: gutil.AsyncTask, err, fail_cnt):
|
||||
'''@brief Function called when we fail to retrieve the log pages
|
||||
from the Discovery Controller. See self._get_log_op object
|
||||
for details.
|
||||
'''
|
||||
if self._alive():
|
||||
if not self._alive():
|
||||
logging.debug(
|
||||
'Dc._on_get_log_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
return
|
||||
|
||||
logging.debug(
|
||||
'Dc._on_get_log_fail() - %s | %s: %s. Retry in %s sec',
|
||||
self.id,
|
||||
|
@ -783,14 +799,6 @@ class Dc(Controller):
|
|||
if fail_cnt == 1: # Throttle the logs. Only print the first time the command fails
|
||||
logging.error('%s | %s - Failed to retrieve log pages. %s', self.id, self.device, err)
|
||||
op_obj.retry(Dc.GET_LOG_PAGE_RETRY_RERIOD_SEC)
|
||||
else:
|
||||
logging.debug(
|
||||
'Dc._on_get_log_fail() - %s | %s: Received event on dead object. %s',
|
||||
self.id,
|
||||
self.device,
|
||||
err,
|
||||
)
|
||||
op_obj.kill()
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
|
|
|
@ -12,6 +12,7 @@ import os
|
|||
import sys
|
||||
import shutil
|
||||
import platform
|
||||
from libnvme import nvme
|
||||
from staslib.version import KernelVersion
|
||||
|
||||
try:
|
||||
|
@ -48,4 +49,8 @@ SYS_CONF_FILE = '/etc/stas/sys.conf'
|
|||
STAFD_CONF_FILE = '/etc/stas/stafd.conf'
|
||||
STACD_CONF_FILE = '/etc/stas/stacd.conf'
|
||||
|
||||
HAS_NBFT_SUPPORT = hasattr(nvme, 'nbft_get')
|
||||
NBFT_SYSFS_PATH = "/sys/firmware/acpi/tables"
|
||||
NBFT_SYSFS_FILENAME = "NBFT*"
|
||||
|
||||
SYSTEMCTL = shutil.which('systemctl')
|
||||
|
|
|
@ -309,7 +309,7 @@ class AsyncTask: # pylint: disable=too-many-instance-attributes
|
|||
'''Return object members as a dictionary'''
|
||||
info = {
|
||||
'fail count': self._fail_cnt,
|
||||
'completed': self._task.get_completed(),
|
||||
'completed': self._task.get_completed() if self._task else None,
|
||||
'alive': self._alive(),
|
||||
}
|
||||
|
||||
|
|
|
@ -8,45 +8,126 @@
|
|||
|
||||
'''A collection of IP address and network interface utilities'''
|
||||
|
||||
import struct
|
||||
import socket
|
||||
import logging
|
||||
import ipaddress
|
||||
from staslib import conf
|
||||
|
||||
RTM_BASE = 16
|
||||
RTM_GETLINK = 18
|
||||
RTM_NEWADDR = 20
|
||||
RTM_GETADDR = 22
|
||||
NLM_F_REQUEST = 0x01
|
||||
NLM_F_ROOT = 0x100
|
||||
NLMSG_DONE = 3
|
||||
IFLA_ADDRESS = 1
|
||||
NLMSGHDR_SZ = 16
|
||||
NLMSG_HDRLEN = 16
|
||||
IFADDRMSG_SZ = 8
|
||||
IFINFOMSG_SZ = 16
|
||||
ARPHRD_ETHER = 1
|
||||
ARPHRD_LOOPBACK = 772
|
||||
NLMSG_LENGTH = lambda msg_len: msg_len + NLMSG_HDRLEN # pylint: disable=unnecessary-lambda-assignment
|
||||
|
||||
RTATTR_SZ = 4
|
||||
RTA_ALIGN = lambda length: ((length + 3) & ~3) # pylint: disable=unnecessary-lambda-assignment
|
||||
IFLA_ADDRESS = 1
|
||||
IFLA_IFNAME = 3
|
||||
|
||||
|
||||
def _nlmsghdr(nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid, msg_len: int):
|
||||
'''Implement this C struct:
|
||||
struct nlmsghdr {
|
||||
__u32 nlmsg_len; /* Length of message including header */
|
||||
__u16 nlmsg_type; /* Message content */
|
||||
__u16 nlmsg_flags; /* Additional flags */
|
||||
__u32 nlmsg_seq; /* Sequence number */
|
||||
__u32 nlmsg_pid; /* Sending process port ID */
|
||||
};
|
||||
'''
|
||||
return struct.pack('<LHHLL', NLMSG_LENGTH(msg_len), nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid)
|
||||
|
||||
|
||||
def _ifaddrmsg(family=0, prefixlen=0, flags=0, scope=0, index=0):
|
||||
'''Implement this C struct:
|
||||
struct ifaddrmsg {
|
||||
__u8 ifa_family;
|
||||
__u8 ifa_prefixlen; /* The prefix length */
|
||||
__u8 ifa_flags; /* Flags */
|
||||
__u8 ifa_scope; /* Address scope */
|
||||
__u32 ifa_index; /* Link index */
|
||||
};
|
||||
'''
|
||||
return struct.pack('<BBBBL', family, prefixlen, flags, scope, index)
|
||||
|
||||
|
||||
def _ifinfomsg(family=0, dev_type=0, index=0, flags=0, change=0):
|
||||
'''Implement this C struct:
|
||||
struct ifinfomsg {
|
||||
unsigned char ifi_family; /* AF_UNSPEC */
|
||||
unsigned char __ifi_pad;
|
||||
unsigned short ifi_type; /* Device type: ARPHRD_* */
|
||||
int ifi_index; /* Interface index */
|
||||
unsigned int ifi_flags; /* Device flags: IFF_* */
|
||||
unsigned int ifi_change; /* change mask: IFF_* */
|
||||
};
|
||||
'''
|
||||
return struct.pack('<BBHiII', family, 0, dev_type, index, flags, change)
|
||||
|
||||
|
||||
def _nlmsg(nlmsg_type, nlmsg_flags, msg: bytes):
|
||||
'''Build a Netlink message'''
|
||||
return _nlmsghdr(nlmsg_type, nlmsg_flags, 0, 0, len(msg)) + msg
|
||||
|
||||
|
||||
# Netlink request (Get address command)
|
||||
GETADDRCMD = (
|
||||
# BEGIN: struct nlmsghdr
|
||||
b'\0' * 4 # nlmsg_len (placeholder - actual length calculated below)
|
||||
+ (RTM_GETADDR).to_bytes(2, byteorder='little', signed=False) # nlmsg_type
|
||||
+ (NLM_F_REQUEST | NLM_F_ROOT).to_bytes(2, byteorder='little', signed=False) # nlmsg_flags
|
||||
+ b'\0' * 2 # nlmsg_seq
|
||||
+ b'\0' * 2 # nlmsg_pid
|
||||
# END: struct nlmsghdr
|
||||
+ b'\0' * 8 # struct ifaddrmsg
|
||||
)
|
||||
GETADDRCMD = len(GETADDRCMD).to_bytes(4, byteorder='little') + GETADDRCMD[4:] # nlmsg_len
|
||||
GETADDRCMD = _nlmsg(RTM_GETADDR, NLM_F_REQUEST | NLM_F_ROOT, _ifaddrmsg())
|
||||
|
||||
# Netlink request (Get address command)
|
||||
GETLINKCMD = _nlmsg(RTM_GETLINK, NLM_F_REQUEST | NLM_F_ROOT, _ifinfomsg(family=socket.AF_UNSPEC, change=0xFFFFFFFF))
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def get_ipaddress_obj(ipaddr):
|
||||
'''@brief Return a IPv4Address or IPv6Address depending on whether @ipaddr
|
||||
is a valid IPv4 or IPv6 address. Return None otherwise.'''
|
||||
try:
|
||||
ip = ipaddress.ip_address(ipaddr)
|
||||
except ValueError:
|
||||
return None
|
||||
def _data_matches_mac(data, mac):
|
||||
return mac.lower() == ':'.join([f'{x:02x}' for x in data[0:6]])
|
||||
|
||||
return ip
|
||||
|
||||
def mac2iface(mac: str): # pylint: disable=too-many-locals
|
||||
'''@brief Find the interface that has @mac as its assigned MAC address.
|
||||
@param mac: The MAC address to match
|
||||
'''
|
||||
with socket.socket(family=socket.AF_NETLINK, type=socket.SOCK_RAW, proto=socket.NETLINK_ROUTE) as sock:
|
||||
sock.sendall(GETLINKCMD)
|
||||
nlmsg = sock.recv(8192)
|
||||
nlmsg_idx = 0
|
||||
while True: # pylint: disable=too-many-nested-blocks
|
||||
if nlmsg_idx >= len(nlmsg):
|
||||
nlmsg += sock.recv(8192)
|
||||
|
||||
nlmsghdr = nlmsg[nlmsg_idx : nlmsg_idx + NLMSG_HDRLEN]
|
||||
nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('<LHHLL', nlmsghdr)
|
||||
|
||||
if nlmsg_type == NLMSG_DONE:
|
||||
break
|
||||
|
||||
if nlmsg_type == RTM_BASE:
|
||||
msg_indx = nlmsg_idx + NLMSG_HDRLEN
|
||||
msg = nlmsg[msg_indx : msg_indx + IFINFOMSG_SZ] # ifinfomsg
|
||||
_, _, ifi_type, ifi_index, _, _ = struct.unpack('<BBHiII', msg)
|
||||
|
||||
if ifi_type in (ARPHRD_LOOPBACK, ARPHRD_ETHER):
|
||||
rtattr_indx = msg_indx + IFINFOMSG_SZ
|
||||
while rtattr_indx < (nlmsg_idx + nlmsg_len):
|
||||
rtattr = nlmsg[rtattr_indx : rtattr_indx + RTATTR_SZ]
|
||||
rta_len, rta_type = struct.unpack('<HH', rtattr)
|
||||
if rta_type == IFLA_ADDRESS:
|
||||
data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]
|
||||
if _data_matches_mac(data, mac):
|
||||
return socket.if_indextoname(ifi_index)
|
||||
|
||||
rta_len = RTA_ALIGN(rta_len) # Round up to multiple of 4
|
||||
rtattr_indx += rta_len # Move to next rtattr
|
||||
|
||||
nlmsg_idx += nlmsg_len # Move to next Netlink message
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
|
@ -71,8 +152,7 @@ def _data_matches_ip(data_family, data, ip):
|
|||
return other_ip == ip
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def iface_of(src_addr):
|
||||
def _iface_of(src_addr): # pylint: disable=too-many-locals
|
||||
'''@brief Find the interface that has src_addr as one of its assigned IP addresses.
|
||||
@param src_addr: The IP address to match
|
||||
@type src_addr: Instance of ipaddress.IPv4Address or ipaddress.IPv6Address
|
||||
|
@ -85,31 +165,27 @@ def iface_of(src_addr):
|
|||
if nlmsg_idx >= len(nlmsg):
|
||||
nlmsg += sock.recv(8192)
|
||||
|
||||
nlmsg_type = int.from_bytes(nlmsg[nlmsg_idx + 4 : nlmsg_idx + 6], byteorder='little', signed=False)
|
||||
nlmsghdr = nlmsg[nlmsg_idx : nlmsg_idx + NLMSG_HDRLEN]
|
||||
nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('<LHHLL', nlmsghdr)
|
||||
|
||||
if nlmsg_type == NLMSG_DONE:
|
||||
break
|
||||
|
||||
if nlmsg_type != RTM_NEWADDR:
|
||||
break
|
||||
if nlmsg_type == RTM_NEWADDR:
|
||||
msg_indx = nlmsg_idx + NLMSG_HDRLEN
|
||||
msg = nlmsg[msg_indx : msg_indx + IFADDRMSG_SZ] # ifaddrmsg
|
||||
ifa_family, _, _, _, ifa_index = struct.unpack('<BBBBL', msg)
|
||||
|
||||
nlmsg_len = int.from_bytes(nlmsg[nlmsg_idx : nlmsg_idx + 4], byteorder='little', signed=False)
|
||||
if nlmsg_len % 4: # Is msg length not a multiple of 4?
|
||||
break
|
||||
|
||||
ifaddrmsg_indx = nlmsg_idx + NLMSGHDR_SZ
|
||||
ifa_family = nlmsg[ifaddrmsg_indx]
|
||||
ifa_index = int.from_bytes(nlmsg[ifaddrmsg_indx + 4 : ifaddrmsg_indx + 8], byteorder='little', signed=False)
|
||||
|
||||
rtattr_indx = ifaddrmsg_indx + IFADDRMSG_SZ
|
||||
rtattr_indx = msg_indx + IFADDRMSG_SZ
|
||||
while rtattr_indx < (nlmsg_idx + nlmsg_len):
|
||||
rta_len = int.from_bytes(nlmsg[rtattr_indx : rtattr_indx + 2], byteorder='little', signed=False)
|
||||
rta_type = int.from_bytes(nlmsg[rtattr_indx + 2 : rtattr_indx + 4], byteorder='little', signed=False)
|
||||
rtattr = nlmsg[rtattr_indx : rtattr_indx + RTATTR_SZ]
|
||||
rta_len, rta_type = struct.unpack('<HH', rtattr)
|
||||
if rta_type == IFLA_ADDRESS:
|
||||
data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]
|
||||
if _data_matches_ip(ifa_family, data, src_addr):
|
||||
return socket.if_indextoname(ifa_index)
|
||||
|
||||
rta_len = (rta_len + 3) & ~3 # Round up to multiple of 4
|
||||
rta_len = RTA_ALIGN(rta_len) # Round up to multiple of 4
|
||||
rtattr_indx += rta_len # Move to next rtattr
|
||||
|
||||
nlmsg_idx += nlmsg_len # Move to next Netlink message
|
||||
|
@ -117,6 +193,107 @@ def iface_of(src_addr):
|
|||
return ''
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def get_ipaddress_obj(ipaddr, ipv4_mapped_convert=False):
|
||||
'''@brief Return a IPv4Address or IPv6Address depending on whether @ipaddr
|
||||
is a valid IPv4 or IPv6 address. Return None otherwise.
|
||||
|
||||
If ipv4_mapped_resolve is set to True, IPv6 addresses that are IPv4-Mapped,
|
||||
will be converted to their IPv4 equivalent.
|
||||
'''
|
||||
try:
|
||||
ip = ipaddress.ip_address(ipaddr)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
if ipv4_mapped_convert:
|
||||
ipv4_mapped = getattr(ip, 'ipv4_mapped', None)
|
||||
if ipv4_mapped is not None:
|
||||
ip = ipv4_mapped
|
||||
|
||||
return ip
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def net_if_addrs(): # pylint: disable=too-many-locals
|
||||
'''@brief Return a dictionary listing every IP addresses for each interface.
|
||||
The first IP address of a list is the primary address used as the default
|
||||
source address.
|
||||
@example: {
|
||||
'wlp0s20f3': {
|
||||
4: ['10.0.0.28'],
|
||||
6: [
|
||||
'fd5e:9a9e:c5bd:0:5509:890c:1848:3843',
|
||||
'fd5e:9a9e:c5bd:0:1fd5:e527:8df7:7912',
|
||||
'2605:59c8:6128:fb00:c083:1b8:c467:81d2',
|
||||
'2605:59c8:6128:fb00:e99d:1a02:38e0:ad52',
|
||||
'fe80::d71b:e807:d5ee:7614'
|
||||
],
|
||||
},
|
||||
'lo': {
|
||||
4: ['127.0.0.1'],
|
||||
6: ['::1'],
|
||||
},
|
||||
'docker0': {
|
||||
4: ['172.17.0.1'],
|
||||
6: []
|
||||
},
|
||||
}
|
||||
'''
|
||||
interfaces = {}
|
||||
with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW) as sock:
|
||||
sock.sendall(GETADDRCMD)
|
||||
nlmsg = sock.recv(8192)
|
||||
nlmsg_idx = 0
|
||||
while True: # pylint: disable=too-many-nested-blocks
|
||||
if nlmsg_idx >= len(nlmsg):
|
||||
nlmsg += sock.recv(8192)
|
||||
|
||||
nlmsghdr = nlmsg[nlmsg_idx : nlmsg_idx + NLMSG_HDRLEN]
|
||||
nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('<LHHLL', nlmsghdr)
|
||||
|
||||
if nlmsg_type == NLMSG_DONE:
|
||||
break
|
||||
|
||||
if nlmsg_type == RTM_NEWADDR:
|
||||
msg_indx = nlmsg_idx + NLMSG_HDRLEN
|
||||
msg = nlmsg[msg_indx : msg_indx + IFADDRMSG_SZ] # ifaddrmsg
|
||||
ifa_family, _, _, _, ifa_index = struct.unpack('<BBBBL', msg)
|
||||
|
||||
if ifa_family in (socket.AF_INET, socket.AF_INET6):
|
||||
interfaces.setdefault(ifa_index, {4: [], 6: []})
|
||||
|
||||
rtattr_indx = msg_indx + IFADDRMSG_SZ
|
||||
while rtattr_indx < (nlmsg_idx + nlmsg_len):
|
||||
rtattr = nlmsg[rtattr_indx : rtattr_indx + RTATTR_SZ]
|
||||
rta_len, rta_type = struct.unpack('<HH', rtattr)
|
||||
|
||||
if rta_type == IFLA_IFNAME:
|
||||
data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]
|
||||
ifname = data.rstrip(b'\0').decode()
|
||||
interfaces[ifa_index]['name'] = ifname
|
||||
|
||||
elif rta_type == IFLA_ADDRESS:
|
||||
data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]
|
||||
ip = get_ipaddress_obj(data)
|
||||
if ip:
|
||||
family = 4 if ifa_family == socket.AF_INET else 6
|
||||
interfaces[ifa_index][family].append(ip)
|
||||
|
||||
rta_len = RTA_ALIGN(rta_len) # Round up to multiple of 4
|
||||
rtattr_indx += rta_len # Move to next rtattr
|
||||
|
||||
nlmsg_idx += nlmsg_len # Move to next Netlink message
|
||||
|
||||
if_addrs = {}
|
||||
for value in interfaces.values():
|
||||
name = value.pop('name', None)
|
||||
if name is not None:
|
||||
if_addrs[name] = value
|
||||
|
||||
return if_addrs
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def get_interface(src_addr):
|
||||
'''Get interface for given source address
|
||||
|
@ -128,42 +305,4 @@ def get_interface(src_addr):
|
|||
|
||||
src_addr = src_addr.split('%')[0] # remove scope-id (if any)
|
||||
src_addr = get_ipaddress_obj(src_addr)
|
||||
return '' if src_addr is None else iface_of(src_addr)
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
def remove_invalid_addresses(controllers: list):
|
||||
'''@brief Remove controllers with invalid addresses from the list of controllers.
|
||||
@param controllers: List of TIDs
|
||||
'''
|
||||
service_conf = conf.SvcConf()
|
||||
valid_controllers = list()
|
||||
for controller in controllers:
|
||||
if controller.transport in ('tcp', 'rdma'):
|
||||
# Let's make sure that traddr is
|
||||
# syntactically a valid IPv4 or IPv6 address.
|
||||
ip = get_ipaddress_obj(controller.traddr)
|
||||
if ip is None:
|
||||
logging.warning('%s IP address is not valid', controller)
|
||||
continue
|
||||
|
||||
# Let's make sure the address family is enabled.
|
||||
if ip.version not in service_conf.ip_family:
|
||||
logging.debug(
|
||||
'%s ignored because IPv%s is disabled in %s',
|
||||
controller,
|
||||
ip.version,
|
||||
service_conf.conf_file,
|
||||
)
|
||||
continue
|
||||
|
||||
valid_controllers.append(controller)
|
||||
|
||||
elif controller.transport in ('fc', 'loop'):
|
||||
# At some point, need to validate FC addresses as well...
|
||||
valid_controllers.append(controller)
|
||||
|
||||
else:
|
||||
logging.warning('Invalid transport %s', controller.transport)
|
||||
|
||||
return valid_controllers
|
||||
return '' if src_addr is None else _iface_of(src_addr)
|
||||
|
|
|
@ -24,6 +24,7 @@ files_to_copy = [
|
|||
'gutil.py',
|
||||
'iputil.py',
|
||||
'log.py',
|
||||
'nbft.py',
|
||||
'service.py',
|
||||
'singleton.py',
|
||||
'stas.py',
|
||||
|
|
28
staslib/nbft.py
Normal file
28
staslib/nbft.py
Normal file
|
@ -0,0 +1,28 @@
|
|||
# Copyright (c) 2023, Dell Inc. or its subsidiaries. All rights reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# See the LICENSE file for details.
|
||||
#
|
||||
# This file is part of NVMe STorage Appliance Services (nvme-stas).
|
||||
#
|
||||
# Authors: Martin Belanger <Martin.Belanger@dell.com>
|
||||
#
|
||||
'''Code used to access the NVMe Boot Firmware Tables'''
|
||||
|
||||
import os
|
||||
import glob
|
||||
import logging
|
||||
from libnvme import nvme
|
||||
from staslib import defs
|
||||
|
||||
|
||||
def get_nbft_files(root_dir=defs.NBFT_SYSFS_PATH):
|
||||
"""Return a dictionary containing the NBFT data for all the NBFT binary files located in @root_dir"""
|
||||
if not defs.HAS_NBFT_SUPPORT:
|
||||
logging.warning(
|
||||
"libnvme-%s does not have NBFT support. Please upgrade libnvme.",
|
||||
defs.LIBNVME_VERSION,
|
||||
)
|
||||
return {}
|
||||
|
||||
pathname = os.path.join(root_dir, defs.NBFT_SYSFS_FILENAME)
|
||||
return {fname: nvme.nbft_get(fname) or {} for fname in glob.iglob(pathname=pathname)} # pylint: disable=no-member
|
|
@ -20,7 +20,7 @@ import dasbus.client.proxy
|
|||
|
||||
from gi.repository import GLib
|
||||
from systemd.daemon import notify as sd_notify
|
||||
from staslib import avahi, conf, ctrl, defs, gutil, iputil, stas, timeparse, trid, udev
|
||||
from staslib import avahi, conf, ctrl, defs, gutil, stas, timeparse, trid, udev
|
||||
|
||||
|
||||
# ******************************************************************************
|
||||
|
@ -402,7 +402,7 @@ class Stac(Service):
|
|||
logging.debug('Stac._config_ctrls_finish() - discovered_ctrl_list = %s', discovered_ctrl_list)
|
||||
|
||||
controllers = stas.remove_excluded(configured_ctrl_list + discovered_ctrl_list)
|
||||
controllers = iputil.remove_invalid_addresses(controllers)
|
||||
controllers = stas.remove_invalid_addresses(controllers)
|
||||
|
||||
new_controller_tids = set(controllers)
|
||||
cur_controller_tids = set(self._controllers.keys())
|
||||
|
@ -752,7 +752,7 @@ class Staf(Service):
|
|||
|
||||
all_ctrls = configured_ctrl_list + discovered_ctrl_list + referral_ctrl_list
|
||||
controllers = stas.remove_excluded(all_ctrls)
|
||||
controllers = iputil.remove_invalid_addresses(controllers)
|
||||
controllers = stas.remove_invalid_addresses(controllers)
|
||||
|
||||
new_controller_tids = set(controllers)
|
||||
cur_controller_tids = set(self._controllers.keys())
|
||||
|
@ -856,7 +856,7 @@ class Staf(Service):
|
|||
return
|
||||
|
||||
# Did we receive a Change of DLP AEN or an NVME Event indicating 'connect' or 'rediscover'?
|
||||
if not _is_dlp_changed_aen(udev_obj) and not _event_matches(udev_obj, ('connected', 'rediscover')):
|
||||
if not _is_dlp_changed_aen(udev_obj) and not _event_matches(udev_obj, ('rediscover',)):
|
||||
return
|
||||
|
||||
# We need to invoke "nvme connect-all" using nvme-cli's nvmf-connect@.service
|
||||
|
@ -873,6 +873,6 @@ class Staf(Service):
|
|||
options = r'\x09'.join(
|
||||
[fr'{option}\x3d{value}' for option, value in cnf if value not in (None, 'none', 'None', '')]
|
||||
)
|
||||
logging.info('Invoking: systemctl start nvmf-connect@%s.service', options)
|
||||
cmd = [defs.SYSTEMCTL, '--quiet', '--no-block', 'start', fr'nvmf-connect@{options}.service']
|
||||
logging.debug('Invoking: systemctl restart nvmf-connect@%s.service', options)
|
||||
cmd = [defs.SYSTEMCTL, '--quiet', '--no-block', 'restart', fr'nvmf-connect@{options}.service']
|
||||
subprocess.run(cmd, check=False)
|
||||
|
|
|
@ -21,3 +21,28 @@ class Singleton(type):
|
|||
instance = super(Singleton, cls).__call__(*args, **kwargs)
|
||||
cls._instances[cls] = instance
|
||||
return cls._instances[cls]
|
||||
|
||||
def destroy(cls):
|
||||
'''Delete a singleton instance.
|
||||
|
||||
This is to be invoked using the derived class Name.
|
||||
|
||||
For example:
|
||||
|
||||
class Child(Singleton):
|
||||
pass
|
||||
|
||||
child1 = Child() # Instantiate singleton
|
||||
child2 = Child() # Get a reference to the singleton
|
||||
|
||||
print(f'{child1 is child2}') # True
|
||||
|
||||
Child.destroy() # Delete the singleton
|
||||
|
||||
print(f'{child1 is child2}') # Still True because child1 and child2 still hold reference to the singleton
|
||||
|
||||
child1 = Child() # Instantiate a new singleton and assign to child1
|
||||
|
||||
print(f'{child1 is child2}') # False
|
||||
'''
|
||||
cls._instances.pop(cls, None)
|
||||
|
|
|
@ -18,7 +18,7 @@ import logging
|
|||
import dasbus.connection
|
||||
from gi.repository import Gio, GLib
|
||||
from systemd.daemon import notify as sd_notify
|
||||
from staslib import conf, defs, gutil, log, trid
|
||||
from staslib import conf, defs, gutil, iputil, log, trid
|
||||
|
||||
try:
|
||||
# Python 3.9 or later
|
||||
|
@ -86,6 +86,44 @@ def check_if_allowed_to_continue():
        sys.exit('Fatal error: missing nvme-tcp kernel module')


# ******************************************************************************
def remove_invalid_addresses(controllers: list):
    '''@brief Remove controllers with invalid addresses from the list of controllers.
    @param controllers: List of TIDs
    '''
    service_conf = conf.SvcConf()
    valid_controllers = list()
    for controller in controllers:
        if controller.transport in ('tcp', 'rdma'):
            # Let's make sure that traddr is
            # syntactically a valid IPv4 or IPv6 address.
            ip = iputil.get_ipaddress_obj(controller.traddr)
            if ip is None:
                logging.warning('%s IP address is not valid', controller)
                continue

            # Let's make sure the address family is enabled.
            if ip.version not in service_conf.ip_family:
                logging.debug(
                    '%s ignored because IPv%s is disabled in %s',
                    controller,
                    ip.version,
                    service_conf.conf_file,
                )
                continue

            valid_controllers.append(controller)

        elif controller.transport in ('fc', 'loop'):
            # At some point, need to validate FC addresses as well...
            valid_controllers.append(controller)

        else:
            logging.warning('Invalid transport %s', controller.transport)

    return valid_controllers


# ******************************************************************************
def tid_from_dlpe(dlpe, host_traddr, host_iface):
    '''@brief Take a Discovery Log Page Entry and return a Controller ID as a dict.'''

@ -492,7 +530,9 @@ class ServiceABC(abc.ABC):  # pylint: disable=too-many-instance-attributes
        # elements after name resolution is complete (i.e. in the callback
        # function _config_ctrls_finish)
        logging.debug('ServiceABC._config_ctrls()')
        configured_controllers = [trid.TID(cid) for cid in conf.SvcConf().get_controllers()]
        configured_controllers = [
            trid.TID(cid) for cid in conf.SvcConf().get_controllers() + conf.NbftConf().get_controllers()
        ]
        configured_controllers = remove_excluded(configured_controllers)
        self._resolver.resolve_ctrl_async(self._cancellable, configured_controllers, self._config_ctrls_finish)
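A quick sketch of how remove_invalid_addresses() is meant to be used (mirroring what test-iputil.py does further down). The addresses are made up for the example, and the first TID only survives if IPv4 is enabled in the service configuration:

from staslib import stas, trid

candidates = [
    trid.TID({'transport': 'tcp', 'traddr': '1.1.1.1', 'subsysnqn': '', 'trsvcid': '8009'}),          # valid
    trid.TID({'transport': 'tcp', 'traddr': '555.555.555.555', 'subsysnqn': '', 'trsvcid': '8009'}),  # malformed IPv4
    trid.TID({'transport': 'whatever', 'traddr': '1.1.1.1', 'subsysnqn': '', 'trsvcid': '8009'}),     # unknown transport
]
usable = stas.remove_invalid_addresses(candidates)  # the malformed address and unknown transport are dropped (and logged)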
@ -61,7 +61,6 @@ class TID:  # pylint: disable=too-many-instance-attributes
        self._host_traddr = cid.get('host-traddr', '')
        self._host_iface = '' if conf.SvcConf().ignore_iface else cid.get('host-iface', '')
        self._subsysnqn = cid.get('subsysnqn', '')
        self._shortkey = (self._transport, self._traddr, self._trsvcid, self._subsysnqn, self._host_traddr)
        self._key = (self._transport, self._traddr, self._trsvcid, self._subsysnqn, self._host_traddr, self._host_iface)
        self._hash = int.from_bytes(
            hashlib.md5(''.join(self._key).encode('utf-8')).digest(), 'big'

@ -121,22 +120,10 @@ class TID:  # pylint: disable=too-many-instance-attributes
        return self._id

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False

        if self._host_iface and other._host_iface:
            return self._key == other._key

        return self._shortkey == other._shortkey
        return isinstance(other, self.__class__) and self._key == other._key

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return True

        if self._host_iface and other._host_iface:
            return self._key != other._key

        return self._shortkey != other._shortkey
        return not isinstance(other, self.__class__) or self._key != other._key

    def __hash__(self):
        return self._hash
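The practical effect of dropping _shortkey is that host-iface now always participates in TID equality. A small sketch with hypothetical key tuples:

# Mirrors TID._key = (transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface)
key_a = ('tcp', '1.2.3.4', '8009', 'nqn.example', '', 'eth0')
key_b = ('tcp', '1.2.3.4', '8009', 'nqn.example', '', '')

assert key_a[:5] == key_b[:5]  # old behavior: the shortkey comparison treated these as equal when a host_iface was blank
assert key_a != key_b          # new behavior: the full key, including host_iface, must match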
193
staslib/udev.py
@ -153,6 +153,169 @@ class Udev:

        return False

    @staticmethod
    def _cid_matches_tcp_tid_legacy(tid, cid):  # pylint: disable=too-many-return-statements,too-many-branches
        '''On kernels older than 6.1, the src_addr parameter is not available
        from the sysfs. Therefore, we need to infer a match based on other
        parameters. And there are a few cases where we're simply not sure
        whether an existing connection (cid) matches the candidate
        connection (tid).
        '''
        cid_host_iface = cid['host-iface']
        cid_host_traddr = iputil.get_ipaddress_obj(cid['host-traddr'], ipv4_mapped_convert=True)

        if not cid_host_iface:  # cid.host_iface is undefined
            if not cid_host_traddr:  # cid.host_traddr is undefined
                # When the existing cid.src_addr, cid.host_traddr, and cid.host_iface
                # are all undefined (which can only happen on kernels prior to 6.1),
                # we can't know for sure on which interface an existing connection
                # was made. In this case, we can only declare a match if both
                # tid.host_iface and tid.host_traddr are undefined as well.
                logging.debug(
                    'Udev._cid_matches_tcp_tid_legacy() - cid=%s, tid=%s - Not enough info. Assume "match" but this could be wrong.',
                    cid,
                    tid,
                )
                return True

            # cid.host_traddr is defined. If tid.host_traddr is also
            # defined, then it must match the existing cid.host_traddr.
            if tid.host_traddr:
                tid_host_traddr = iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                if tid_host_traddr != cid_host_traddr:
                    return False

            # If tid.host_iface is defined, then the interface where
            # the connection is located must match. If tid.host_iface
            # is not defined, then we don't really care on which
            # interface the connection was made and we can skip this test.
            if tid.host_iface:
                # With the existing cid.host_traddr, we can find the
                # interface of the existing connection.
                connection_iface = iputil.get_interface(str(cid_host_traddr))
                if tid.host_iface != connection_iface:
                    return False

            return True

        # cid.host_iface is defined
        if not cid_host_traddr:  # cid.host_traddr is undefined
            if tid.host_iface and tid.host_iface != cid_host_iface:
                return False

            if tid.host_traddr:
                # It's impossible to tell the existing connection source
                # address. So, we can't tell if it matches tid.host_traddr.
                # However, if the existing host_iface has only one source
                # address assigned to it, we can assume that the source
                # address used for the existing connection is that address.
                if_addrs = iputil.net_if_addrs().get(cid_host_iface, {4: [], 6: []})
                tid_host_traddr = iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                source_addrs = if_addrs[tid_host_traddr.version]
                if len(source_addrs) != 1:
                    return False

                src_addr0 = iputil.get_ipaddress_obj(source_addrs[0], ipv4_mapped_convert=True)
                if src_addr0 != tid_host_traddr:
                    return False

            return True

        # cid.host_traddr is defined
        if tid.host_iface and tid.host_iface != cid_host_iface:
            return False

        if tid.host_traddr:
            tid_host_traddr = iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
            if tid_host_traddr != cid_host_traddr:
                return False

        return True

    @staticmethod
    def _cid_matches_tid(tid, cid):  # pylint: disable=too-many-return-statements,too-many-branches
        '''Check if existing controller's cid matches candidate controller's tid.
        @param cid: The Connection ID of an existing controller (from the sysfs).
        @param tid: The Transport ID of a candidate controller.

        We're trying to find if an existing connection (specified by cid) can
        be re-used for the candidate controller (specified by tid).

        We do not have a match if the candidate's tid.transport, tid.traddr,
        tid.trsvcid, and tid.subsysnqn are not identical to those of the cid.
        These 4 parameters are mandatory for a match.

        The tid.host_traddr and tid.host_iface depend on the transport type.
        These parameters may not apply or have a different syntax/meaning
        depending on the transport type.

        For TCP only:
        With regards to the candidate's tid.host_traddr and tid.host_iface,
        if those are defined but do not match the existing cid.host_traddr
        and cid.host_iface, we may still be able to find a match by taking
        the existing cid.src_addr into consideration since that parameter
        identifies the actual source address of the connection and therefore
        can be used to infer the interface of the connection. However, the
        cid.src_addr can only be read from the sysfs starting with kernel
        6.1.
        '''
        # 'transport', 'traddr', 'trsvcid', and 'subsysnqn' must exactly match.
        if cid['transport'] != tid.transport or cid['trsvcid'] != tid.trsvcid or cid['subsysnqn'] != tid.subsysnqn:
            return False

        if tid.transport in ('tcp', 'rdma'):
            # Need to convert to ipaddress objects to properly
            # handle all variations of IPv6 addresses.
            tid_traddr = iputil.get_ipaddress_obj(tid.traddr, ipv4_mapped_convert=True)
            cid_traddr = iputil.get_ipaddress_obj(cid['traddr'], ipv4_mapped_convert=True)
        else:
            cid_traddr = cid['traddr']
            tid_traddr = tid.traddr

        if cid_traddr != tid_traddr:
            return False

        # We need to know the type of transport to compare 'host-traddr' and
        # 'host-iface'. These parameters don't apply to all transport types
        # and may have a different meaning/syntax.
        if tid.transport == 'tcp':
            if tid.host_traddr or tid.host_iface:
                src_addr = iputil.get_ipaddress_obj(cid['src-addr'], ipv4_mapped_convert=True)
                if not src_addr:
                    # For legacy kernels (i.e. older than 6.1), the existing cid.src_addr
                    # is always undefined. We need to use advanced logic to determine
                    # whether cid and tid match.
                    return Udev._cid_matches_tcp_tid_legacy(tid, cid)

                # The existing controller's cid.src_addr is always defined for kernel
                # 6.1 and later. We can use the existing controller's cid.src_addr to
                # find the interface on which the connection was made and therefore
                # match it to the candidate's tid.host_iface. And the cid.src_addr
                # can also be used to match the candidate's tid.host_traddr.
                if tid.host_traddr:
                    tid_host_traddr = iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                    if tid_host_traddr != src_addr:
                        return False

                # host-iface is an optional tcp-only parameter.
                if tid.host_iface and tid.host_iface != iputil.get_interface(str(src_addr)):
                    return False

        elif tid.transport == 'fc':
            # host-traddr is mandatory for FC.
            if tid.host_traddr != cid['host-traddr']:
                return False

        elif tid.transport == 'rdma':
            # host-traddr is optional for RDMA and is expressed as an IP address.
            if tid.host_traddr:
                tid_host_traddr = iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                cid_host_traddr = iputil.get_ipaddress_obj(cid['host-traddr'], ipv4_mapped_convert=True)
                if tid_host_traddr != cid_host_traddr:
                    return False

        return True

    def find_nvme_dc_device(self, tid):
        '''@brief Find the nvme device associated with the specified
        Discovery Controller.
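The repeated calls to iputil.get_ipaddress_obj(..., ipv4_mapped_convert=True) above exist because the same endpoint can be spelled two ways: plain IPv4 in the configuration and IPv4-mapped IPv6 in the kernel's sysfs attributes. A sketch of the idea using the standard ipaddress module (not the iputil implementation itself):

import ipaddress

cfg = ipaddress.ip_address('192.168.1.100')           # hypothetical configured traddr
sysfs = ipaddress.ip_address('::ffff:192.168.1.100')  # same endpoint, as it may be reported by the kernel

assert cfg != sysfs              # a naive comparison fails: different address families
assert cfg == sysfs.ipv4_mapped  # normalizing the IPv4-mapped form makes them comparable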
@ -164,7 +327,8 @@ class Udev:
            if not self.is_dc_device(device):
                continue

            if self.get_tid(device) != tid:
            cid = self.get_cid(device)
            if not self._cid_matches_tid(tid, cid):
                continue

            return device

@ -182,7 +346,8 @@ class Udev:
            if not self.is_ioc_device(device):
                continue

            if self.get_tid(device) != tid:
            cid = self.get_cid(device)
            if not self._cid_matches_tid(tid, cid):
                continue

            return device

@ -300,28 +465,32 @@ class Udev:
        return attr_str[start:end]

    @staticmethod
    def _get_host_iface(device):
        host_iface = Udev._get_property(device, 'NVME_HOST_IFACE')
        if not host_iface:
    def get_tid(device):
        '''@brief return the Transport ID associated with a udev device'''
        cid = Udev.get_cid(device)
        if cid['transport'] == 'tcp':
            src_addr = cid['src-addr']
            if not cid['host-iface'] and src_addr:
                # We'll try to find the interface from the source address on
                # the connection. Only available if kernel exposes the source
                # address (src_addr) in the "address" attribute.
                src_addr = Udev.get_key_from_attr(device, 'address', 'src_addr=')
                host_iface = iputil.get_interface(src_addr)
        return host_iface
                cid['host-iface'] = iputil.get_interface(src_addr)

        return trid.TID(cid)

    @staticmethod
    def get_tid(device):
        '''@brief return the Transport ID associated with a udev device'''
    def get_cid(device):
        '''@brief return the Connection ID associated with a udev device'''
        cid = {
            'transport': Udev._get_property(device, 'NVME_TRTYPE'),
            'traddr': Udev._get_property(device, 'NVME_TRADDR'),
            'trsvcid': Udev._get_property(device, 'NVME_TRSVCID'),
            'host-traddr': Udev._get_property(device, 'NVME_HOST_TRADDR'),
            'host-iface': Udev._get_host_iface(device),
            'host-iface': Udev._get_property(device, 'NVME_HOST_IFACE'),
            'subsysnqn': Udev._get_attribute(device, 'subsysnqn'),
            'src-addr': Udev.get_key_from_attr(device, 'address', 'src_addr='),
        }
        return trid.TID(cid)
        return cid


UDEV = Udev()  # Singleton
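get_cid() pulls src-addr out of the device's sysfs "address" attribute, which on recent kernels (6.1 and later) typically looks like a comma-separated key=value list. Below is a hypothetical stand-in for Udev.get_key_from_attr(), for illustration only; the attribute contents are made up.

def get_key_from_attr_sketch(attr_str: str, key: str, delim: str = ',') -> str:
    '''Return the value following "key" in a delimited attribute string.'''
    for field in attr_str.split(delim):
        if field.startswith(key):
            return field[len(key):].strip()
    return ''

# Hypothetical "address" attribute of a TCP connection:
addr = 'traddr=192.168.1.50,trsvcid=8009,src_addr=192.168.1.100'
print(get_key_from_attr_sketch(addr, 'src_addr='))  # 192.168.1.100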
@ -1,6 +1,6 @@
[wrap-git]
url = https://github.com/linux-nvme/libnvme.git
revision = v1.3
revision = HEAD

[provide]
libnvme = libnvme_dep
BIN
test/NBFT
Normal file
Binary file not shown.
0
test/NBFT-Empty
Normal file
@ -7,6 +7,7 @@
# Authors: Martin Belanger <Martin.Belanger@dell.com>
#

srce_dir = meson.current_source_dir()
test_env = environment({'MALLOC_PERTURB_': '0'})

libnvme_location = '?'

@ -22,6 +23,9 @@ if get_option('libnvme-sel') == 'pre-installed'
    rr = run_command(python3, '-c', 'import libnvme; print(f"{libnvme.__path__[0]}")', check: false, env: test_env)
    if rr.returncode() == 0
        libnvme_location = rr.stdout().strip()
        pythonpath = fs.parent(libnvme_location)
        test_env.prepend('PYTHONPATH', pythonpath)  # Look in standard location first
        test_env.append('PYTHONPATH', PYTHONPATH)   # Look in the build directory second
    endif
endif

@ -40,7 +44,6 @@ endif
if libnvme_location == '?'
    warning('Missing runtime package needed to run the tests: python3-libnvme.')
else
    message('\n\n\u001b[32m\u001b[1mNOTE: Tests will be using @0@\u001b[0m\n'.format(libnvme_location))
    #---------------------------------------------------------------------------
    # pylint and pyflakes
    if modules_to_lint.length() != 0

@ -53,7 +56,7 @@ else
        endif
    endif

    rcfile = meson.current_source_dir() / 'pylint.rc'
    rcfile = srce_dir / 'pylint.rc'

    if pylint.found()
        test('pylint', pylint, args: ['--rcfile=' + rcfile] + modules_to_lint, env: test_env)

@ -91,32 +94,37 @@ else
    #---------------------------------------------------------------------------
    # Unit tests
    things_to_test = [
        ['Test Configuration', 'test-config.py', []],
        ['Test Controller', 'test-controller.py', ['pyfakefs']],
        ['Test GTimer', 'test-gtimer.py', []],
        ['Test iputil', 'test-iputil.py', []],
        ['Test KernelVersion', 'test-version.py', []],
        ['Test log', 'test-log.py', ['pyfakefs']],
        ['Test NvmeOptions', 'test-nvme_options.py', ['pyfakefs']],
        ['Test Service', 'test-service.py', ['pyfakefs']],
        ['Test TID', 'test-transport_id.py', []],
        ['Test Udev', 'test-udev.py', []],
        ['Test timeparse', 'test-timeparse.py', []],
        ['Test Configuration', [], [srce_dir / 'test-config.py', ]],
        ['Test Controller', ['pyfakefs'], [srce_dir / 'test-controller.py', ]],
        ['Test GTimer', [], [srce_dir / 'test-gtimer.py', ]],
        ['Test iputil', [], [srce_dir / 'test-iputil.py', ]],
        ['Test KernelVersion', [], [srce_dir / 'test-version.py', ]],
        ['Test log', ['pyfakefs'], [srce_dir / 'test-log.py', ]],
        ['Test NBFT', [], [srce_dir / 'test-nbft.py', ]],
        ['Test NbftConf', [], [srce_dir / 'test-nbft_conf.py', ]],
        ['Test NvmeOptions', ['pyfakefs'], [srce_dir / 'test-nvme_options.py', ]],
        ['Test Service', ['pyfakefs'], [srce_dir / 'test-service.py', ]],
        ['Test TID', [], [srce_dir / 'test-transport_id.py', ]],
        ['Test defs.py', [], [srce_dir / 'test-defs.py', ]],
        ['Test gutil.py', [], [srce_dir / 'test-gutil.py', ]],
        ['Test Udev', [], [srce_dir / 'test-udev.py', ]],
        ['Test timeparse', [], [srce_dir / 'test-timeparse.py', ]],
    ]

    # The Avahi test requires the Avahi and the Dbus daemons to be running.
    if want_avahi_test
        things_to_test += [['Test Avahi', 'test-avahi.py', []]]
        things_to_test += [['Test Avahi', [], [srce_dir / 'test-avahi.py']]]
    else
        warning('Skip Avahi Test due to missing dependencies')
    endif

    foreach thing: things_to_test
        msg = thing[0]
        deps = thing[1]
        args = thing[2]

        # Check whether all dependencies can be found
        missing_deps = []
        deps = thing[2]
        foreach dep : deps
            rr = run_command(python3, '-c', 'import @0@'.format(dep), check: false)
            if rr.returncode() != 0

@ -126,8 +134,7 @@ else

        if missing_deps.length() == 0
            # Allow the test to run if all dependencies are available
            script = meson.current_source_dir() / thing[1]
            test(msg, python3, args: script, env: test_env)
            test(msg, python3, args: args, env: test_env)
        else
            warning('"@0@" requires python module "@1@"'.format(msg, missing_deps))
        endif

@ -138,15 +145,15 @@ endif
#-------------------------------------------------------------------------------
# Make sure code complies with minimum Python version requirement.
tools = [
    meson.current_source_dir() / '../doc',
    meson.current_source_dir() / '../utils',
    srce_dir / '../doc',
    srce_dir / '../utils',
]
vermin = find_program('vermin', required: false)
if vermin.found()
    if modules_to_lint.length() != 0
        test('vermin code', vermin, args: ['--config-file', meson.current_source_dir() / 'vermin.conf'] + modules_to_lint, env: test_env)
        test('vermin code', vermin, args: ['--config-file', srce_dir / 'vermin.conf'] + modules_to_lint, env: test_env)
    endif
    test('vermin tools', vermin, args: ['--config-file', meson.current_source_dir() / 'vermin-tools.conf'] + tools, env: test_env)
    test('vermin tools', vermin, args: ['--config-file', srce_dir / 'vermin-tools.conf'] + tools, env: test_env)
else
    warning('Skiping some of the tests because "vermin" is missing.')
endif
@ -36,6 +36,26 @@ class Test(unittest.TestCase):
        srv.kill()
        self.assertEqual(srv.info(), {'avahi wake up timer': 'None', 'service types': [], 'services': {}})

    def test__txt2dict(self):
        txt = [
            list('NqN=Starfleet'.encode('utf-8')),
            list('p=tcp'.encode('utf-8')),
        ]
        self.assertEqual(avahi._txt2dict(txt), {'nqn': 'Starfleet', 'p': 'tcp'})

        txt = [
            list('Nqn=Starfleet'.encode('utf-8')),
            list('p='.encode('utf-8')),    # Try with a missing value for p
            list('blah'.encode('utf-8')),  # Missing '='
            list('='.encode('utf-8')),     # Just '='
        ]
        self.assertEqual(avahi._txt2dict(txt), {'nqn': 'Starfleet', 'p': ''})

        txt = [
            [1000, ord('='), 123456],  # Try with non printable characters
        ]
        self.assertEqual(avahi._txt2dict(txt), {})


if __name__ == '__main__':
    unittest.main()

@ -157,6 +157,11 @@ class StasProcessConfUnitTest(unittest.TestCase):

        self.assertRaises(KeyError, service_conf.get_option, 'Babylon', 5)

    def test__parse_single_val(self):
        self.assertEqual(conf._parse_single_val('hello'), 'hello')
        self.assertIsNone(conf._parse_single_val(None))
        self.assertEqual(conf._parse_single_val(['hello', 'goodbye']), 'goodbye')


class StasSysConfUnitTest(unittest.TestCase):
    '''Sys config unit tests'''
59
test/test-defs.py
Executable file
@ -0,0 +1,59 @@
#!/usr/bin/python3
import os
import sys
import unittest
from unittest import mock


class MockLibnvmeTestCase(unittest.TestCase):
    '''Testing defs.py by mocking the libnvme package'''

    def test_libnvme_version(self):
        # For unknown reasons, this test does
        # not work when run from GitHub Actions.
        if not os.getenv('GITHUB_ACTIONS'):
            from staslib import defs

            libnvme_ver = defs.LIBNVME_VERSION
            self.assertEqual(libnvme_ver, '?.?')

    @classmethod
    def setUpClass(cls):  # called once before all the tests
        # define what to patch sys.modules with
        cls._modules_patcher = mock.patch.dict(sys.modules, {'libnvme': mock.Mock()})

        # actually patch it
        cls._modules_patcher.start()

        # make the package globally visible and import it,
        # just like if you have imported it in a usual way
        # placing import statement at the top of the file,
        # but relying on a patched dependency
        global libnvme
        import libnvme

    @classmethod  # called once after all tests
    def tearDownClass(cls):
        # restore initial sys.modules state back
        cls._modules_patcher.stop()


class RealLibnvmeUnitTest(unittest.TestCase):
    '''Testing defs.py with the real libnvme package'''

    def test_libnvme_version(self):
        try:
            # We can't proceed with this test if the
            # module libnvme is not installed.
            import libnvme
        except ModuleNotFoundError:
            return

        from staslib import defs

        libnvme_ver = defs.LIBNVME_VERSION
        self.assertNotEqual(libnvme_ver, '?.?')


if __name__ == '__main__':
    unittest.main()
33
test/test-gutil.py
Executable file
@ -0,0 +1,33 @@
#!/usr/bin/python3
import unittest
from staslib import gutil


class GutilUnitTest(unittest.TestCase):
    '''Run unit test for gutil.py'''

    def _on_success(self, op_obj: gutil.AsyncTask, data):
        op_obj.kill()

    def _on_fail(self, op_obj: gutil.AsyncTask, err, fail_cnt):
        op_obj.kill()

    def _operation(self, data):
        return data

    def test_AsyncTask(self):
        op = gutil.AsyncTask(self._on_success, self._on_fail, self._operation, 'hello')

        self.assertIsInstance(str(op), str)
        self.assertEqual(op.as_dict(), {'fail count': 0, 'completed': None, 'alive': True})

        op.retry(10)
        self.assertIsNotNone(op.as_dict().get('retry timer'))

        errmsg = 'something scarry happened'
        op._errmsg = errmsg
        self.assertEqual(op.as_dict().get('error'), errmsg)


if __name__ == '__main__':
    unittest.main()
@ -1,11 +1,12 @@
#!/usr/bin/python3
import json
import shutil
import socket
import logging
import unittest
import ipaddress
import subprocess
from staslib import iputil, log, trid
from staslib import iputil, log, stas, trid

IP = shutil.which('ip')

@ -39,6 +40,12 @@ class Test(unittest.TestCase):

        self.assertEqual('', iputil.get_interface('255.255.255.255'))

    def test_mac2iface(self):
        for iface in self.ifaces:
            address = iface.get('address', None)
            if address:
                self.assertEqual(iface['ifname'], iputil.mac2iface(address))

    def test_remove_invalid_addresses(self):
        good_tcp = trid.TID({'transport': 'tcp', 'traddr': '1.1.1.1', 'subsysnqn': '', 'trsvcid': '8009'})
        bad_tcp = trid.TID({'transport': 'tcp', 'traddr': '555.555.555.555', 'subsysnqn': '', 'trsvcid': '8009'})

@ -51,7 +58,7 @@ class Test(unittest.TestCase):
            any_fc,
            bad_trtype,
        ]
        l2 = iputil.remove_invalid_addresses(l1)
        l2 = stas.remove_invalid_addresses(l1)

        self.assertNotEqual(l1, l2)

@ -61,6 +68,11 @@ class Test(unittest.TestCase):
        self.assertNotIn(bad_tcp, l2)
        self.assertNotIn(bad_trtype, l2)

    def test__data_matches_ip(self):
        self.assertFalse(iputil._data_matches_ip(None, None, None))
        self.assertFalse(iputil._data_matches_ip(socket.AF_INET, None, None))
        self.assertFalse(iputil._data_matches_ip(socket.AF_INET6, None, None))


if __name__ == "__main__":
    unittest.main()
105
test/test-nbft.py
Executable file
@ -0,0 +1,105 @@
#!/usr/bin/python3
import os
import unittest
from staslib import defs, nbft
from libnvme import nvme
from argparse import ArgumentParser

TEST_DIR = os.path.dirname(__file__)
NBFT_FILE = os.path.join(TEST_DIR, "NBFT")
EMPTY_NBFT_FILE = os.path.join(TEST_DIR, "NBFT-Empty")
NBFT_DATA = {
    "discovery": [
        {
            "hfi_index": 0,
            "nqn": "nqn.2014-08.org.nvmexpress.discovery",
            "uri": "nvme+tcp://100.71.103.50:8009/",
        }
    ],
    "hfi": [
        {
            "dhcp_override": True,
            "dhcp_server_ipaddr": "100.71.245.254",
            "gateway_ipaddr": "100.71.245.254",
            "ip_origin": 82,
            "ipaddr": "100.71.245.232",
            "mac_addr": "b0:26:28:e8:7c:0e",
            "pcidev": "0:40:0.0",
            "primary_dns_ipaddr": "100.64.0.5",
            "route_metric": 500,
            "secondary_dns_ipaddr": "100.64.0.6",
            "subnet_mask_prefix": 24,
            "this_hfi_is_default_route": True,
            "trtype": "tcp",
            "vlan": 0,
        }
    ],
    "host": {
        "host_id_configured": True,
        "host_nqn_configured": True,
        "id": "44454c4c-3400-1036-8038-b2c04f313233",
        "nqn": "nqn.1988-11.com.dell:PowerEdge.R760.1234567",
        "primary_admin_host_flag": "not " "indicated",
    },
    "subsystem": [
        {
            "asqsz": 0,
            "controller_id": 5,
            "data_digest_required": False,
            "hfi_indexes": [0],
            "nid": "c82404ed9c15f53b8ccf0968002e0fca",
            "nid_type": "nguid",
            "nsid": 148,
            "pdu_header_digest_required": False,
            "subsys_nqn": "nqn.1988-11.com.dell:powerstore:00:2a64abf1c5b81F6C4549",
            "subsys_port_id": 0,
            "traddr": "100.71.103.48",
            "trsvcid": "4420",
            "trtype": "tcp",
        },
        {
            "asqsz": 0,
            "controller_id": 4166,
            "data_digest_required": False,
            "hfi_indexes": [0],
            "nid": "c82404ed9c15f53b8ccf0968002e0fca",
            "nid_type": "nguid",
            "nsid": 148,
            "pdu_header_digest_required": False,
            "subsys_nqn": "nqn.1988-11.com.dell:powerstore:00:2a64abf1c5b81F6C4549",
            "subsys_port_id": 0,
            "traddr": "100.71.103.49",
            "trsvcid": "4420",
            "trtype": "tcp",
        },
    ],
}


class Test(unittest.TestCase):
    """Unit tests for NBFT"""

    def setUp(self):
        # Depending on the version of libnvme installed
        # we may or may not have access to NBFT support.
        nbft_get = getattr(nvme, "nbft_get", None)
        if defs.HAS_NBFT_SUPPORT:
            self.expected_nbft = {
                NBFT_FILE: NBFT_DATA,
                EMPTY_NBFT_FILE: {},
            }
        else:
            self.expected_nbft = {}

    def test_dir_with_nbft_files(self):
        """Make sure we get expected data when reading from binary NBFT file"""
        actual_nbft = nbft.get_nbft_files(TEST_DIR)
        self.assertEqual(actual_nbft, self.expected_nbft)

    def test_dir_without_nbft_files(self):
        actual_nbft = nbft.get_nbft_files("/tmp")
        self.assertEqual(actual_nbft, {})


if __name__ == "__main__":
    unittest.main()
56
test/test-nbft_conf.py
Executable file
@ -0,0 +1,56 @@
#!/usr/bin/python3
import os
import logging
import unittest
from staslib import conf

TEST_DIR = os.path.dirname(__file__)
EXPECTED_DCS = [
    {
        'subsysnqn': 'nqn.2014-08.org.nvmexpress.discovery',
        'traddr': '100.71.103.50',
        'transport': 'tcp',
        'trsvcid': '8009',
    }
]
EXPECTED_IOCS = [
    {
        'data-digest': False,
        'hdr-digest': False,
        'subsysnqn': 'nqn.1988-11.com.dell:powerstore:00:2a64abf1c5b81F6C4549',
        'traddr': '100.71.103.48',
        'transport': 'tcp',
        'trsvcid': '4420',
    },
    {
        'data-digest': False,
        'hdr-digest': False,
        'subsysnqn': 'nqn.1988-11.com.dell:powerstore:00:2a64abf1c5b81F6C4549',
        'traddr': '100.71.103.49',
        'transport': 'tcp',
        'trsvcid': '4420',
    },
]


class Test(unittest.TestCase):
    """Unit tests for class NbftConf"""

    def test_dir_with_nbft_files(self):
        conf.NbftConf.destroy()  # Make sure singleton does not exist
        with self.assertLogs(logger=logging.getLogger(), level='DEBUG') as captured:
            nbft_conf = conf.NbftConf(TEST_DIR)
        self.assertNotEqual(-1, captured.records[0].getMessage().find("NBFT location(s):"))
        self.assertEqual(nbft_conf.dcs, EXPECTED_DCS)
        self.assertEqual(nbft_conf.iocs, EXPECTED_IOCS)

    def test_dir_without_nbft_files(self):
        conf.NbftConf.destroy()  # Make sure singleton does not exist
        with self.assertNoLogs(logger=logging.getLogger(), level='DEBUG'):
            nbft_conf = conf.NbftConf('/tmp')
        self.assertEqual(nbft_conf.dcs, [])
        self.assertEqual(nbft_conf.iocs, [])


if __name__ == "__main__":
    unittest.main()
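Outside of the unit tests, this same NbftConf singleton is what stas.py consults when building the list of configured controllers (see the ServiceABC._config_ctrls hunk earlier). A hedged usage sketch; the directory argument below is hypothetical and NBFT parsing requires a libnvme build with NBFT support:

from staslib import conf

nbft_conf = conf.NbftConf('/path/to/nbft/tables')  # hypothetical NBFT location
print(nbft_conf.dcs)   # discovery controllers parsed from the NBFT
print(nbft_conf.iocs)  # I/O controllers, as dicts that can be fed to trid.TID() like SvcConf().get_controllers() entries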

@ -6,6 +6,19 @@ from staslib import conf, log
from pyfakefs.fake_filesystem_unittest import TestCase


class TestStandardNvmeFabricsFile(unittest.TestCase):
    def test_regular_user(self):
        conf.NvmeOptions.destroy()  # Make sure singleton does not exist
        if os.path.exists('/dev/nvme-fabrics'):
            if os.geteuid() != 0:
                with self.assertRaises(PermissionError):
                    nvme_options = conf.NvmeOptions()
            else:
                nvme_options = conf.NvmeOptions()
                self.assertIsInstance(nvme_options.discovery_supp, bool)
                self.assertIsInstance(nvme_options.host_iface_supp, bool)


class Test(TestCase):
    """Unit tests for class NvmeOptions"""

@ -19,24 +32,30 @@ class Test(TestCase):
        # No longer need self.tearDownPyfakefs()
        pass

    def test_fabrics_empty_file(self):
    def test_file_missing(self):
        self.assertFalse(os.path.exists("/dev/nvme-fabrics"))
        # TODO: this is a bug
        self.fs.create_file("/dev/nvme-fabrics")
        self.assertTrue(os.path.exists('/dev/nvme-fabrics'))
        conf.NvmeOptions.destroy()  # Make sure singleton does not exist
        nvme_options = conf.NvmeOptions()
        self.assertIsInstance(nvme_options.discovery_supp, bool)
        self.assertIsInstance(nvme_options.host_iface_supp, bool)

    def test_fabrics_empty_file(self):
        self.assertFalse(os.path.exists("/dev/nvme-fabrics"))
        self.fs.create_file("/dev/nvme-fabrics")
        self.assertTrue(os.path.exists('/dev/nvme-fabrics'))
        conf.NvmeOptions.destroy()  # Make sure singleton does not exist
        nvme_options = conf.NvmeOptions()
        self.assertIsInstance(nvme_options.discovery_supp, bool)
        self.assertIsInstance(nvme_options.host_iface_supp, bool)
        del nvme_options

    def test_fabrics_wrong_file(self):
        self.assertFalse(os.path.exists("/dev/nvme-fabrics"))
        self.fs.create_file("/dev/nvme-fabrics", contents="blah")
        self.assertTrue(os.path.exists('/dev/nvme-fabrics'))
        conf.NvmeOptions.destroy()  # Make sure singleton does not exist
        nvme_options = conf.NvmeOptions()
        self.assertIsInstance(nvme_options.discovery_supp, bool)
        self.assertIsInstance(nvme_options.host_iface_supp, bool)
        del nvme_options

    def test_fabrics_correct_file(self):
        self.assertFalse(os.path.exists("/dev/nvme-fabrics"))

@ -44,6 +63,7 @@ class Test(TestCase):
            '/dev/nvme-fabrics', contents='host_iface=%s,discovery,dhchap_secret=%s,dhchap_ctrl_secret=%s\n'
        )
        self.assertTrue(os.path.exists('/dev/nvme-fabrics'))
        conf.NvmeOptions.destroy()  # Make sure singleton does not exist
        nvme_options = conf.NvmeOptions()
        self.assertTrue(nvme_options.discovery_supp)
        self.assertTrue(nvme_options.host_iface_supp)

@ -54,7 +74,6 @@ class Test(TestCase):
            {'discovery': True, 'host_iface': True, 'dhchap_secret': True, 'dhchap_ctrl_secret': True},
        )
        self.assertTrue(str(nvme_options).startswith("supported options:"))
        del nvme_options


if __name__ == "__main__":
@ -1,6 +1,196 @@
#!/usr/bin/python3
import json
import shutil
import logging
import unittest
from staslib import udev
import ipaddress
import subprocess
from staslib import defs, iputil, log, trid, udev

IP = shutil.which('ip')

TRADDR4 = '1.2.3.4'
TRADDR4_REV = '4.3.2.1'
TRADDR6 = 'FE80::aaaa:BBBB:cccc:dddd'
TRADDR6_REV = 'fe80::DDDD:cccc:bbbb:AAAA'


def traddr(family, reverse=False):
    if reverse:
        return TRADDR4_REV if family == 4 else TRADDR6_REV
    return TRADDR4 if family == 4 else TRADDR6


def get_tids_to_test(family, src_ip, ifname):
    return [
        (1, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': ifname}), True),
        (2, trid.TID({'transport': 'blah', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': ifname}), False),
        (3, trid.TID({'transport': 'tcp', 'traddr': traddr(family, reverse=True), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': ifname}), False),
        (4, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8010', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': ifname}), False),
        (5, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '255.255.255.255', 'host-iface': ifname}), False),
        (6, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': 'blah'}), False),
        (7, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'bob', 'host-traddr': src_ip, 'host-iface': ifname}), False),
        (8, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': ifname}), True),
        (9, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip}), True),
        (10, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello'}), True),
        (11, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ip, 'host-iface': ifname}), True),
        (12, trid.TID({'transport': 'tcp', 'traddr': traddr(family), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': ifname}), True),
    ]


class DummyDevice:
    ...


class Test(unittest.TestCase):
@ -9,6 +199,30 @@ class Test(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def setUp(self):
        log.init(syslog=False)
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Retrieve the list of Interfaces and all the associated IP addresses
        # using standard bash utility (ip address).
        try:
            cmd = [IP, '-j', 'address', 'show']
            p = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
            ifaces = json.loads(p.stdout.decode().strip())
        except subprocess.CalledProcessError:
            ifaces = []

        self.ifaces = {}
        for iface in ifaces:
            addr_info = iface.get('addr_info')
            if addr_info:
                ifname = iface['ifname']
                self.ifaces[ifname] = {}
                for info in addr_info:
                    family = 4 if info['family'] == 'inet' else 6
                    self.ifaces[ifname].setdefault(family, []).append(info['local'])

    @classmethod
    def tearDownClass(cls):
        '''Release resources'''
@ -36,6 +250,434 @@ class Test(unittest.TestCase):
        bogus = udev.UDEV.get_key_from_attr(device, 'bogus', 'BOGUS', '\n')
        self.assertEqual(bogus, '')

    def test_is_dc_device(self):
        device = DummyDevice()
        device.children = ['vera', 'Chuck', 'Dave']
        device.attributes = {}

        self.assertFalse(udev.UDEV.is_dc_device(device))

        device.attributes = {'subsysnqn': defs.WELL_KNOWN_DISC_NQN.encode('utf-8')}
        self.assertTrue(udev.UDEV.is_dc_device(device))

        device.attributes = {'cntrltype': 'discovery'.encode('utf-8')}
        self.assertTrue(udev.UDEV.is_dc_device(device))

        device.attributes = {}
        device.children = []
        self.assertTrue(udev.UDEV.is_dc_device(device))

    def test_is_ioc_device(self):
        device = DummyDevice()
        device.children = []
        device.attributes = {}

        self.assertFalse(udev.UDEV.is_ioc_device(device))

        device.attributes = {'cntrltype': 'io'.encode('utf-8')}
        self.assertTrue(udev.UDEV.is_ioc_device(device))

        device.attributes = {}
        device.children = ['vera', 'Chuck', 'Dave']
        self.assertTrue(udev.UDEV.is_ioc_device(device))

    def test__cid_matches_tid(self):
        for ifname, addrs in self.ifaces.items():
            ##############################################
            # IPV4

            ipv4_addrs = addrs.get(4, [])
            for src_ipv4 in ipv4_addrs:
                cid = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv4, 'host-iface': ifname, 'src-addr': src_ipv4}
                cid_legacy = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv4, 'host-iface': ifname, 'src-addr': ''}  # Legacy
                for case_id, tid, match in get_tids_to_test(4, src_ipv4, ifname):
                    self.assertEqual(match, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case {case_id} failed')
                    if case_id != 8:
                        self.assertEqual(
                            match, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case {case_id} failed'
                        )

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': '', 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '1.1.1.1'})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case A4.1 failed')

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': ifname, 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello'})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case A4.2 failed')
                self.assertEqual(
                    True, udev.UDEV._cid_matches_tcp_tid_legacy(tid, cid_legacy), msg=f'Legacy Test Case A4.3 failed'
                )

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv4, 'host-iface': '', 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '1.1.1.1'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case B4 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': 'blah'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case C4 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': ifname})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case D4 failed')

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': ifname, 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '1.1.1.1', 'host-iface': 'blah'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case E4 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '1.1.1.1'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case F4 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(4), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': ipv4_addrs[0]})
                match = len(ipv4_addrs) == 1 and iputil.get_ipaddress_obj(
                    ipv4_addrs[0], ipv4_mapped_convert=True
                ) == iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                self.assertEqual(match, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case G4 failed')

            ##############################################
            # IPV6

            ipv6_addrs = addrs.get(6, [])
            for src_ipv6 in ipv6_addrs:
                cid = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv6, 'host-iface': ifname, 'src-addr': src_ipv6}
                cid_legacy = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv6, 'host-iface': ifname, 'src-addr': ''}  # Legacy
                for case_id, tid, match in get_tids_to_test(6, src_ipv6, ifname):
                    self.assertEqual(match, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case {case_id} failed')
                    self.assertEqual(
                        match, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case {case_id} failed'
                    )

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': '', 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': 'AAAA::FFFF'})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case A6.1 failed')

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': ifname, 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello'})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case A6.2 failed')
                self.assertEqual(
                    True, udev.UDEV._cid_matches_tcp_tid_legacy(tid, cid_legacy), msg=f'Legacy Test Case A6.3 failed'
                )

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': src_ipv6, 'host-iface': '', 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': 'AAAA::FFFF'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case B6 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': 'blah'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case C6 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-iface': ifname})
                self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case D6 failed')

                cid_legacy = {'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': '', 'host-iface': ifname, 'src-addr': ''}  # Legacy
                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': 'AAA::BBBB', 'host-iface': 'blah'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case E6 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': 'AAA::BBB'})
                self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case F6 failed')

                tid = trid.TID({'transport': 'tcp', 'traddr': traddr(6), 'trsvcid': '8009', 'subsysnqn': 'hello', 'host-traddr': ipv6_addrs[0]})
                match = len(ipv6_addrs) == 1 and iputil.get_ipaddress_obj(
                    ipv6_addrs[0], ipv4_mapped_convert=True
                ) == iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
                self.assertEqual(match, udev.UDEV._cid_matches_tid(tid, cid_legacy), msg=f'Legacy Test Case G6 failed')

        ##############################################
        # FC
        cid = {'transport': 'fc', 'traddr': 'ABC', 'trsvcid': '', 'subsysnqn': 'hello', 'host-traddr': 'AAA::BBBB', 'host-iface': '', 'src-addr': ''}
        tid = trid.TID({'transport': 'fc', 'traddr': 'ABC', 'trsvcid': '', 'subsysnqn': 'hello', 'host-traddr': 'AAA::BBBB'})
        self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case FC-1 failed')

        tid = trid.TID({'transport': 'fc', 'traddr': 'ABC', 'trsvcid': '', 'subsysnqn': 'hello', 'host-traddr': 'BBBB::AAA'})
        self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case FC-2 failed')

        ##############################################
        # RDMA
        cid = {'transport': 'rdma', 'traddr': '2.3.4.5', 'trsvcid': '4444', 'subsysnqn': 'hello', 'host-traddr': '5.4.3.2', 'host-iface': '', 'src-addr': ''}
        tid = trid.TID({'transport': 'rdma', 'traddr': '2.3.4.5', 'trsvcid': '4444', 'subsysnqn': 'hello', 'host-traddr': '5.4.3.2'})
        self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case RDMA-1 failed')

        tid = trid.TID({'transport': 'rdma', 'traddr': '2.3.4.5', 'trsvcid': '4444', 'subsysnqn': 'hello', 'host-traddr': '5.5.6.6'})
        self.assertEqual(False, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case RDMA-2 failed')

        tid = trid.TID({'transport': 'rdma', 'traddr': '2.3.4.5', 'trsvcid': '4444', 'subsysnqn': 'hello'})
        self.assertEqual(True, udev.UDEV._cid_matches_tid(tid, cid), msg=f'Test Case RDMA-3 failed')


if __name__ == '__main__':
    unittest.main()