
Merging upstream version 2.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-03-17 07:17:50 +01:00
parent 50f6a45557
commit c2a4b9519f
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
35 changed files with 364 additions and 122 deletions


@@ -48,7 +48,7 @@ jobs:
 # https://github.com/docker/metadata-action
 - name: Extract Docker metadata
 id: meta
-uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934
+uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
 with:
 images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}


@@ -10,26 +10,33 @@ on:
 jobs:
 meson-build:
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
 steps:
 - name: "CHECKOUT: nvme-stas"
 uses: actions/checkout@v4
-- name: "INSTALL: Overall dependencies"
+- name: "INSTALL: build packages"
 run: |
 sudo apt update
-sudo apt-get install --yes --quiet python3-pip cmake iproute2
-sudo python3 -m pip install --upgrade pip
-sudo python3 -m pip install --upgrade wheel meson ninja
-- name: "INSTALL: nvme-stas dependencies"
+sudo apt-get install --yes --quiet meson ninja-build cmake
+- name: "INSTALL: python packages"
 run: |
-sudo apt-get install --yes --quiet docbook-xml docbook-xsl xsltproc libglib2.0-dev libgirepository1.0-dev libsystemd-dev
-sudo apt-get install --yes --quiet python3-systemd python3-pyudev python3-lxml
-python3 -m pip install --upgrade dasbus pylint==2.17.7 pyflakes PyGObject
-python3 -m pip install --upgrade vermin pyfakefs importlib-resources
-- name: "INSTALL: libnvme dependencies"
+sudo apt-get install --yes --quiet python3-pip python3-wheel pylint pyflakes3 python3-systemd python3-pyudev python3-lxml python3-dasbus python3-gi python3-importlib-resources python3-pyfakefs
+- name: "INSTALL: documentation packages"
+run: |
+sudo apt-get install --yes --quiet docbook-xml docbook-xsl xsltproc
+- name: "INSTALL: remaining debian packages"
+run: |
+sudo apt-get install --yes --quiet iproute2 libglib2.0-dev libgirepository1.0-dev libsystemd-dev
+- name: "INSTALL: pip packages"
+run: |
+pip install vermin
+- name: "INSTALL: libnvme packages (needed to build libnvme)"
 run: |
 sudo apt-get install --yes --quiet swig libjson-c-dev
@@ -46,7 +53,7 @@ jobs:
 options: --print-errorlogs --suite nvme-stas
 # Preserve meson's log file on failure
-- uses: actions/upload-artifact@v3
+- uses: actions/upload-artifact@v4
 if: failure()
 with:
 name: "Linux_Meson_Testlog"
@@ -54,12 +61,11 @@ jobs:
 - name: "Generate coverage report"
 run: |
-python3 -m pip install --upgrade pytest
-python3 -m pip install --upgrade pytest-cov
+sudo apt-get install python3-pytest python3-pytest-cov
 echo $( pwd )
 cp -r .build/staslib/* ./staslib/.
 pytest --cov=./staslib --cov-report=xml test/test-*.py
-- uses: codecov/codecov-action@v3
+- uses: codecov/codecov-action@v5
 with:
 fail_ci_if_error: false


@@ -20,29 +20,47 @@ jobs:
 recursive: true
 ignore: DL3041
-python-lint:
+python-black:
+if: ${{ !github.event.act }} # skip during local actions testing
+name: python-black formatter
 runs-on: ubuntu-latest
+steps:
+- name: "CHECKOUT: nvme-stas"
+uses: actions/checkout@v4
+- name: "BLACK"
+uses: psf/black@25.1.0
+with:
+options: "--check --diff --color --line-length 120 --skip-string-normalization --extend-exclude (subprojects|debian|.build)"
+src: "."
+python-lint-Jammy:
+runs-on: ubuntu-22.04
 strategy:
 fail-fast: false
 matrix:
-python-version: ["3.7", "3.8", "3.9", "3.10"]
+python-version: ["3.9", "3.10"]
 steps:
 - name: "CHECKOUT: nvme-stas"
 uses: actions/checkout@v4
 - name: Set up Python ${{ matrix.python-version }}
-uses: actions/setup-python@v4
+uses: actions/setup-python@v5
 with:
 python-version: ${{ matrix.python-version }}
-- name: "INSTALL: additional packages"
+- name: "INSTALL: apt-get packages"
 run: |
 sudo apt update
-sudo apt-get install --yes --quiet python3-pip cmake libgirepository1.0-dev libsystemd-dev python3-systemd swig libjson-c-dev || true
-sudo python3 -m pip install --upgrade pip wheel meson ninja
-python3 -m pip install --upgrade dasbus pylint==2.17.7 pyflakes PyGObject lxml pyudev
+sudo apt-get install --yes --quiet meson ninja-build cmake libgirepository1.0-dev libsystemd-dev swig libjson-c-dev
+sudo apt-get install --yes --quiet python3-wheel python3-systemd python3-pyudev python3-dasbus python3-gi python3-lxml pyflakes3 python3-tomli
+- name: "INSTALL: pip packages"
+run: |
+pip install pylint
+# pip install PyGObject
 - name: "BUILD: [libnvme, nvme-stas]"
 uses: BSFishy/meson-build@v1.0.3
@@ -60,28 +78,70 @@ jobs:
 echo -e "Build Directory:\n$(ls -laF .build)"
 python3 -VV
 python3 -m site
-python3 -m pylint --version
+pylint --version
-echo "pyflakes $(python3 -m pyflakes --version)"
+echo "pyflakes3 $(pyflakes3 --version)"
-- name: Pylint
+#- name: Pylint
-run: |
+# run: |
-python3 -m pylint -j 0 --rcfile=test/pylint.rc .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib
+# pylint --rcfile=test/pylint.rc .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib
 - name: Pyflakes
 if: always()
 run: |
-python3 -m pyflakes .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib
+pyflakes3 .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib
+python-lint-Noble:
+runs-on: ubuntu-24.04
+strategy:
+fail-fast: false
+matrix:
+python-version: ["3.11", "3.12", "3.13"]
-python-black:
-if: ${{ !github.event.act }} # skip during local actions testing
-name: python-black formatter
-runs-on: ubuntu-latest
 steps:
 - name: "CHECKOUT: nvme-stas"
 uses: actions/checkout@v4
-- name: "BLACK"
+- name: Set up Python ${{ matrix.python-version }}
-uses: psf/black@stable
+uses: actions/setup-python@v5.3.0
 with:
-options: "--check --diff --color --line-length 120 --skip-string-normalization --extend-exclude (subprojects|debian|.build)"
+python-version: ${{ matrix.python-version }}
-src: "."
+- name: "INSTALL: apt-get packages"
+run: |
+sudo apt update
+sudo apt-get install --yes --quiet meson ninja-build cmake libgirepository1.0-dev libsystemd-dev swig libjson-c-dev
+sudo apt-get install --yes --quiet python3-wheel python3-systemd python3-pyudev python3-dasbus python3-gi python3-lxml pylint pyflakes3 python3-tomli
+#- name: "INSTALL: pip packages"
+# run: |
+# pip install pylint
+# pip install PyGObject
+- name: "BUILD: [libnvme, nvme-stas]"
+uses: BSFishy/meson-build@v1.0.3
+with:
+action: build
+directory: .build
+setup-options: --buildtype=release --sysconfdir=/etc --prefix=/usr -Dlibnvme:buildtype=release -Dlibnvme:sysconfdir=/etc -Dlibnvme:prefix=/usr -Dlibnvme:python=enabled -Dlibnvme:libdbus=disabled -Dlibnvme:openssl=disabled -Dlibnvme:json-c=disabled -Dlibnvme:keyutils=disabled
+- name: Set PYTHONPATH
+run: |
+echo "PYTHONPATH=.build:.build/subprojects/libnvme:/usr/lib/python3/dist-packages" >> $GITHUB_ENV
+- name: Show test environment
+run: |
+echo -e "Build Directory:\n$(ls -laF .build)"
+python3 -VV
+python3 -m site
+pylint --version
+echo "pyflakes3 $(pyflakes3 --version)"
+- name: Pylint
+run: |
+pylint --jobs=0 --rcfile=test/pylint.rc .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib
+- name: Pyflakes
+if: always()
+run: |
+pyflakes3 .build/stacctl .build/stacd .build/stafctl .build/stafd .build/stasadm .build/staslib


@@ -21,7 +21,7 @@ build:
 - pandoc
 jobs:
 post_install:
-- pip3 install lxml
+- pip install lxml
 pre_build:
 - meson .build -Dreadthedocs=true || cat .build/meson-logs/meson-log.txt
 - ninja -C .build


@@ -1,4 +1,4 @@
-FROM fedora:39
+FROM fedora:41
 WORKDIR /root


@@ -38,6 +38,7 @@ endif
 purge:
 ifneq ("$(wildcard ${BUILD-DIR})","")
 rm -rf ${BUILD-DIR}
+meson subprojects purge --confirm
 endif
 .PHONY: install
@@ -46,7 +47,8 @@ install: stas
 .PHONY: uninstall
 uninstall: ${BUILD-DIR}
-sudo ninja $@ -C ${BUILD-DIR}
+cd ${BUILD-DIR} && sudo meson --internal uninstall
 .PHONY: dist
 dist: stas
@@ -70,7 +72,7 @@ black:
 black --diff --color --line-length 120 --skip-string-normalization --extend-exclude="(subprojects|debian|.build)" .
 # Coverage requirements:
-# pip install coverage
+# apt-get install python3-coverage
 .PHONY: coverage
 coverage: stas
 cd ${BUILD-DIR} && ./coverage.sh

NEWS.md

@@ -1,5 +1,15 @@
 # STorage Appliance Services (STAS)
+## Changes with release 2.4
+New features:
+* Support for authentication
+Bug fix:
+* Various fixes related to unit testing and GitHub Actions
 ## Changes with release 2.3.1
 Bug fix:


@@ -96,7 +96,7 @@ The following packages must be installed to use **`stafd`**/**`stacd`**
 sudo apt-get install -y python3-pyudev python3-systemd python3-gi
 sudo apt-get install -y python3-dasbus # Ubuntu 22.04
 OR:
-sudo pip3 install dasbus # Ubuntu 20.04
+sudo pip install dasbus # Ubuntu 20.04 (may require --break-system-packages)
 ```
 **RPM packages (tested on Fedora 34..35 and SLES15):**


@@ -184,7 +184,7 @@ $ sudo ./nvmet.py clean
 This requires the [Python coverage package](https://coverage.readthedocs.io/en/6.4.1/), which can be installed as follows:
 ```bash
-$ sudo pip install coverage
+$ sudo apt-get install python3-coverage
 ```
 Note that this test cannot be run while `stafd` and `stacd` are running. Make sure to stop `stafd` and `stacd` if they are running (`systemctl stop [stafd|stacd]`). You may also need to mask those services (`systemctl mask [stafd|stacd]`) if coverage fails to start.
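Spelled out as commands, assuming the stafd.service and stacd.service unit names this project installs, that preparation step looks roughly like the following sketch:

```bash
# Stop the daemons before running the coverage test
sudo systemctl stop stafd stacd

# Only if coverage still fails to start, mask them as well
sudo systemctl mask stafd stacd

# Restore the services once the coverage run is done
sudo systemctl unmask stafd stacd
```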


@@ -377,7 +377,7 @@ persistent-connections = false
 zeroconf-connections-persistence = 1:01
 [Controllers]
-controller = transport = tcp ; traddr = localhost ; ; ; kato=31; dhchap-ctrl-secret=not-so-secret
+controller = transport = tcp ; traddr = localhost ; ; ; kato=31; dhchap-ctrl-secret=DHHC-1:00:not-so-secret/not-so-secret/not-so-secret/not-so: ; dhchap-secret=DHHC-1:00:very-secret/very-secret/very-secret/very-secret/:
 controller=transport=tcp;traddr=1.1.1.1
 controller=transport=tcp;traddr=100.100.100.100
 controller=transport=tcp;traddr=2607:f8b0:4002:c2c::71


@@ -376,6 +376,21 @@
 </listitem>
 </varlistentry>
+<varlistentry id='dhchap-secret'>
+<term><varname>dhchap-secret=</varname></term>
+<listitem>
+<para>
+NVMe In-band authentication host secret (i.e. key);
+needs to be in ASCII format as specified in NVMe 2.0
+section 8.13.5.8 Secret representation. If this
+option is not specified, the default is read
+from /etc/stas/sys.conf (see the 'key' parameter
+under the [Host] section). In-band authentication
+is attempted when this is present.
+</para>
+</listitem>
+</varlistentry>
 <varlistentry id='dhchap-ctrl-secret'>
 <term><varname>dhchap-ctrl-secret=</varname></term>
 <listitem>


@@ -235,6 +235,14 @@
 # This forces the connection to be made on a specific interface
 # instead of letting the system decide.
 #
+# dhchap-secret [OPTIONAL]
+# NVMe In-band authentication host secret (i.e. key); needs to be
+# in ASCII format as specified in NVMe 2.0 section 8.13.5.8 Secret
+# representation. If this option is not specified, the default is
+# read from /etc/stas/sys.conf (see the 'key' parameter under the
+# [Host] section). In-band authentication is attempted when this
+# is present.
+#
 # dhchap-ctrl-secret [OPTIONAL]
 # NVMe In-band authentication controller secret (i.e. key) for
 # bi-directional authentication; needs to be in ASCII format as
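To make the relationship between the two options concrete, here is a minimal sketch of a `[Controllers]` entry using the same `option=value ; option=value` syntax as the test configuration quoted earlier in this commit; the addresses and DHHC-1 strings below are placeholders, not values taken from this release:

```ini
# Hypothetical /etc/stas/stafd.conf fragment (placeholder values)
[Controllers]
# Host key only: unidirectional in-band authentication
controller = transport=tcp ; traddr=192.0.2.10 ; dhchap-secret=DHHC-1:00:host-key-goes-here:

# Host key plus controller key: bidirectional authentication
controller = transport=tcp ; traddr=192.0.2.11 ; dhchap-secret=DHHC-1:00:host-key-goes-here: ; dhchap-ctrl-secret=DHHC-1:00:ctrl-key-goes-here:
```

If dhchap-secret is omitted, the host key is read from /etc/stas/sys.conf as described above.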


@@ -9,7 +9,7 @@
 project(
 'nvme-stas',
 meson_version: '>= 0.53.0',
-version: '2.3.1',
+version: '2.4',
 license: 'Apache-2.0',
 default_options: [
 'buildtype=release',
@@ -39,7 +39,13 @@ if want_man or want_html or want_readthedocs
 buildtime_modules += ['lxml']
 endif
-python3 = import('python').find_installation('python3', modules:buildtime_modules)
+# On older systems we had to invoke Python 3 as "python3". On newer systems,
+# Python 2 has been completely deprecated and Python 3 is simply named "python".
+pymod = import('python')
+python3 = pymod.find_installation('python3', modules:buildtime_modules, required:false)
+if not python3.found()
+python3 = pymod.find_installation('python', modules:buildtime_modules)
+endif
 python_version = python3.language_version()
 python_version_req = '>=3.6'
 if not python_version.version_compare(python_version_req)
@@ -51,7 +57,7 @@ endif
 missing_runtime_mods = false
 py_modules_reqd = [
 ['libnvme', 'Install python3-libnvme (deb/rpm)'],
-['dasbus', 'Install python3-dasbus (deb/rpm) OR pip3 install dasbus'],
+['dasbus', 'Install python3-dasbus (deb/rpm) OR pip install dasbus'],
 ['pyudev', 'Install python3-pyudev (deb/rpm)'],
 ['systemd', 'Install python3-systemd (deb/rpm)'],
 ['gi', 'Install python3-gi (deb) OR python3-gobject (rpm)'],
@@ -165,7 +171,7 @@ summary_dict = {
 'dbus_conf_dir ': dbus_conf_dir,
 'sd_unit_dir ': sd_unit_dir,
 'build location ': meson.current_build_dir(),
-'libnvme for tests ': libnvme_location,
+'libnvme location ': libnvme_location,
 }
 summary(summary_dict, section: 'Directories')


@@ -7,8 +7,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' STorage Appliance Connector Control Utility
-'''
+'''STorage Appliance Connector Control Utility'''
 import sys
 import json
 import pprint


@@ -7,8 +7,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' STorage Appliance Connector Daemon
-'''
+'''STorage Appliance Connector Daemon'''
 import sys
 from argparse import ArgumentParser
 from staslib import defs


@@ -7,8 +7,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' STorage Appliance Finder Control Utility
-'''
+'''STorage Appliance Finder Control Utility'''
 import sys
 import json
 import pprint


@@ -7,8 +7,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' STorage Appliance Finder Daemon
-'''
+'''STorage Appliance Finder Daemon'''
 import sys
 from argparse import ArgumentParser
 from staslib import defs


@@ -7,7 +7,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' STorage Appliance Services Admin Tool '''
+'''STorage Appliance Services Admin Tool'''
 import os
 import sys
 import uuid


@@ -6,8 +6,8 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' Module that provides a way to retrieve discovered
+'''Module that provides a way to retrieve discovered
 services from the Avahi daemon over D-Bus.
 '''
 import socket
 import typing
@@ -167,9 +167,11 @@ class Service: # pylint: disable=too-many-instance-attributes
 'trsvcid': trsvcid,
 # host-iface permitted for tcp alone and not rdma
 'host-iface': host_iface,
-'subsysnqn': txt.get('nqn', defs.WELL_KNOWN_DISC_NQN).strip()
-if conf.NvmeOptions().discovery_supp
-else defs.WELL_KNOWN_DISC_NQN,
+'subsysnqn': (
+txt.get('nqn', defs.WELL_KNOWN_DISC_NQN).strip()
+if conf.NvmeOptions().discovery_supp
+else defs.WELL_KNOWN_DISC_NQN
+),
 }
 self._ip = iputil.get_ipaddress_obj(traddr, ipv4_mapped_convert=True)


@@ -289,8 +289,6 @@ class SvcConf(metaclass=singleton.Singleton): # pylint: disable=too-many-public
 nr_write_queues = property(functools.partial(get_option, section='Global', option='nr-write-queues'))
 reconnect_delay = property(functools.partial(get_option, section='Global', option='reconnect-delay'))
-zeroconf_enabled = property(functools.partial(get_option, section='Service Discovery', option='zeroconf'))
 zeroconf_persistence_sec = property(
 functools.partial(
 get_option, section='Discovery controller connection management', option='zeroconf-connections-persistence'
@@ -307,6 +305,11 @@ class SvcConf(metaclass=singleton.Singleton): # pylint: disable=too-many-public
 functools.partial(get_option, section='I/O controller connection management', option='connect-attempts-on-ncc')
 )
+@property # pylint chokes on this when defined as zeroconf_enabled=property(...). Works fine using a decorator...
+def zeroconf_enabled(self):
+'''Return whether zeroconf is enabled'''
+return self.get_option(section='Service Discovery', option='zeroconf')
 @property
 def stypes(self):
 '''@brief Get the DNS-SD/mDNS service types.'''
@@ -338,6 +341,7 @@ class SvcConf(metaclass=singleton.Singleton): # pylint: disable=too-many-public
 'host-traddr': [TRADDR],
 'host-iface': [IFACE],
 'host-nqn': [NQN],
+'dhchap-secret': [KEY],
 'dhchap-ctrl-secret': [KEY],
 'hdr-digest': [BOOL]
 'data-digest': [BOOL]
@@ -707,7 +711,7 @@ class NvmeOptions(metaclass=singleton.Singleton):
 # ******************************************************************************
-class NbftConf(metaclass=singleton.Singleton):
+class NbftConf(metaclass=singleton.Singleton): # pylint: disable=too-few-public-methods
 '''Read and cache configuration file.'''
 def __init__(self, root_dir=defs.NBFT_SYSFS_PATH):


@@ -221,23 +221,39 @@ class Controller(stas.ControllerABC): # pylint: disable=too-many-instance-attri
 host_traddr=self.tid.host_traddr if self.tid.host_traddr else None,
 host_iface=host_iface,
 )
-self._ctrl.discovery_ctrl_set(self._discovery_ctrl)
+self._ctrl.discovery_ctrl = self._discovery_ctrl
-# Set the DHCHAP key on the controller
-# NOTE that this will eventually have to
+# Set the DHCHAP host key on the controller
+# NOTE that this may eventually have to
 # change once we have support for AVE (TP8019)
-ctrl_dhchap_key = self.tid.cfg.get('dhchap-ctrl-secret')
-if ctrl_dhchap_key and self._nvme_options.dhchap_ctrlkey_supp:
-has_dhchap_key = hasattr(self._ctrl, 'dhchap_key')
-if not has_dhchap_key:
+# This is used for in-band authentication
+dhchap_host_key = self.tid.cfg.get('dhchap-secret')
+if dhchap_host_key and self._nvme_options.dhchap_hostkey_supp:
+try:
+self._ctrl.dhchap_host_key = dhchap_host_key
+except AttributeError:
 logging.warning(
-'%s | %s - libnvme-%s does not allow setting the controller DHCHAP key. Please upgrade libnvme.',
+'%s | %s - libnvme-%s does not allow setting the host DHCHAP key on the controller. Please upgrade libnvme.',
+self.id,
+self.device,
+defs.LIBNVME_VERSION,
+)
+# Set the DHCHAP controller key on the controller
+# NOTE that this may eventually have to
+# change once we have support for AVE (TP8019)
+# This is used for bidirectional authentication
+dhchap_ctrl_key = self.tid.cfg.get('dhchap-ctrl-secret')
+if dhchap_ctrl_key and self._nvme_options.dhchap_ctrlkey_supp:
+try:
+self._ctrl.dhchap_key = dhchap_ctrl_key
+except AttributeError:
+logging.warning(
+'%s | %s - libnvme-%s does not allow setting the controller DHCHAP key on the controller. Please upgrade libnvme.',
 self.id,
 self.device,
 defs.LIBNVME_VERSION,
 )
-else:
-self._ctrl.dhchap_key = ctrl_dhchap_key
 # Audit existing nvme devices. If we find a match, then
 # we'll just borrow that device instead of creating a new one.


@@ -6,8 +6,7 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
-''' @brief This file gets automagically configured by meson at build time.
-'''
+'''@brief This file gets automagically configured by meson at build time.'''
 import os
 import sys
 import shutil


@@ -123,7 +123,7 @@ class NameResolver: # pylint: disable=too-few-public-methods
 The callback @callback will be called once all hostnames have
 been resolved.
-@param controllers: List of trid.TID
+@param controllers_in: List of trid.TID
 '''
 pending_resolution_count = 0
 controllers_out = []


@@ -510,7 +510,7 @@ class Stac(Service):
 UDEV_RULE_OVERRIDE = r'''
 ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", \
 ENV{NVMEFC_HOST_TRADDR}=="*", ENV{NVMEFC_TRADDR}=="*", \
-RUN+="%s --no-block start nvmf-connect@--transport=fc\t--traddr=$env{NVMEFC_TRADDR}\t--trsvcid=none\t--host-traddr=$env{NVMEFC_HOST_TRADDR}.service"
+RUN+="%s --no-block restart nvmf-connect@--device\x3dnone\x09--transport\x3dfc\x09--traddr\x3d$env{NVMEFC_TRADDR}\x09--trsvcid\x3dnone\x09--host-traddr\x3d$env{NVMEFC_HOST_TRADDR}.service"
 '''
@@ -809,11 +809,7 @@ class Staf(Service):
 origin = (
 'configured'
 if tid in configured_ctrl_list
-else 'referral'
-if tid in referral_ctrl_list
-else 'discovered'
-if tid in discovered_ctrl_list
-else None
+else 'referral' if tid in referral_ctrl_list else 'discovered' if tid in discovered_ctrl_list else None
 )
 if origin is not None:
 controller.origin = origin
@@ -868,10 +864,12 @@ class Staf(Service):
 return
 # We need to invoke "nvme connect-all" using nvme-cli's nvmf-connect@.service
-# NOTE: Eventually, we'll be able to drop --host-traddr and --host-iface from
+# NOTE 1: Eventually, we'll be able to drop --host-traddr and --host-iface from
 # the parameters passed to nvmf-connect@.service. A fix was added to connect-all
 # to infer these two values from the device used to connect to the DC.
 # Ref: https://github.com/linux-nvme/nvme-cli/pull/1812
+#
+# NOTE 2:--transport, --traddr, and --trsvcid, not needed when using --device
 cnf = [
 ('--device', udev_obj.sys_name),
 ('--host-traddr', udev_obj.properties.get('NVME_HOST_TRADDR', None)),


@@ -33,6 +33,7 @@ class TID: # pylint: disable=too-many-instance-attributes
 'host-nqn': str, # [optional]
 # Connection parameters
+'dhchap-secret': str, # [optional]
 'dhchap-ctrl-secret': str, # [optional]
 'hdr-digest': str, # [optional]
 'data-digest': str, # [optional]


@@ -6,12 +6,12 @@
 #
 # Authors: Martin Belanger <Martin.Belanger@dell.com>
 #
-''' distutils (and hence LooseVersion) is being deprecated. None of the
+'''distutils (and hence LooseVersion) is being deprecated. None of the
 suggested replacements (e.g. from pkg_resources import parse_version) quite
 work with Linux kernel versions the way LooseVersion does.
 It was suggested to simply lift the LooseVersion code and vendor it in,
 which is what this module is about.
 '''
 import re


@@ -9,6 +9,8 @@
 srce_dir = meson.current_source_dir()
 test_env = environment({'MALLOC_PERTURB_': '0'})
+test_env.append('PYTHONMALLOC', 'malloc')
+test_list = modules_to_lint + packages_to_lint
 libnvme_location = '?'
@@ -23,9 +25,9 @@ if get_option('libnvme-sel') == 'pre-installed'
 rr = run_command(python3, '-c', 'import libnvme; print(f"{libnvme.__path__[0]}")', check: false, env: test_env)
 if rr.returncode() == 0
 libnvme_location = rr.stdout().strip()
-pythonpath = fs.parent(libnvme_location)
-test_env.prepend('PYTHONPATH', pythonpath) # Look in standard location first
-test_env.append('PYTHONPATH', PYTHONPATH) # Look in the build directory second
+libnvme_path = fs.parent(libnvme_location)
+PYTHONPATH = ':'.join([libnvme_path, PYTHONPATH])
+test_env.prepend('PYTHONPATH', PYTHONPATH)
 endif
 endif
@@ -46,13 +48,7 @@ if libnvme_location == '?'
 else
 #---------------------------------------------------------------------------
 # pylint and pyflakes
-# There's a bug with pylint 3.X. Tests should be run with pylint
-# 2.17.7 (or less), which can be installed with:
-# python3 -m pip install --upgrade pylint==2.17.7
-if modules_to_lint.length() != 0
+if test_list.length() != 0
 pylint = find_program('pylint', required: false)
 pyflakes = find_program('pyflakes3', required: false)
 if not pyflakes.found()
@@ -65,12 +61,12 @@ else
 rcfile = srce_dir / 'pylint.rc'
 if pylint.found()
-test('pylint', pylint, args: ['--rcfile=' + rcfile] + modules_to_lint + packages_to_lint, env: test_env)
+test('pylint', pylint, args: ['--rcfile=' + rcfile] + test_list, env: test_env)
 else
 warning('Skiping some of the tests because "pylint" is missing.')
 endif
 if pyflakes.found()
-test('pyflakes', pyflakes, args: modules_to_lint, env: test_env)
+test('pyflakes', pyflakes, args: test_list, env: test_env)
 else
 warning('Skiping some of the tests because "pyflakes" is missing.')
 endif
@@ -156,8 +152,8 @@ tools = [
 ]
 vermin = find_program('vermin', required: false)
 if vermin.found()
-if modules_to_lint.length() != 0
-test('vermin code', vermin, args: ['--config-file', srce_dir / 'vermin.conf'] + modules_to_lint, env: test_env)
+if test_list.length() != 0
+test('vermin code', vermin, args: ['--config-file', srce_dir / 'vermin.conf'] + test_list, env: test_env)
 endif
 test('vermin tools', vermin, args: ['--config-file', srce_dir / 'vermin-tools.conf'] + tools, env: test_env)
 else


@@ -43,11 +43,40 @@ class Test(unittest.TestCase):
 self.assertEqual('', iputil.get_interface(ifaces, ''))
 self.assertEqual('', iputil.get_interface(ifaces, None))
+@staticmethod
+def _is_ok_for_mac2iface(iface) -> bool:
+'''mac2iface can only work with interfaces that have a proper MAC
+address. One can use this function to filter out other interfaces
+configured on the system.'''
+if iface['link_type'] != 'ether':
+# Some esoteric interface types (e.g., gre) use the address
+# field to store something that is not a MAC address. Skip
+# them.
+return False
+if 'address' not in iface:
+return False
+if iface['address'] == '00:00:00:00:00:00':
+# All 0's is an invalid MAC address so do not bother.
+# In practice, it often appears as the address of the loopback
+# interface but it can also appear for other things like a gretap
+# or erspan interface.
+return False
+return True
 def test_mac2iface(self):
-for iface in self.ifaces:
-address = iface.get('address', None)
-if address:
-self.assertEqual(iface['ifname'], iputil.mac2iface(address))
+# We only test the interfaces that have a MAC address, and a valid one.
+candidate_ifaces = [iface for iface in self.ifaces if self._is_ok_for_mac2iface(iface)]
+for iface in candidate_ifaces:
+if len([x for x in candidate_ifaces if x['address'] == iface['address']]) >= 2:
+# We need to be careful, sometimes we can have the same MAC address
+# on multiple interfaces. This happens with VLAN interfaces for
+# instance. mac2iface will obviously be confused when dealing with
+# those so let's skip the interfaces that have duplicate MAC.
+logging.warning('[%s] is not the only interface with address [%s]', iface['ifname'], iface['address'])
+continue
+self.assertEqual(iface['ifname'], iputil.mac2iface(iface['address']))
 def test_remove_invalid_addresses(self):
 good_tcp = trid.TID({'transport': 'tcp', 'traddr': '1.1.1.1', 'subsysnqn': '', 'trsvcid': '8009'})


@@ -200,8 +200,7 @@ def get_tids_to_test(family, src_ip, ifname):
 ]
-class DummyDevice:
-...
+class DummyDevice: ...
 class Test(unittest.TestCase):
@@ -295,6 +294,11 @@ class Test(unittest.TestCase):
 def test__cid_matches_tid(self):
 ifaces = iputil.net_if_addrs()
 for ifname, addrs in self.ifaces.items():
+# <ifaces> contains a subset of the interfaces found in <self.ifaces>.
+# So, let's make sure that we only test with the interfaces found in both.
+if ifname not in ifaces:
+continue
 ##############################################
 # IPV4


@@ -28,5 +28,14 @@ RuntimeDirectory=stacd
 CacheDirectory=stacd
 RuntimeDirectoryPreserve=yes
+ProtectHome=true
+ProtectKernelModules=true
+ProtectKernelLogs=true
+ProtectControlGroups=true
+ProtectProc=invisible
+RestrictRealtime=true
+LockPersonality=yes
+MemoryDenyWriteExecute=yes
 [Install]
 WantedBy=multi-user.target
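These added directives tighten the unit's sandboxing. A quick way to gauge their effect, assuming a reasonably recent systemd with the analyzer available, is to compare the unit's exposure score before and after the change:

```bash
# Lower exposure scores indicate a more confined service
systemd-analyze security stacd.service
```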


@@ -31,5 +31,14 @@ RuntimeDirectory=stafd
 CacheDirectory=stafd
 RuntimeDirectoryPreserve=yes
+ProtectHome=true
+ProtectKernelModules=true
+ProtectKernelLogs=true
+ProtectControlGroups=true
+ProtectProc=invisible
+RestrictRealtime=true
+LockPersonality=yes
+MemoryDenyWriteExecute=yes
 [Install]
 WantedBy=multi-user.target

utils/nvmet/auth.conf (new file)

@@ -0,0 +1,34 @@
# Config file format: Python, i.e. dict(), list(), int, str, etc...
# port ids (id) are integers 0...N
# namespaces are integers 0..N
# subsysnqn can be integers or strings
{
'ports': [
{
'id': 1,
#'adrfam': 'ipv6',
#'traddr': '::',
'adrfam': 'ipv4',
'traddr': '0.0.0.0',
'trsvcid': 4420,
'trtype': 'tcp',
}
],
'subsystems': [
{
'subsysnqn': 'nqn.1988-11.com.dell:PowerSANxxx:01:20210225100113-454f73093ceb4847a7bdfc6e34ae8e28',
'port': 1,
'namespaces': [1],
'allowed_hosts': [
{
# Must match with the NQN and key configured on the host
# Key was generated with:
# nvme gen-dhchap-key ...
'nqn': 'nqn.2014-08.org.nvmexpress:uuid:46ba5037-7ce5-41fa-9452-48477bf00080',
'key': 'DHHC-1:00:2kx1hDTUPdvwtxHYUXFRl8pzn5hYZH7K3Z77IYM4hNN6/fQT:',
},
],
},
]
}


@@ -6,10 +6,10 @@
 'ports': [
 {
 'id': 1,
-'adrfam': 'ipv6',
-'traddr': '::',
-#'adrfam': 'ipv4',
-#'traddr': '0.0.0.0',
+#'adrfam': 'ipv6',
+#'traddr': '::',
+'adrfam': 'ipv4',
+'traddr': '0.0.0.0',
 'trsvcid': 8009,
 'trtype': 'tcp',
 }


@@ -52,19 +52,26 @@ def _get_loaded_nvmet_modules():
 return output
-def _runcmd(cmd: list, quiet=False):
+def _runcmd(cmd: list, quiet=False, capture_output=False):
 if not quiet:
 print(' '.join(cmd))
 if args.dry_run:
 return
-subprocess.run(cmd)
+try:
+cp = subprocess.run(cmd, capture_output=capture_output, text=True)
+except TypeError:
+# For older Python versions that don't support "capture_output" or "text"
+cp = subprocess.run(cmd, stdout=subprocess.PIPE, universal_newlines=True)
+return cp.stdout if capture_output else None
 def _modprobe(module: str, args: list = None, quiet=False):
 cmd = ['/usr/sbin/modprobe', module]
 if args:
 cmd.extend(args)
-_runcmd(cmd, quiet)
+_runcmd(cmd, quiet=quiet)
 def _mkdir(dname: str):
@@ -93,12 +100,32 @@ def _symlink(port: str, subsysnqn: str):
 link.symlink_to(target)
-def _create_subsystem(subsysnqn: str) -> str:
+def _symlink_allowed_hosts(hostnqn: str, subsysnqn: str):
+print(
+f'$( cd "/sys/kernel/config/nvmet/subsystems/{subsysnqn}/allowed_hosts" && ln -s "../../../hosts/{hostnqn}" "{hostnqn}" )'
+)
+if args.dry_run:
+return
+target = os.path.join('/sys/kernel/config/nvmet/hosts', hostnqn)
+link = pathlib.Path(os.path.join('/sys/kernel/config/nvmet/subsystems', subsysnqn, 'allowed_hosts', hostnqn))
+link.symlink_to(target)
+def _create_subsystem(subsysnqn: str, allowed_hosts: list) -> str:
 print(f'###{Fore.GREEN} Create subsystem: {subsysnqn}{Style.RESET_ALL}')
 dname = os.path.join('/sys/kernel/config/nvmet/subsystems/', subsysnqn)
 _mkdir(dname)
-_echo(1, os.path.join(dname, 'attr_allow_any_host'))
-return dname
+_echo(0 if allowed_hosts else 1, os.path.join(dname, 'attr_allow_any_host'))
+# Configure all the hosts that are allowed to access this subsystem
+for host in allowed_hosts:
+hostnqn = host.get('nqn')
+hostkey = host.get('key')
+if all([hostnqn, hostkey]):
+dname = os.path.join('/sys/kernel/config/nvmet/hosts/', hostnqn)
+_mkdir(dname)
+_echo(hostkey, os.path.join(dname, 'dhchap_key'))
+_symlink_allowed_hosts(hostnqn, subsysnqn)
 def _create_namespace(subsysnqn: str, id: str, node: str) -> str:
@@ -107,7 +134,6 @@ def _create_namespace(subsysnqn: str, id: str, node: str) -> str:
 _mkdir(dname)
 _echo(node, os.path.join(dname, 'device_path'))
 _echo(1, os.path.join(dname, 'enable'))
-return dname
 def _args_valid(id, traddr, trsvcid, trtype, adrfam):
@@ -215,8 +241,9 @@ def create(args):
 str(subsystem.get('port')),
 subsystem.get('namespaces'),
 )
 if None not in (subsysnqn, port, namespaces):
-_create_subsystem(subsysnqn)
+_create_subsystem(subsysnqn, subsystem.get('allowed_hosts', []))
 for id in namespaces:
 _create_namespace(subsysnqn, str(id), dev_node)
 else:
@@ -235,10 +262,16 @@ def clean(args):
 if not args.dry_run and os.geteuid() != 0:
 sys.exit(f'Permission denied. You need root privileges to run {os.path.basename(__file__)}.')
+print(f'###{Fore.GREEN} 1st) Remove the symlinks{Style.RESET_ALL}')
 print('rm -f /sys/kernel/config/nvmet/ports/*/subsystems/*')
 for dname in pathlib.Path('/sys/kernel/config/nvmet/ports').glob('*/subsystems/*'):
 _runcmd(['rm', '-f', str(dname)], quiet=True)
+print('rm -f /sys/kernel/config/nvmet/subsystems/*/allowed_hosts/*')
+for dname in pathlib.Path('/sys/kernel/config/nvmet/subsystems').glob('*/allowed_hosts/*'):
+_runcmd(['rm', '-f', str(dname)], quiet=True)
+print(f'###{Fore.GREEN} 2nd) Remove directories{Style.RESET_ALL}')
 print('rmdir /sys/kernel/config/nvmet/ports/*')
 for dname in pathlib.Path('/sys/kernel/config/nvmet/ports').glob('*'):
 _runcmd(['rmdir', str(dname)], quiet=True)
@@ -251,6 +284,11 @@ def clean(args):
 for dname in pathlib.Path('/sys/kernel/config/nvmet/subsystems').glob('*'):
 _runcmd(['rmdir', str(dname)], quiet=True)
+print('rmdir /sys/kernel/config/nvmet/hosts/*')
+for dname in pathlib.Path('/sys/kernel/config/nvmet/hosts').glob('*'):
+_runcmd(['rmdir', str(dname)], quiet=True)
+print(f'###{Fore.GREEN} 3rd) Unload kernel modules{Style.RESET_ALL}')
 for module in _get_loaded_nvmet_modules():
 _modprobe(module, ['--remove'])