
Merging upstream version 2.3.1:

- properly handles big-endian data in `iputils.py` (Closes: #1057031).

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-16 12:56:36 +01:00
parent 1fb60de7fe
commit a8f39c03aa
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
24 changed files with 598 additions and 437 deletions

View file

@@ -55,7 +55,7 @@ jobs:
       # Build and push Docker image with Buildx (don't push on PR)
       # https://github.com/docker/build-push-action
       - name: Build and push Docker image
-        uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09
+        uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56
         with:
           context: .
           push: ${{ github.event_name != 'pull_request' }}

View file

@@ -26,7 +26,7 @@ jobs:
         run: |
           sudo apt-get install --yes --quiet docbook-xml docbook-xsl xsltproc libglib2.0-dev libgirepository1.0-dev libsystemd-dev
           sudo apt-get install --yes --quiet python3-systemd python3-pyudev python3-lxml
-          python3 -m pip install --upgrade dasbus pylint pyflakes PyGObject
+          python3 -m pip install --upgrade dasbus pylint==2.17.7 pyflakes PyGObject
           python3 -m pip install --upgrade vermin pyfakefs importlib-resources
       - name: "INSTALL: libnvme dependencies"

View file

@@ -12,7 +12,7 @@ jobs:
   docker-lint:
     if: ${{ !github.event.act }} # skip during local actions testing
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
       - uses: hadolint/hadolint-action@v3.1.0
@@ -21,12 +21,12 @@ jobs:
           ignore: DL3041
   python-lint:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]
     steps:
       - name: "CHECKOUT: nvme-stas"
@@ -42,7 +42,7 @@ jobs:
           sudo apt update
           sudo apt-get install --yes --quiet python3-pip cmake libgirepository1.0-dev libsystemd-dev python3-systemd swig libjson-c-dev || true
           sudo python3 -m pip install --upgrade pip wheel meson ninja
-          python3 -m pip install --upgrade dasbus pylint pyflakes PyGObject lxml pyudev
+          python3 -m pip install --upgrade dasbus pylint==2.17.7 pyflakes PyGObject lxml pyudev
       - name: "BUILD: [libnvme, nvme-stas]"
         uses: BSFishy/meson-build@v1.0.3

View file

@@ -1,4 +1,4 @@
-FROM fedora:38
+FROM fedora:39
 WORKDIR /root

View file

@@ -1,5 +1,11 @@
 # STorage Appliance Services (STAS)
+## Changes with release 2.3.1
+Bug fix:
+* Properly handle big-endian data in `iputils.py`. This fix ensures that `struct.[pack|unpack]` is invoked with the CPU's native endianness. This fix is required for nvme-stas to work properly on big-endian CPUs (little-endian CPUs are not affected).
 ## Changes with release 2.3
 New features:
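
As background for the bug fix above (this example is not part of the commit): Python's `struct` module treats the `'<'` format prefix as "always little-endian", while `'='` means "the host CPU's native byte order", which is what the kernel expects for netlink messages. A minimal, self-contained sketch of the difference:

```python
import struct
import sys

value = 0x01020304

# '<' forces little-endian regardless of the host CPU.
little = struct.pack('<L', value)   # always b'\x04\x03\x02\x01'

# '=' uses the host CPU's native byte order (with standard sizes, no padding).
native = struct.pack('=L', value)   # b'\x01\x02\x03\x04' on a big-endian host

print(sys.byteorder, little.hex(), native.hex())
# On a little-endian host both values are '04030201', which is why the old
# '<' format strings only misbehaved on big-endian CPUs.
```

The `iputils.py` diff further down applies exactly this change, replacing `'<'` with `'='` in the `struct.pack`/`struct.unpack` calls used to build and parse netlink messages.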

View file

@@ -188,7 +188,7 @@
             DNS-SD/mDNS.
         </para>
         <para>
-            Discovery Controllers that support zeroconf advertize
+            Discovery Controllers that support zeroconf advertise
             themselves over mDNS with the service type
             <literal>_nvme-disc._tcp</literal>.
         </para>

View file

@@ -165,7 +165,7 @@
 #                  before giving up. This value should never be
 #                  set to 1. A value of 1 will automatically be
 #                  increased to 2. That's because a single
-#                  failure may be normal and a mimimum of 2
+#                  failure may be normal and a minimum of 2
 #                  attempts is required to conclude that a
 #                  connection is not possible.
 #                  Default: 0

View file

@@ -16,7 +16,7 @@
 # indicates that the Host NQN can be retrieved from a separate file.
 # Typically, nvme-cli saves the Host NQN in /etc/nvme/hostnqn. For
 # compatibility with nvme-cli, nvme-stas defaults to looking for the
-# existance of this file and will read the NQN from it. Otherwise, you
+# existence of this file and will read the NQN from it. Otherwise, you
 # can overwrite the default NQN by specifying its value here or
 # specifying another file that contains the Host NQN to use.
 # Type: string
@@ -28,7 +28,7 @@
 # indicates that the Host ID can be retrieved from a separate file.
 # Typically, nvme-cli saves the Host ID in /etc/nvme/hostid. For
 # compatibility with nvme-cli, nvme-stas defaults to looking for the
-# existance of this file and will read the ID from it. Otherwise, you
+# existence of this file and will read the ID from it. Otherwise, you
 # can overwrite the default ID by specifying its value here or
 # specifying another file that contains the Host ID to use.
 # Type: string
@@ -41,7 +41,7 @@
 # A value starting with "file://" indicates that the Host Key can
 # be retrieved from a separate file. Typically, nvme-cli saves the
 # Host Key in /etc/nvme/hostkey. For compatibility with nvme-cli,
-# nvme-stas defaults to looking for the existance of this file and
+# nvme-stas defaults to looking for the existence of this file and
 # will read the Key from it. Otherwise, you can overwrite the default
 # Key by specifying its value here or specifying another file that
 # contains an alternate Host Key to use.

View file

@@ -9,7 +9,7 @@
 project(
     'nvme-stas',
     meson_version: '>= 0.53.0',
-    version: '2.3',
+    version: '2.3.1',
     license: 'Apache-2.0',
     default_options: [
         'buildtype=release',
@@ -133,6 +133,7 @@ endforeach
 #===========================================================================
 # Make a list of modules to lint
 modules_to_lint = [stafd, stafctl, stacd, stacctl, stasadm]
+packages_to_lint = []
 # Point Python Path to Current Build Dir.

View file

@@ -69,7 +69,7 @@ if __name__ == '__main__':
             return STAC.tron
         @tron.setter
-        def tron(self, value): # pylint: disable=no-self-use
+        def tron(self, value):
             '''@brief Set Trace ON property'''
             STAC.tron = value
@@ -89,14 +89,14 @@ if __name__ == '__main__':
             info.update(STAC.info())
             return json.dumps(info)
-        def controller_info( # pylint: disable=too-many-arguments,no-self-use
+        def controller_info( # pylint: disable=too-many-arguments
             self, transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn
         ) -> str:
             '''@brief D-Bus method used to return information about a controller'''
             controller = STAC.get_controller(transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn)
             return json.dumps(controller.info()) if controller else '{}'
-        def list_controllers(self, detailed) -> list: # pylint: disable=no-self-use
+        def list_controllers(self, detailed) -> list:
             '''@brief Return the list of I/O controller IDs'''
             return [
                 controller.details() if detailed else controller.controller_id_dict()

View file

@@ -88,7 +88,7 @@ if __name__ == '__main__':
             return STAF.tron
         @tron.setter
-        def tron(self, value): # pylint: disable=no-self-use
+        def tron(self, value):
             '''@brief Set Trace ON property'''
             STAF.tron = value
@@ -108,21 +108,21 @@ if __name__ == '__main__':
             info.update(STAF.info())
             return json.dumps(info)
-        def controller_info( # pylint: disable=no-self-use,too-many-arguments
+        def controller_info( # pylint: disable=too-many-arguments
             self, transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn
         ) -> str:
             '''@brief D-Bus method used to return information about a controller'''
             controller = STAF.get_controller(transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn)
             return json.dumps(controller.info()) if controller else '{}'
-        def get_log_pages( # pylint: disable=no-self-use,too-many-arguments
+        def get_log_pages( # pylint: disable=too-many-arguments
             self, transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn
         ) -> list:
             '''@brief D-Bus method used to retrieve the discovery log pages from one controller'''
             controller = STAF.get_controller(transport, traddr, trsvcid, subsysnqn, host_traddr, host_iface, host_nqn)
             return controller.log_pages() if controller else list()
-        def get_all_log_pages(self, detailed) -> str: # pylint: disable=no-self-use
+        def get_all_log_pages(self, detailed) -> str:
             '''@brief D-Bus method used to retrieve the discovery log pages from all controllers'''
             log_pages = list()
             for controller in STAF.get_controllers():
@@ -134,7 +134,7 @@ if __name__ == '__main__':
             )
             return json.dumps(log_pages)
-        def list_controllers(self, detailed) -> list: # pylint: disable=no-self-use
+        def list_controllers(self, detailed) -> list:
             '''@brief Return the list of discovery controller IDs'''
             return [
                 controller.details() if detailed else controller.controller_id_dict()

View file

@@ -580,7 +580,7 @@ class Avahi: # pylint: disable=too-many-instance-attributes
         self._check_for_duplicate_ips()
-    def _failure_handler( # pylint: disable=no-self-use
+    def _failure_handler(
         self,
         _connection,
         _sender_name: str,

View file

@@ -659,7 +659,7 @@ class NvmeOptions(metaclass=singleton.Singleton):
             'dhchap_ctrl_secret': defs.KERNEL_VERSION >= defs.KERNEL_CTRLKEY_MIN_VERSION,
         }
-        # If some of the options are False, we need to check wether they can be
+        # If some of the options are False, we need to check whether they can be
         # read from '/dev/nvme-fabrics'. This method allows us to determine that
         # an older kernel actually supports a specific option because it was
         # backported to that kernel.

View file

@@ -425,7 +425,7 @@ class Dc(Controller):
     @property
     def origin(self):
-        '''@brief Return how this controller came into existance. Was it
+        '''@brief Return how this controller came into existence. Was it
         "discovered" through mDNS service discovery (TP8009), was it manually
         "configured" in stafd.conf, or was it a "referral".
         '''
@@ -853,6 +853,6 @@ class Ioc(Controller):
         self._try_to_connect_deferred.schedule()
     def _should_try_to_reconnect(self):
-        '''@brief This is used to determine when it's time to stop trying toi connect'''
+        '''@brief This is used to determine when it's time to stop trying to connect'''
         max_connect_attempts = conf.SvcConf().connect_attempts_on_ncc if self.ncc else 0
         return max_connect_attempts == 0 or self._connect_attempts < max_connect_attempts

View file

@@ -250,7 +250,7 @@ class _TaskRunner(GObject.Object):
         task.run_in_thread(in_thread_exec)
         return task
-    def communicate_finish(self, result): # pylint: disable=no-self-use
+    def communicate_finish(self, result):
         '''@brief Use this function in your callback (see @cb_function) to
         extract data from the result object.

View file

@@ -42,7 +42,7 @@ def _nlmsghdr(nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid, msg_len: int):
         __u32 nlmsg_pid; /* Sending process port ID */
     };
     '''
-    return struct.pack('<LHHLL', NLMSG_LENGTH(msg_len), nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid)
+    return struct.pack('=LHHLL', NLMSG_LENGTH(msg_len), nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid)
 def _ifaddrmsg(family=0, prefixlen=0, flags=0, scope=0, index=0):
@@ -55,7 +55,7 @@ def _ifaddrmsg(family=0, prefixlen=0, flags=0, scope=0, index=0):
         __u32 ifa_index; /* Link index */
     };
     '''
-    return struct.pack('<BBBBL', family, prefixlen, flags, scope, index)
+    return struct.pack('=BBBBL', family, prefixlen, flags, scope, index)
 def _ifinfomsg(family=0, dev_type=0, index=0, flags=0, change=0):
@@ -69,7 +69,7 @@ def _ifinfomsg(family=0, dev_type=0, index=0, flags=0, change=0):
         unsigned int ifi_change; /* change mask: IFF_* */
     };
     '''
-    return struct.pack('<BBHiII', family, 0, dev_type, index, flags, change)
+    return struct.pack('=BBHiII', family, 0, dev_type, index, flags, change)
 def _nlmsg(nlmsg_type, nlmsg_flags, msg: bytes):
@@ -102,7 +102,7 @@ def mac2iface(mac: str): # pylint: disable=too-many-locals
         nlmsg += sock.recv(8192)
         nlmsghdr = nlmsg[nlmsg_idx : nlmsg_idx + NLMSG_HDRLEN]
-        nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('<LHHLL', nlmsghdr)
+        nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('=LHHLL', nlmsghdr)
         if nlmsg_type == NLMSG_DONE:
             break
@@ -110,13 +110,13 @@ def mac2iface(mac: str): # pylint: disable=too-many-locals
         if nlmsg_type == RTM_BASE:
             msg_indx = nlmsg_idx + NLMSG_HDRLEN
             msg = nlmsg[msg_indx : msg_indx + IFINFOMSG_SZ] # ifinfomsg
-            _, _, ifi_type, ifi_index, _, _ = struct.unpack('<BBHiII', msg)
+            _, _, ifi_type, ifi_index, _, _ = struct.unpack('=BBHiII', msg)
             if ifi_type in (ARPHRD_LOOPBACK, ARPHRD_ETHER):
                 rtattr_indx = msg_indx + IFINFOMSG_SZ
                 while rtattr_indx < (nlmsg_idx + nlmsg_len):
                     rtattr = nlmsg[rtattr_indx : rtattr_indx + RTATTR_SZ]
-                    rta_len, rta_type = struct.unpack('<HH', rtattr)
+                    rta_len, rta_type = struct.unpack('=HH', rtattr)
                     if rta_type == IFLA_ADDRESS:
                         data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]
                         if _data_matches_mac(data, mac):
@@ -132,7 +132,7 @@ def mac2iface(mac: str): # pylint: disable=too-many-locals
 # ******************************************************************************
 def ip_equal(ip1, ip2):
-    '''Check whther two IP addresses are equal.
+    '''Check whether two IP addresses are equal.
     @param ip1: IPv4Address or IPv6Address object
     @param ip2: IPv4Address or IPv6Address object
     '''
@@ -206,7 +206,7 @@ def net_if_addrs(): # pylint: disable=too-many-locals
         nlmsg += sock.recv(8192)
         nlmsghdr = nlmsg[nlmsg_idx : nlmsg_idx + NLMSG_HDRLEN]
-        nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('<LHHLL', nlmsghdr)
+        nlmsg_len, nlmsg_type, _, _, _ = struct.unpack('=LHHLL', nlmsghdr)
         if nlmsg_type == NLMSG_DONE:
             break
@@ -214,7 +214,7 @@ def net_if_addrs(): # pylint: disable=too-many-locals
         if nlmsg_type == RTM_NEWADDR:
             msg_indx = nlmsg_idx + NLMSG_HDRLEN
             msg = nlmsg[msg_indx : msg_indx + IFADDRMSG_SZ] # ifaddrmsg
-            ifa_family, _, _, _, ifa_index = struct.unpack('<BBBBL', msg)
+            ifa_family, _, _, _, ifa_index = struct.unpack('=BBBBL', msg)
             if ifa_family in (socket.AF_INET, socket.AF_INET6):
                 interfaces.setdefault(ifa_index, {4: [], 6: []})
@@ -222,7 +222,7 @@ def net_if_addrs(): # pylint: disable=too-many-locals
             rtattr_indx = msg_indx + IFADDRMSG_SZ
             while rtattr_indx < (nlmsg_idx + nlmsg_len):
                 rtattr = nlmsg[rtattr_indx : rtattr_indx + RTATTR_SZ]
-                rta_len, rta_type = struct.unpack('<HH', rtattr)
+                rta_len, rta_type = struct.unpack('=HH', rtattr)
                 if rta_type == IFLA_IFNAME:
                     data = nlmsg[rtattr_indx + RTATTR_SZ : rtattr_indx + rta_len]

View file

@@ -49,13 +49,4 @@ python3.install_sources(
     subdir: 'staslib',
 )
-#===============================================================================
-# Make a list of modules to lint
-skip = ['stafd.idl', 'stacd.idl']
-foreach file: files_to_install
-    fname = fs.name('@0@'.format(file))
-    if fname not in skip
-        modules_to_lint += file
-    endif
-endforeach
+packages_to_lint += meson.current_build_dir()

View file

@@ -725,7 +725,7 @@ class Staf(Service):
         '''@brief Finish discovery controllers configuration after
         hostnames (if any) have been resolved. All the logic associated
         with discovery controller creation/deletion is found here. To
-        avoid calling this algorith repetitively for each and every events,
+        avoid calling this algorithm repetitively for each and every events,
         it is called after a soaking period controlled by self._cfg_soak_tmr.
         @param configured_ctrl_list: List of TIDs configured in stafd.conf with

View file

@@ -29,7 +29,7 @@ except ImportError:
         # Pre Python 3.9 backport of importlib.resources (if installed)
         from importlib_resources import files
     except ImportError:
-        # Less efficient, but avalable on older versions of Python
+        # Less efficient, but available on older versions of Python
         import pkg_resources
 def load_idl(idl_fname):
@@ -248,7 +248,7 @@ class ControllerABC(abc.ABC):
             self._try_to_connect_deferred.schedule()
         return GLib.SOURCE_REMOVE
-    def _should_try_to_reconnect(self): # pylint: disable=no-self-use
+    def _should_try_to_reconnect(self):
         return True
     def _try_to_connect(self):
@@ -537,7 +537,7 @@ class ServiceABC(abc.ABC): # pylint: disable=too-many-instance-attributes
         # controllers with traddr specified as hostname instead of IP address.
         # Because of this, we need to remove those excluded elements before
         # running name resolution. And we will need to remove excluded
-        # elements after name resolution is complete (i.e. in the calback
+        # elements after name resolution is complete (i.e. in the callback
         # function _config_ctrls_finish)
         logging.debug('ServiceABC._config_ctrls()')
         configured_controllers = [

View file

@@ -46,6 +46,12 @@ if libnvme_location == '?'
 else
     #---------------------------------------------------------------------------
     # pylint and pyflakes
+    # There's a bug with pylint 3.X. Tests should be run with pylint
+    # 2.17.7 (or less), which can be installed with:
+    #     python3 -m pip install --upgrade pylint==2.17.7
     if modules_to_lint.length() != 0
         pylint = find_program('pylint', required: false)
         pyflakes = find_program('pyflakes3', required: false)
@@ -59,7 +65,7 @@ else
         rcfile = srce_dir / 'pylint.rc'
         if pylint.found()
-            test('pylint', pylint, args: ['--rcfile=' + rcfile] + modules_to_lint, env: test_env)
+            test('pylint', pylint, args: ['--rcfile=' + rcfile] + modules_to_lint + packages_to_lint, env: test_env)
         else
             warning('Skiping some of the tests because "pylint" is missing.')
         endif
     endif

File diff suppressed because it is too large

View file

@@ -194,9 +194,9 @@ class StasSysConfUnitTest(unittest.TestCase):
         ],
         FNAME_4: [
             '[Host]\n',
-            'nqn=file:///some/non/exisiting/file/!@#\n',
-            'id=file:///some/non/exisiting/file/!@#\n',
-            'symname=file:///some/non/exisiting/file/!@#\n',
+            'nqn=file:///some/non/existing/file/!@#\n',
+            'id=file:///some/non/existing/file/!@#\n',
+            'symname=file:///some/non/existing/file/!@#\n',
         ],
     }

View file

@@ -1,4 +1,5 @@
 #!/usr/bin/python3
+import contextlib
 import os
 import sys
 import unittest
@@ -9,13 +10,17 @@ class MockLibnvmeTestCase(unittest.TestCase):
     '''Testing defs.py by mocking the libnvme package'''
     def test_libnvme_version(self):
-        # For unknown reasons, this test does
-        # not work when run from GitHub Actions.
-        if not os.getenv('GITHUB_ACTIONS'):
-            from staslib import defs
-            libnvme_ver = defs.LIBNVME_VERSION
-            self.assertEqual(libnvme_ver, '?.?')
+        # Ensure that we re-import staslib & staslib.defs if the current Python
+        # process has them already imported.
+        with contextlib.suppress(KeyError):
+            sys.modules.pop('staslib.defs')
+        with contextlib.suppress(KeyError):
+            sys.modules.pop('staslib')
+        from staslib import defs
+        libnvme_ver = defs.LIBNVME_VERSION
+        self.assertEqual(libnvme_ver, '?.?')
     @classmethod
     def setUpClass(cls):  # called once before all the tests

View file

@@ -679,7 +679,7 @@ class Test(unittest.TestCase):
                 'host-nqn': '',
             }
         )
-        match = len(ipv6_addrs) == 1 and iputil.get_ipaddress_obj(
+        match = len(ipv6_addrs) >= 1 and iputil.get_ipaddress_obj(
             ipv6_addrs[0], ipv4_mapped_convert=True
         ) == iputil.get_ipaddress_obj(tid.host_traddr, ipv4_mapped_convert=True)
         self.assertEqual(